swagger: '2.0' info: title: Microsoft Azure SearchServiceClient description: >- Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. version: 2019-05-06-Preview x-ms-code-generation-settings: useDateTimeOffset: true x-ms-parameterized-host: hostTemplate: '{endpoint}' useSchemePrefix: false parameters: - $ref: '#/parameters/EndpointParameter' consumes: - application/json produces: - application/json paths: /datasources('{dataSourceName}'): put: tags: - DataSources operationId: microsoftAzureDatasourcesCreateorupdate x-ms-examples: SearchServiceCreateOrUpdateDataSource: $ref: ./examples/SearchServiceCreateOrUpdateDataSource.json description: Creates a new datasource or updates a datasource if it already exists. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Update-Data-Source parameters: - name: dataSourceName in: path required: true type: string description: The name of the datasource to create or update. - name: dataSource in: body required: true schema: $ref: '#/definitions/SearchIndexerDataSource' description: The definition of the datasource to create or update. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/PreferHeaderParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndexerDataSource' '201': description: '' schema: $ref: '#/definitions/SearchIndexerDataSource' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Put Datasources Datasourcename' delete: tags: - DataSources operationId: microsoftAzureDatasourcesDelete x-ms-examples: SearchServiceDeleteDataSource: $ref: ./examples/SearchServiceDeleteDataSource.json description: Deletes a datasource. 
externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Delete-Data-Source parameters: - name: dataSourceName in: path required: true type: string description: The name of the datasource to delete. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: '' '404': description: '' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Delete Datasources Datasourcename' get: tags: - DataSources operationId: microsoftAzureDatasourcesGet x-ms-examples: SearchServiceGetDataSource: $ref: ./examples/SearchServiceGetDataSource.json description: Retrieves a datasource definition. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Data-Source parameters: - name: dataSourceName in: path required: true type: string description: The name of the datasource to retrieve. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndexerDataSource' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Get Datasources Datasourcename' /datasources: get: tags: - DataSources operationId: microsoftAzureDatasourcesList x-ms-examples: SearchServiceListDataSources: $ref: ./examples/SearchServiceListDataSources.json description: Lists all datasources available for a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/List-Data-Sources parameters: - name: $select in: query required: false type: string description: >- Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. 
- $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/ListDataSourcesResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Datasources post: tags: - DataSources operationId: microsoftAzureDatasourcesCreate x-ms-examples: SearchServiceCreateDataSource: $ref: ./examples/SearchServiceCreateDataSource.json description: Creates a new datasource. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Data-Source parameters: - name: dataSource in: body required: true schema: $ref: '#/definitions/SearchIndexerDataSource' description: The definition of the datasource to create. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '201': description: '' schema: $ref: '#/definitions/SearchIndexerDataSource' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Datasources /indexers('{indexerName}')/search.reset: post: tags: - Indexers operationId: microsoftAzureIndexersReset x-ms-examples: SearchServiceResetIndexer: $ref: ./examples/SearchServiceResetIndexer.json description: Resets the change tracking state associated with an indexer. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Reset-Indexer parameters: - name: indexerName in: path required: true type: string description: The name of the indexer to reset. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: '' default: description: Error response. 
schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Indexers Indexername Search Reset /indexers('{indexerName}')/search.run: post: tags: - Indexers operationId: microsoftAzureIndexersRun x-ms-examples: SearchServiceRunIndexer: $ref: ./examples/SearchServiceRunIndexer.json description: Runs an indexer on-demand. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Run-Indexer parameters: - name: indexerName in: path required: true type: string description: The name of the indexer to run. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '202': description: '' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Indexers Indexername Search Run /indexers('{indexerName}'): put: tags: - Indexers operationId: microsoftAzureIndexersCreateorupdate x-ms-examples: SearchServiceCreateOrUpdateIndexer: $ref: ./examples/SearchServiceCreateOrUpdateIndexer.json description: Creates a new indexer or updates an indexer if it already exists. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Indexer parameters: - name: indexerName in: path required: true type: string description: The name of the indexer to create or update. - name: indexer in: body required: true schema: $ref: '#/definitions/SearchIndexer' description: The definition of the indexer to create or update. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/PreferHeaderParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndexer' '201': description: '' schema: $ref: '#/definitions/SearchIndexer' default: description: Error response. 
schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Put Indexers Indexername' delete: tags: - Indexers operationId: microsoftAzureIndexersDelete x-ms-examples: SearchServiceDeleteIndexer: $ref: ./examples/SearchServiceDeleteIndexer.json description: Deletes an indexer. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Delete-Indexer parameters: - name: indexerName in: path required: true type: string description: The name of the indexer to delete. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: '' '404': description: '' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Delete Indexers Indexername' get: tags: - Indexers operationId: microsoftAzureIndexersGet x-ms-examples: SearchServiceGetIndexer: $ref: ./examples/SearchServiceGetIndexer.json description: Retrieves an indexer definition. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Indexer parameters: - name: indexerName in: path required: true type: string description: The name of the indexer to retrieve. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndexer' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Get Indexers Indexername' /indexers: get: tags: - Indexers operationId: microsoftAzureIndexersList x-ms-examples: SearchServiceListIndexers: $ref: ./examples/SearchServiceListIndexers.json description: Lists all indexers available for a search service. 
externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/List-Indexers parameters: - name: $select in: query required: false type: string description: >- Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/ListIndexersResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Indexers post: tags: - Indexers operationId: microsoftAzureIndexersCreate x-ms-examples: SearchServiceCreateIndexer: $ref: ./examples/SearchServiceCreateIndexer.json description: Creates a new indexer. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Indexer parameters: - name: indexer in: body required: true schema: $ref: '#/definitions/SearchIndexer' description: The definition of the indexer to create. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '201': description: '' schema: $ref: '#/definitions/SearchIndexer' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Indexers /indexers('{indexerName}')/search.status: get: tags: - Indexers operationId: microsoftAzureIndexersGetstatus x-ms-examples: SearchServiceGetIndexerStatus: $ref: ./examples/SearchServiceGetIndexerStatus.json description: Returns the current status and execution history of an indexer. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Indexer-Status parameters: - name: indexerName in: path required: true type: string description: The name of the indexer for which to retrieve status. 
- $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndexerStatus' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Indexers Indexername Search Status /skillsets('{skillsetName}'): put: tags: - Skillsets operationId: microsoftAzureSkillsetsCreateorupdate x-ms-examples: SearchServiceCreateOrUpdateSkillset: $ref: ./examples/SearchServiceCreateOrUpdateSkillset.json description: >- Creates a new skillset in a search service or updates the skillset if it already exists. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/update-skillset parameters: - name: skillsetName in: path required: true type: string description: The name of the skillset to create or update. - name: skillset in: body required: true schema: $ref: '#/definitions/SearchIndexerSkillset' description: >- The skillset containing one or more skills to create or update in a search service. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/PreferHeaderParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: The skillset is successfully updated. schema: $ref: '#/definitions/SearchIndexerSkillset' '201': description: The skillset is successfully created. schema: $ref: '#/definitions/SearchIndexerSkillset' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Put Skillsets Skillsetname' delete: tags: - Skillsets operationId: microsoftAzureSkillsetsDelete x-ms-examples: SearchServiceDeleteSkillset: $ref: ./examples/SearchServiceDeleteSkillset.json description: Deletes a skillset in a search service. 
externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/delete-skillset parameters: - name: skillsetName in: path required: true type: string description: The name of the skillset to delete. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: The skillset is successfully deleted. '404': description: The provided skillset name is not found. default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Delete Skillsets Skillsetname' get: tags: - Skillsets operationId: microsoftAzureSkillsetsGet x-ms-examples: SearchServiceGetSkillset: $ref: ./examples/SearchServiceGetSkillset.json description: Retrieves a skillset in a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/get-skillset parameters: - name: skillsetName in: path required: true type: string description: The name of the skillset to retrieve. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: The skillset is successfully returned. schema: $ref: '#/definitions/SearchIndexerSkillset' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Get Skillsets Skillsetname' /skillsets: get: tags: - Skillsets operationId: microsoftAzureSkillsetsList x-ms-examples: SearchServiceListSkillsets: $ref: ./examples/SearchServiceListSkillsets.json description: List all skillsets in a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/list-skillset parameters: - name: $select in: query required: false type: string description: >- Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. 
The default is all properties. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: The list is successfully returned. schema: $ref: '#/definitions/ListSkillsetsResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Skillsets post: tags: - Skillsets operationId: microsoftAzureSkillsetsCreate x-ms-examples: SearchServiceCreateSkillset: $ref: ./examples/SearchServiceCreateSkillset.json description: Creates a new skillset in a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/create-skillset parameters: - name: skillset in: body required: true schema: $ref: '#/definitions/SearchIndexerSkillset' description: >- The skillset containing one or more skills to create in a search service. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '201': description: The skillset is successfully created. schema: $ref: '#/definitions/SearchIndexerSkillset' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Skillsets /synonymmaps('{synonymMapName}'): put: tags: - SynonymMaps operationId: microsoftAzureSynonymmapsCreateorupdate x-ms-examples: SearchServiceCreateOrUpdateSynonymMap: $ref: ./examples/SearchServiceCreateOrUpdateSynonymMap.json description: Creates a new synonym map or updates a synonym map if it already exists. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Update-Synonym-Map parameters: - name: synonymMapName in: path required: true type: string description: The name of the synonym map to create or update. - name: synonymMap in: body required: true schema: $ref: '#/definitions/SynonymMap' description: The definition of the synonym map to create or update. 
- $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/PreferHeaderParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SynonymMap' '201': description: '' schema: $ref: '#/definitions/SynonymMap' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Put Synonymmaps Synonymmapname' delete: tags: - SynonymMaps operationId: microsoftAzureSynonymmapsDelete x-ms-examples: SearchServiceDeleteSynonymMap: $ref: ./examples/SearchServiceDeleteSynonymMap.json description: Deletes a synonym map. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Delete-Synonym-Map parameters: - name: synonymMapName in: path required: true type: string description: The name of the synonym map to delete. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: '' '404': description: '' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Delete Synonymmaps Synonymmapname' get: tags: - SynonymMaps operationId: microsoftAzureSynonymmapsGet x-ms-examples: SearchServiceGetSynonymMap: $ref: ./examples/SearchServiceGetSynonymMap.json description: Retrieves a synonym map definition. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Synonym-Map parameters: - name: synonymMapName in: path required: true type: string description: The name of the synonym map to retrieve. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SynonymMap' default: description: Error response. 
schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Get Synonymmaps Synonymmapname' /synonymmaps: get: tags: - SynonymMaps operationId: microsoftAzureSynonymmapsList x-ms-examples: SearchServiceListSynonymMaps: $ref: ./examples/SearchServiceListSynonymMaps.json description: Lists all synonym maps available for a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/List-Synonym-Maps parameters: - name: $select in: query required: false type: string description: >- Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/ListSynonymMapsResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Synonymmaps post: tags: - SynonymMaps operationId: microsoftAzureSynonymmapsCreate x-ms-examples: SearchServiceCreateSynonymMap: $ref: ./examples/SearchServiceCreateSynonymMap.json description: Creates a new synonym map. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Synonym-Map parameters: - name: synonymMap in: body required: true schema: $ref: '#/definitions/SynonymMap' description: The definition of the synonym map to create. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '201': description: '' schema: $ref: '#/definitions/SynonymMap' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Synonymmaps /indexes: post: tags: - Indexes operationId: microsoftAzureIndexesCreate x-ms-examples: SearchServiceCreateIndex: $ref: ./examples/SearchServiceCreateIndex.json description: Creates a new search index. 
externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Index parameters: - name: index in: body required: true schema: $ref: '#/definitions/SearchIndex' description: The definition of the index to create. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '201': description: '' schema: $ref: '#/definitions/SearchIndex' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Indexes get: tags: - Indexes operationId: microsoftAzureIndexesList x-ms-examples: SearchServiceListIndexes: $ref: ./examples/SearchServiceListIndexes.json description: Lists all indexes available for a search service. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/List-Indexes parameters: - name: $select in: query required: false type: string description: >- Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/ListIndexesResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' x-ms-pageable: nextLinkName: summary: Microsoft Azure Get Indexes /indexes('{indexName}'): put: tags: - Indexes operationId: microsoftAzureIndexesCreateorupdate x-ms-examples: SearchServiceCreateOrUpdateIndex: $ref: ./examples/SearchServiceCreateOrUpdateIndex.json description: Creates a new search index or updates an index if it already exists. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Update-Index parameters: - name: indexName in: path required: true type: string description: The name of the index to create or update. 
- name: index in: body required: true schema: $ref: '#/definitions/SearchIndex' description: The definition of the index to create or update. - name: allowIndexDowntime in: query required: false type: boolean description: >- Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/PreferHeaderParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndex' '201': description: '' schema: $ref: '#/definitions/SearchIndex' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Put Indexes Indexname' delete: tags: - Indexes operationId: microsoftAzureIndexesDelete x-ms-examples: SearchServiceDeleteIndex: $ref: ./examples/SearchServiceDeleteIndex.json description: >- Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Delete-Index parameters: - name: indexName in: path required: true type: string description: The name of the index to delete. 
- $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/IfMatchParameter' - $ref: '#/parameters/IfNoneMatchParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '204': description: '' '404': description: '' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Delete Indexes Indexname' get: tags: - Indexes operationId: microsoftAzureIndexesGet x-ms-examples: SearchServiceGetIndex: $ref: ./examples/SearchServiceGetIndex.json description: Retrieves an index definition. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Index parameters: - name: indexName in: path required: true type: string description: The name of the index to retrieve. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/SearchIndex' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: 'Microsoft Azure Get Indexes Indexname' /indexes('{indexName}')/search.stats: get: tags: - Indexes operationId: microsoftAzureIndexesGetstatistics x-ms-examples: SearchServiceGetIndexStatistics: $ref: ./examples/SearchServiceGetIndexStatistics.json description: >- Returns statistics for the given index, including a document count and storage usage. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Get-Index-Statistics parameters: - name: indexName in: path required: true type: string description: The name of the index for which to retrieve statistics. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/GetIndexStatisticsResult' default: description: Error response. 
schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Indexes Indexname Search Stats /indexes('{indexName}')/search.analyze: post: tags: - Indexes operationId: microsoftAzureIndexesAnalyze x-ms-examples: SearchServiceIndexAnalyze: $ref: ./examples/SearchServiceIndexAnalyze.json description: Shows how an analyzer breaks text into tokens. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/test-analyzer parameters: - name: indexName in: path required: true type: string description: The name of the index for which to test an analyzer. - name: request in: body required: true schema: $ref: '#/definitions/AnalyzeRequest' description: The text and analyzer or analysis components to test. - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/AnalyzeResult' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Post Indexes Indexname Search Analyze /servicestats: get: tags: [] operationId: microsoftAzureGetservicestatistics x-ms-examples: SearchServiceGetServiceStatistics: $ref: ./examples/SearchServiceGetServiceStatistics.json description: Gets service level statistics for a search service. parameters: - $ref: '#/parameters/ClientRequestIdParameter' - $ref: '#/parameters/ApiVersionParameter' x-ms-request-id: request-id responses: '200': description: '' schema: $ref: '#/definitions/ServiceStatistics' default: description: Error response. schema: $ref: '#/definitions/SearchError' summary: Microsoft Azure Get Servicestats definitions: AnalyzeRequest: properties: text: type: string description: The text to break into tokens. analyzer: $ref: '#/definitions/LexicalAnalyzerName' description: >- The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. 
The tokenizer and analyzer parameters are mutually exclusive. tokenizer: $ref: '#/definitions/LexicalTokenizerName' description: >- The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. tokenFilters: type: array items: $ref: '#/definitions/TokenFilterName' x-nullable: false description: >- An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. charFilters: type: array items: $ref: '#/definitions/CharFilterName' x-nullable: false description: >- An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. required: - text description: >- Specifies some text and analysis components used to break that text into tokens. AnalyzeResult: properties: tokens: type: array items: $ref: '#/definitions/AnalyzedTokenInfo' description: The list of tokens returned by the analyzer specified in the request. required: - tokens description: The result of testing an analyzer on text. AnalyzedTokenInfo: properties: token: type: string readOnly: true description: The token returned by the analyzer. startOffset: type: integer format: int32 readOnly: true x-nullable: false description: The index of the first character of the token in the input text. endOffset: type: integer format: int32 readOnly: true x-nullable: false description: The index of the last character of the token in the input text. position: type: integer format: int32 readOnly: true x-nullable: false description: >- The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. 
required: - token - startOffset - endOffset - position description: Information about a token returned by an analyzer. LexicalAnalyzerName: type: string enum: - ar.microsoft - ar.lucene - hy.lucene - bn.microsoft - eu.lucene - bg.microsoft - bg.lucene - ca.microsoft - ca.lucene - zh-Hans.microsoft - zh-Hans.lucene - zh-Hant.microsoft - zh-Hant.lucene - hr.microsoft - cs.microsoft - cs.lucene - da.microsoft - da.lucene - nl.microsoft - nl.lucene - en.microsoft - en.lucene - et.microsoft - fi.microsoft - fi.lucene - fr.microsoft - fr.lucene - gl.lucene - de.microsoft - de.lucene - el.microsoft - el.lucene - gu.microsoft - he.microsoft - hi.microsoft - hi.lucene - hu.microsoft - hu.lucene - is.microsoft - id.microsoft - id.lucene - ga.lucene - it.microsoft - it.lucene - ja.microsoft - ja.lucene - kn.microsoft - ko.microsoft - ko.lucene - lv.microsoft - lv.lucene - lt.microsoft - ml.microsoft - ms.microsoft - mr.microsoft - nb.microsoft - no.lucene - fa.lucene - pl.microsoft - pl.lucene - pt-BR.microsoft - pt-BR.lucene - pt-PT.microsoft - pt-PT.lucene - pa.microsoft - ro.microsoft - ro.lucene - ru.microsoft - ru.lucene - sr-cyrillic.microsoft - sr-latin.microsoft - sk.microsoft - sl.microsoft - es.microsoft - es.lucene - sv.microsoft - sv.lucene - ta.microsoft - te.microsoft - th.microsoft - th.lucene - tr.microsoft - tr.lucene - uk.microsoft - ur.microsoft - vi.microsoft - standard.lucene - standardasciifolding.lucene - keyword - pattern - simple - stop - whitespace x-ms-enum: name: LexicalAnalyzerName modelAsString: true values: - value: ar.microsoft name: ArMicrosoft description: Microsoft analyzer for Arabic. - value: ar.lucene name: ArLucene description: Lucene analyzer for Arabic. - value: hy.lucene name: HyLucene description: Lucene analyzer for Armenian. - value: bn.microsoft name: BnMicrosoft description: Microsoft analyzer for Bangla. - value: eu.lucene name: EuLucene description: Lucene analyzer for Basque. 
- value: bg.microsoft name: BgMicrosoft description: Microsoft analyzer for Bulgarian. - value: bg.lucene name: BgLucene description: Lucene analyzer for Bulgarian. - value: ca.microsoft name: CaMicrosoft description: Microsoft analyzer for Catalan. - value: ca.lucene name: CaLucene description: Lucene analyzer for Catalan. - value: zh-Hans.microsoft name: ZhHansMicrosoft description: Microsoft analyzer for Chinese (Simplified). - value: zh-Hans.lucene name: ZhHansLucene description: Lucene analyzer for Chinese (Simplified). - value: zh-Hant.microsoft name: ZhHantMicrosoft description: Microsoft analyzer for Chinese (Traditional). - value: zh-Hant.lucene name: ZhHantLucene description: Lucene analyzer for Chinese (Traditional). - value: hr.microsoft name: HrMicrosoft description: Microsoft analyzer for Croatian. - value: cs.microsoft name: CsMicrosoft description: Microsoft analyzer for Czech. - value: cs.lucene name: CsLucene description: Lucene analyzer for Czech. - value: da.microsoft name: DaMicrosoft description: Microsoft analyzer for Danish. - value: da.lucene name: DaLucene description: Lucene analyzer for Danish. - value: nl.microsoft name: NlMicrosoft description: Microsoft analyzer for Dutch. - value: nl.lucene name: NlLucene description: Lucene analyzer for Dutch. - value: en.microsoft name: EnMicrosoft description: Microsoft analyzer for English. - value: en.lucene name: EnLucene description: Lucene analyzer for English. - value: et.microsoft name: EtMicrosoft description: Microsoft analyzer for Estonian. - value: fi.microsoft name: FiMicrosoft description: Microsoft analyzer for Finnish. - value: fi.lucene name: FiLucene description: Lucene analyzer for Finnish. - value: fr.microsoft name: FrMicrosoft description: Microsoft analyzer for French. - value: fr.lucene name: FrLucene description: Lucene analyzer for French. - value: gl.lucene name: GlLucene description: Lucene analyzer for Galician. 
- value: de.microsoft name: DeMicrosoft description: Microsoft analyzer for German. - value: de.lucene name: DeLucene description: Lucene analyzer for German. - value: el.microsoft name: ElMicrosoft description: Microsoft analyzer for Greek. - value: el.lucene name: ElLucene description: Lucene analyzer for Greek. - value: gu.microsoft name: GuMicrosoft description: Microsoft analyzer for Gujarati. - value: he.microsoft name: HeMicrosoft description: Microsoft analyzer for Hebrew. - value: hi.microsoft name: HiMicrosoft description: Microsoft analyzer for Hindi. - value: hi.lucene name: HiLucene description: Lucene analyzer for Hindi. - value: hu.microsoft name: HuMicrosoft description: Microsoft analyzer for Hungarian. - value: hu.lucene name: HuLucene description: Lucene analyzer for Hungarian. - value: is.microsoft name: IsMicrosoft description: Microsoft analyzer for Icelandic. - value: id.microsoft name: IdMicrosoft description: Microsoft analyzer for Indonesian (Bahasa). - value: id.lucene name: IdLucene description: Lucene analyzer for Indonesian. - value: ga.lucene name: GaLucene description: Lucene analyzer for Irish. - value: it.microsoft name: ItMicrosoft description: Microsoft analyzer for Italian. - value: it.lucene name: ItLucene description: Lucene analyzer for Italian. - value: ja.microsoft name: JaMicrosoft description: Microsoft analyzer for Japanese. - value: ja.lucene name: JaLucene description: Lucene analyzer for Japanese. - value: kn.microsoft name: KnMicrosoft description: Microsoft analyzer for Kannada. - value: ko.microsoft name: KoMicrosoft description: Microsoft analyzer for Korean. - value: ko.lucene name: KoLucene description: Lucene analyzer for Korean. - value: lv.microsoft name: LvMicrosoft description: Microsoft analyzer for Latvian. - value: lv.lucene name: LvLucene description: Lucene analyzer for Latvian. - value: lt.microsoft name: LtMicrosoft description: Microsoft analyzer for Lithuanian. 
- value: ml.microsoft name: MlMicrosoft description: Microsoft analyzer for Malayalam. - value: ms.microsoft name: MsMicrosoft description: Microsoft analyzer for Malay (Latin). - value: mr.microsoft name: MrMicrosoft description: Microsoft analyzer for Marathi. - value: nb.microsoft name: NbMicrosoft description: Microsoft analyzer for Norwegian (Bokmål). - value: no.lucene name: NoLucene description: Lucene analyzer for Norwegian. - value: fa.lucene name: FaLucene description: Lucene analyzer for Persian. - value: pl.microsoft name: PlMicrosoft description: Microsoft analyzer for Polish. - value: pl.lucene name: PlLucene description: Lucene analyzer for Polish. - value: pt-BR.microsoft name: PtBrMicrosoft description: Microsoft analyzer for Portuguese (Brazil). - value: pt-BR.lucene name: PtBrLucene description: Lucene analyzer for Portuguese (Brazil). - value: pt-PT.microsoft name: PtPtMicrosoft description: Microsoft analyzer for Portuguese (Portugal). - value: pt-PT.lucene name: PtPtLucene description: Lucene analyzer for Portuguese (Portugal). - value: pa.microsoft name: PaMicrosoft description: Microsoft analyzer for Punjabi. - value: ro.microsoft name: RoMicrosoft description: Microsoft analyzer for Romanian. - value: ro.lucene name: RoLucene description: Lucene analyzer for Romanian. - value: ru.microsoft name: RuMicrosoft description: Microsoft analyzer for Russian. - value: ru.lucene name: RuLucene description: Lucene analyzer for Russian. - value: sr-cyrillic.microsoft name: SrCyrillicMicrosoft description: Microsoft analyzer for Serbian (Cyrillic). - value: sr-latin.microsoft name: SrLatinMicrosoft description: Microsoft analyzer for Serbian (Latin). - value: sk.microsoft name: SkMicrosoft description: Microsoft analyzer for Slovak. - value: sl.microsoft name: SlMicrosoft description: Microsoft analyzer for Slovenian. - value: es.microsoft name: EsMicrosoft description: Microsoft analyzer for Spanish. 
- value: es.lucene name: EsLucene description: Lucene analyzer for Spanish. - value: sv.microsoft name: SvMicrosoft description: Microsoft analyzer for Swedish. - value: sv.lucene name: SvLucene description: Lucene analyzer for Swedish. - value: ta.microsoft name: TaMicrosoft description: Microsoft analyzer for Tamil. - value: te.microsoft name: TeMicrosoft description: Microsoft analyzer for Telugu. - value: th.microsoft name: ThMicrosoft description: Microsoft analyzer for Thai. - value: th.lucene name: ThLucene description: Lucene analyzer for Thai. - value: tr.microsoft name: TrMicrosoft description: Microsoft analyzer for Turkish. - value: tr.lucene name: TrLucene description: Lucene analyzer for Turkish. - value: uk.microsoft name: UkMicrosoft description: Microsoft analyzer for Ukrainian. - value: ur.microsoft name: UrMicrosoft description: Microsoft analyzer for Urdu. - value: vi.microsoft name: ViMicrosoft description: Microsoft analyzer for Vietnamese. - value: standard.lucene name: StandardLucene description: Standard Lucene analyzer. - value: standardasciifolding.lucene name: StandardAsciiFoldingLucene description: >- Standard ASCII Folding Lucene analyzer. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers - value: keyword name: Keyword description: >- Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html - value: pattern name: Pattern description: >- Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html - value: simple name: Simple description: >- Divides text at non-letters and converts them to lower case. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html - value: stop name: Stop description: >- Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html - value: whitespace name: Whitespace description: >- An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html description: >- Defines the names of all text analyzers supported by Azure Cognitive Search. externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Language-support LexicalTokenizerName: type: string enum: - classic - edgeNGram - keyword_v2 - letter - lowercase - microsoft_language_tokenizer - microsoft_language_stemming_tokenizer - nGram - path_hierarchy_v2 - pattern - standard_v2 - uax_url_email - whitespace x-ms-enum: name: LexicalTokenizerName modelAsString: true values: - value: classic name: Classic description: >- Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html - value: edgeNGram name: EdgeNGram description: >- Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html - value: keyword_v2 name: Keyword description: >- Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html - value: letter name: Letter description: >- Divides text at non-letters. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html - value: lowercase name: Lowercase description: >- Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html - value: microsoft_language_tokenizer name: MicrosoftLanguageTokenizer description: Divides text using language-specific rules. - value: microsoft_language_stemming_tokenizer name: MicrosoftLanguageStemmingTokenizer description: >- Divides text using language-specific rules and reduces words to their base forms. - value: nGram name: NGram description: >- Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html - value: path_hierarchy_v2 name: PathHierarchy description: >- Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html - value: pattern name: Pattern description: >- Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html - value: standard_v2 name: Standard description: >- Breaks text following the Unicode Text Segmentation rules. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html - value: uax_url_email name: UaxUrlEmail description: >- Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html - value: whitespace name: Whitespace description: >- Divides text at whitespace. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html description: Defines the names of all tokenizers supported by Azure Cognitive Search. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search TokenFilterName: type: string enum: - arabic_normalization - apostrophe - asciifolding - cjk_bigram - cjk_width - classic - common_grams - edgeNGram_v2 - elision - german_normalization - hindi_normalization - indic_normalization - keyword_repeat - kstem - length - limit - lowercase - nGram_v2 - persian_normalization - phonetic - porter_stem - reverse - scandinavian_normalization - scandinavian_folding - shingle - snowball - sorani_normalization - stemmer - stopwords - trim - truncate - unique - uppercase - word_delimiter x-ms-enum: name: TokenFilterName modelAsString: true values: - value: arabic_normalization name: ArabicNormalization description: >- A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html - value: apostrophe name: Apostrophe description: >- Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html - value: asciifolding name: AsciiFolding description: >- Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html - value: cjk_bigram name: CjkBigram description: >- Forms bigrams of CJK terms that are generated from the standard tokenizer. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html - value: cjk_width name: CjkWidth description: >- Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html - value: classic name: Classic description: >- Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html - value: common_grams name: CommonGram description: >- Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html - value: edgeNGram_v2 name: EdgeNGram description: >- Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html - value: elision name: Elision description: >- Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html - value: german_normalization name: GermanNormalization description: >- Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html - value: hindi_normalization name: HindiNormalization description: >- Normalizes text in Hindi to remove some differences in spelling variations. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html - value: indic_normalization name: IndicNormalization description: >- Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html - value: keyword_repeat name: KeywordRepeat description: >- Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html - value: kstem name: KStem description: >- A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html - value: length name: Length description: >- Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html - value: limit name: Limit description: >- Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html - value: lowercase name: Lowercase description: >- Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html - value: nGram_v2 name: NGram description: >- Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html - value: persian_normalization name: PersianNormalization description: >- Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html - value: phonetic name: Phonetic description: >- Create tokens for phonetic matches. 
See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html - value: porter_stem name: PorterStem description: >- Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer - value: reverse name: Reverse description: >- Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html - value: scandinavian_normalization name: ScandinavianNormalization description: >- Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html - value: scandinavian_folding name: ScandinavianFoldingNormalization description: >- Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html - value: shingle name: Shingle description: >- Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html - value: snowball name: Snowball description: >- A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html - value: sorani_normalization name: SoraniNormalization description: >- Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html - value: stemmer name: Stemmer description: >- Language specific stemming filter. 
See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters - value: stopwords name: Stopwords description: >- Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html - value: trim name: Trim description: >- Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html - value: truncate name: Truncate description: >- Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html - value: unique name: Unique description: >- Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html - value: uppercase name: Uppercase description: >- Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html - value: word_delimiter name: WordDelimiter description: >- Splits words into subwords and performs optional transformations on subword groups. description: >- Defines the names of all token filters supported by Azure Cognitive Search. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search CharFilterName: type: string enum: - html_strip x-ms-enum: name: CharFilterName modelAsString: true values: - value: html_strip name: HtmlStrip description: >- A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html description: >- Defines the names of all character filters supported by Azure Cognitive Search. 
externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search RegexFlags: type: string enum: - CANON_EQ - CASE_INSENSITIVE - COMMENTS - DOTALL - LITERAL - MULTILINE - UNICODE_CASE - UNIX_LINES x-ms-enum: name: RegexFlags modelAsString: true values: - value: CANON_EQ name: CanonEq description: Enables canonical equivalence. - value: CASE_INSENSITIVE name: CaseInsensitive description: Enables case-insensitive matching. - value: COMMENTS name: Comments description: Permits whitespace and comments in the pattern. - value: DOTALL name: DotAll description: Enables dotall mode. - value: LITERAL name: Literal description: Enables literal parsing of the pattern. - value: MULTILINE name: Multiline description: Enables multiline mode. - value: UNICODE_CASE name: UnicodeCase description: Enables Unicode-aware case folding. - value: UNIX_LINES name: UnixLines description: Enables Unix lines mode. description: >- Defines flags that can be combined to control how regular expressions are used in the pattern analyzer and pattern tokenizer. externalDocs: url: >- http://docs.oracle.com/javase/6/docs/api/java/util/regex/Pattern.html#field_summary SearchFieldDataType: type: string enum: - Edm.String - Edm.Int32 - Edm.Int64 - Edm.Double - Edm.Boolean - Edm.DateTimeOffset - Edm.GeographyPoint - Edm.ComplexType x-ms-enum: name: SearchFieldDataType modelAsString: true values: - value: Edm.String name: String description: Indicates that a field contains a string. - value: Edm.Int32 name: Int32 description: Indicates that a field contains a 32-bit signed integer. - value: Edm.Int64 name: Int64 description: Indicates that a field contains a 64-bit signed integer. - value: Edm.Double name: Double description: >- Indicates that a field contains an IEEE double-precision floating point number. - value: Edm.Boolean name: Boolean description: Indicates that a field contains a Boolean value (true or false). 
- value: Edm.DateTimeOffset name: DateTimeOffset description: >- Indicates that a field contains a date/time value, including timezone information. - value: Edm.GeographyPoint name: GeographyPoint description: >- Indicates that a field contains a geo-location in terms of longitude and latitude. - value: Edm.ComplexType name: Complex description: >- Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. description: Defines the data type of a field in a search index. LexicalAnalyzer: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the analyzer. name: type: string externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/custom-analyzers-in-azure-search#index-attribute-reference description: >- The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. required: - '@odata.type' - name description: Base type for analyzers. CustomAnalyzer: x-ms-discriminator-value: '#Microsoft.Azure.Search.CustomAnalyzer' allOf: - $ref: '#/definitions/LexicalAnalyzer' properties: tokenizer: $ref: '#/definitions/LexicalTokenizerName' description: >- The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. tokenFilters: type: array items: $ref: '#/definitions/TokenFilterName' x-nullable: false description: >- A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. charFilters: type: array items: $ref: '#/definitions/CharFilterName' x-nullable: false description: >- A list of character filters used to prepare input text before it is processed by the tokenizer. 
For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. required: - tokenizer description: >- Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. PatternAnalyzer: x-ms-discriminator-value: '#Microsoft.Azure.Search.PatternAnalyzer' allOf: - $ref: '#/definitions/LexicalAnalyzer' properties: lowercase: x-ms-client-name: LowerCaseTerms type: boolean default: true description: >- A value indicating whether terms should be lower-cased. Default is true. pattern: type: string default: \W+ description: >- A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. flags: $ref: '#/definitions/RegexFlags' description: Regular expression flags. stopwords: type: array items: type: string description: A list of stopwords. description: >- Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html LuceneStandardAnalyzer: x-ms-discriminator-value: '#Microsoft.Azure.Search.StandardAnalyzer' allOf: - $ref: '#/definitions/LexicalAnalyzer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. stopwords: type: array items: type: string description: A list of stopwords. description: >- Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardAnalyzer.html StopAnalyzer: x-ms-discriminator-value: '#Microsoft.Azure.Search.StopAnalyzer' allOf: - $ref: '#/definitions/LexicalAnalyzer' properties: stopwords: type: array items: type: string description: A list of stopwords. description: >- Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html LexicalTokenizer: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the tokenizer. name: type: string externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/custom-analyzers-in-azure-search#index-attribute-reference description: >- The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. required: - '@odata.type' - name description: Base type for tokenizers. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search ClassicTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.ClassicTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. description: >- Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html TokenCharacterKind: type: string enum: - letter - digit - whitespace - punctuation - symbol x-ms-enum: name: TokenCharacterKind modelAsString: false values: - value: letter name: Letter description: Keeps letters in tokens. - value: digit name: Digit description: Keeps digits in tokens. - value: whitespace name: Whitespace description: Keeps whitespace in tokens. - value: punctuation name: Punctuation description: Keeps punctuation in tokens. - value: symbol name: Symbol description: Keeps symbols in tokens. description: Represents classes of characters on which a token filter can operate. EdgeNGramTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.EdgeNGramTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: minGram: type: integer format: int32 default: 1 maximum: 300 description: >- The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 maximum: 300 description: The maximum n-gram length. Default is 2. Maximum is 300. tokenChars: type: array items: $ref: '#/definitions/TokenCharacterKind' x-nullable: false description: Character classes to keep in the tokens. description: >- Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. externalDocs: url: >- https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html KeywordTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.KeywordTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: bufferSize: type: integer format: int32 default: 256 description: The read buffer size in bytes. Default is 256. description: >- Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html x-az-search-deprecated: true KeywordTokenizerV2: x-ms-discriminator-value: '#Microsoft.Azure.Search.KeywordTokenizerV2' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 256 maximum: 300 description: >- The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. description: >- Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html MicrosoftTokenizerLanguage: type: string enum: - bangla - bulgarian - catalan - chineseSimplified - chineseTraditional - croatian - czech - danish - dutch - english - french - german - greek - gujarati - hindi - icelandic - indonesian - italian - japanese - kannada - korean - malay - malayalam - marathi - norwegianBokmaal - polish - portuguese - portugueseBrazilian - punjabi - romanian - russian - serbianCyrillic - serbianLatin - slovenian - spanish - swedish - tamil - telugu - thai - ukrainian - urdu - vietnamese x-ms-enum: name: MicrosoftTokenizerLanguage modelAsString: false values: - value: bangla name: Bangla description: Selects the Microsoft tokenizer for Bangla. - value: bulgarian name: Bulgarian description: Selects the Microsoft tokenizer for Bulgarian. - value: catalan name: Catalan description: Selects the Microsoft tokenizer for Catalan. - value: chineseSimplified name: ChineseSimplified description: Selects the Microsoft tokenizer for Chinese (Simplified). - value: chineseTraditional name: ChineseTraditional description: Selects the Microsoft tokenizer for Chinese (Traditional). - value: croatian name: Croatian description: Selects the Microsoft tokenizer for Croatian. 
- value: czech name: Czech description: Selects the Microsoft tokenizer for Czech. - value: danish name: Danish description: Selects the Microsoft tokenizer for Danish. - value: dutch name: Dutch description: Selects the Microsoft tokenizer for Dutch. - value: english name: English description: Selects the Microsoft tokenizer for English. - value: french name: French description: Selects the Microsoft tokenizer for French. - value: german name: German description: Selects the Microsoft tokenizer for German. - value: greek name: Greek description: Selects the Microsoft tokenizer for Greek. - value: gujarati name: Gujarati description: Selects the Microsoft tokenizer for Gujarati. - value: hindi name: Hindi description: Selects the Microsoft tokenizer for Hindi. - value: icelandic name: Icelandic description: Selects the Microsoft tokenizer for Icelandic. - value: indonesian name: Indonesian description: Selects the Microsoft tokenizer for Indonesian. - value: italian name: Italian description: Selects the Microsoft tokenizer for Italian. - value: japanese name: Japanese description: Selects the Microsoft tokenizer for Japanese. - value: kannada name: Kannada description: Selects the Microsoft tokenizer for Kannada. - value: korean name: Korean description: Selects the Microsoft tokenizer for Korean. - value: malay name: Malay description: Selects the Microsoft tokenizer for Malay. - value: malayalam name: Malayalam description: Selects the Microsoft tokenizer for Malayalam. - value: marathi name: Marathi description: Selects the Microsoft tokenizer for Marathi. - value: norwegianBokmaal name: NorwegianBokmaal description: Selects the Microsoft tokenizer for Norwegian (Bokmål). - value: polish name: Polish description: Selects the Microsoft tokenizer for Polish. - value: portuguese name: Portuguese description: Selects the Microsoft tokenizer for Portuguese. 
- value: portugueseBrazilian name: PortugueseBrazilian description: Selects the Microsoft tokenizer for Portuguese (Brazil). - value: punjabi name: Punjabi description: Selects the Microsoft tokenizer for Punjabi. - value: romanian name: Romanian description: Selects the Microsoft tokenizer for Romanian. - value: russian name: Russian description: Selects the Microsoft tokenizer for Russian. - value: serbianCyrillic name: SerbianCyrillic description: Selects the Microsoft tokenizer for Serbian (Cyrillic). - value: serbianLatin name: SerbianLatin description: Selects the Microsoft tokenizer for Serbian (Latin). - value: slovenian name: Slovenian description: Selects the Microsoft tokenizer for Slovenian. - value: spanish name: Spanish description: Selects the Microsoft tokenizer for Spanish. - value: swedish name: Swedish description: Selects the Microsoft tokenizer for Swedish. - value: tamil name: Tamil description: Selects the Microsoft tokenizer for Tamil. - value: telugu name: Telugu description: Selects the Microsoft tokenizer for Telugu. - value: thai name: Thai description: Selects the Microsoft tokenizer for Thai. - value: ukrainian name: Ukrainian description: Selects the Microsoft tokenizer for Ukrainian. - value: urdu name: Urdu description: Selects the Microsoft tokenizer for Urdu. - value: vietnamese name: Vietnamese description: Selects the Microsoft tokenizer for Vietnamese. description: Lists the languages supported by the Microsoft language tokenizer. MicrosoftLanguageTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. 
Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. isSearchTokenizer: type: boolean default: false description: >- A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. language: $ref: '#/definitions/MicrosoftTokenizerLanguage' description: The language to use. The default is English. description: Divides text using language-specific rules. MicrosoftStemmingTokenizerLanguage: type: string enum: - arabic - bangla - bulgarian - catalan - croatian - czech - danish - dutch - english - estonian - finnish - french - german - greek - gujarati - hebrew - hindi - hungarian - icelandic - indonesian - italian - kannada - latvian - lithuanian - malay - malayalam - marathi - norwegianBokmaal - polish - portuguese - portugueseBrazilian - punjabi - romanian - russian - serbianCyrillic - serbianLatin - slovak - slovenian - spanish - swedish - tamil - telugu - turkish - ukrainian - urdu x-ms-enum: name: MicrosoftStemmingTokenizerLanguage modelAsString: false values: - value: arabic name: Arabic description: Selects the Microsoft stemming tokenizer for Arabic. - value: bangla name: Bangla description: Selects the Microsoft stemming tokenizer for Bangla. - value: bulgarian name: Bulgarian description: Selects the Microsoft stemming tokenizer for Bulgarian. - value: catalan name: Catalan description: Selects the Microsoft stemming tokenizer for Catalan. - value: croatian name: Croatian description: Selects the Microsoft stemming tokenizer for Croatian. - value: czech name: Czech description: Selects the Microsoft stemming tokenizer for Czech. - value: danish name: Danish description: Selects the Microsoft stemming tokenizer for Danish. - value: dutch name: Dutch description: Selects the Microsoft stemming tokenizer for Dutch. 
- value: english name: English description: Selects the Microsoft stemming tokenizer for English. - value: estonian name: Estonian description: Selects the Microsoft stemming tokenizer for Estonian. - value: finnish name: Finnish description: Selects the Microsoft stemming tokenizer for Finnish. - value: french name: French description: Selects the Microsoft stemming tokenizer for French. - value: german name: German description: Selects the Microsoft stemming tokenizer for German. - value: greek name: Greek description: Selects the Microsoft stemming tokenizer for Greek. - value: gujarati name: Gujarati description: Selects the Microsoft stemming tokenizer for Gujarati. - value: hebrew name: Hebrew description: Selects the Microsoft stemming tokenizer for Hebrew. - value: hindi name: Hindi description: Selects the Microsoft stemming tokenizer for Hindi. - value: hungarian name: Hungarian description: Selects the Microsoft stemming tokenizer for Hungarian. - value: icelandic name: Icelandic description: Selects the Microsoft stemming tokenizer for Icelandic. - value: indonesian name: Indonesian description: Selects the Microsoft stemming tokenizer for Indonesian. - value: italian name: Italian description: Selects the Microsoft stemming tokenizer for Italian. - value: kannada name: Kannada description: Selects the Microsoft stemming tokenizer for Kannada. - value: latvian name: Latvian description: Selects the Microsoft stemming tokenizer for Latvian. - value: lithuanian name: Lithuanian description: Selects the Microsoft stemming tokenizer for Lithuanian. - value: malay name: Malay description: Selects the Microsoft stemming tokenizer for Malay. - value: malayalam name: Malayalam description: Selects the Microsoft stemming tokenizer for Malayalam. - value: marathi name: Marathi description: Selects the Microsoft stemming tokenizer for Marathi. 
- value: norwegianBokmaal name: NorwegianBokmaal description: Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). - value: polish name: Polish description: Selects the Microsoft stemming tokenizer for Polish. - value: portuguese name: Portuguese description: Selects the Microsoft stemming tokenizer for Portuguese. - value: portugueseBrazilian name: PortugueseBrazilian description: Selects the Microsoft stemming tokenizer for Portuguese (Brazil). - value: punjabi name: Punjabi description: Selects the Microsoft stemming tokenizer for Punjabi. - value: romanian name: Romanian description: Selects the Microsoft stemming tokenizer for Romanian. - value: russian name: Russian description: Selects the Microsoft stemming tokenizer for Russian. - value: serbianCyrillic name: SerbianCyrillic description: Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). - value: serbianLatin name: SerbianLatin description: Selects the Microsoft stemming tokenizer for Serbian (Latin). - value: slovak name: Slovak description: Selects the Microsoft stemming tokenizer for Slovak. - value: slovenian name: Slovenian description: Selects the Microsoft stemming tokenizer for Slovenian. - value: spanish name: Spanish description: Selects the Microsoft stemming tokenizer for Spanish. - value: swedish name: Swedish description: Selects the Microsoft stemming tokenizer for Swedish. - value: tamil name: Tamil description: Selects the Microsoft stemming tokenizer for Tamil. - value: telugu name: Telugu description: Selects the Microsoft stemming tokenizer for Telugu. - value: turkish name: Turkish description: Selects the Microsoft stemming tokenizer for Turkish. - value: ukrainian name: Ukrainian description: Selects the Microsoft stemming tokenizer for Ukrainian. - value: urdu name: Urdu description: Selects the Microsoft stemming tokenizer for Urdu. description: >- Lists the languages supported by the Microsoft language stemming tokenizer. 
MicrosoftLanguageStemmingTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. isSearchTokenizer: type: boolean default: false description: >- A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. language: $ref: '#/definitions/MicrosoftStemmingTokenizerLanguage' description: The language to use. The default is English. description: >- Divides text using language-specific rules and reduces words to their base forms. NGramTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.NGramTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: minGram: type: integer format: int32 default: 1 maximum: 300 description: >- The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 maximum: 300 description: The maximum n-gram length. Default is 2. Maximum is 300. tokenChars: type: array items: $ref: '#/definitions/TokenCharacterKind' x-nullable: false description: Character classes to keep in the tokens. description: >- Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html PathHierarchyTokenizerV2: x-ms-discriminator-value: '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: delimiter: type: string format: char default: / description: The delimiter character to use. Default is "/". replacement: type: string format: char default: / description: >- A value that, if set, replaces the delimiter character. Default is "/". maxTokenLength: type: integer format: int32 default: 300 maximum: 300 description: The maximum token length. Default and maximum is 300. reverse: x-ms-client-name: ReverseTokenOrder type: boolean default: false description: >- A value indicating whether to generate tokens in reverse order. Default is false. skip: x-ms-client-name: NumberOfTokensToSkip type: integer format: int32 default: 0 description: The number of initial tokens to skip. Default is 0. description: >- Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html PatternTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.PatternTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: pattern: type: string default: \W+ description: >- A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. flags: $ref: '#/definitions/RegexFlags' description: Regular expression flags. group: type: integer format: int32 default: -1 description: >- The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. 
description: >- Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html LuceneStandardTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.StandardTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 description: >- The maximum token length. Default is 255. Tokens longer than the maximum length are split. description: >- Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html x-az-search-deprecated: true LuceneStandardTokenizerV2: x-ms-discriminator-value: '#Microsoft.Azure.Search.StandardTokenizerV2' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. description: >- Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html UaxUrlEmailTokenizer: x-ms-discriminator-value: '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' allOf: - $ref: '#/definitions/LexicalTokenizer' properties: maxTokenLength: type: integer format: int32 default: 255 maximum: 300 description: >- The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. description: >- Tokenizes urls and emails as one token. 
This tokenizer is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html TokenFilter: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the token filter. name: type: string externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/custom-analyzers-in-azure-search#index-attribute-reference description: >- The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. required: - '@odata.type' - name description: Base type for token filters. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search AsciiFoldingTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: preserveOriginal: type: boolean default: false description: >- A value indicating whether the original token will be kept. Default is false. description: >- Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html CjkBigramTokenFilterScripts: type: string enum: - han - hiragana - katakana - hangul x-ms-enum: name: CjkBigramTokenFilterScripts modelAsString: false values: - value: han name: Han description: Ignore Han script when forming bigrams of CJK terms. - value: hiragana name: Hiragana description: Ignore Hiragana script when forming bigrams of CJK terms. 
- value: katakana name: Katakana description: Ignore Katakana script when forming bigrams of CJK terms. - value: hangul name: Hangul description: Ignore Hangul script when forming bigrams of CJK terms. description: Scripts that can be ignored by CjkBigramTokenFilter. CjkBigramTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.CjkBigramTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: ignoreScripts: type: array items: $ref: '#/definitions/CjkBigramTokenFilterScripts' x-nullable: false description: The scripts to ignore. outputUnigrams: type: boolean default: false description: >- A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. description: >- Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html CommonGramTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.CommonGramTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: commonWords: type: array items: type: string description: The set of common words. ignoreCase: type: boolean default: false description: >- A value indicating whether common words matching will be case insensitive. Default is false. queryMode: x-ms-client-name: UseQueryMode type: boolean default: false description: >- A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. required: - commonWords description: >- Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html DictionaryDecompounderTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: wordList: type: array items: type: string description: The list of words to match against. minWordSize: type: integer format: int32 default: 5 maximum: 300 description: >- The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. minSubwordSize: type: integer format: int32 default: 2 maximum: 300 description: >- The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. maxSubwordSize: type: integer format: int32 default: 15 maximum: 300 description: >- The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. onlyLongestMatch: type: boolean default: false description: >- A value indicating whether to add only the longest matching subword to the output. Default is false. required: - wordList description: >- Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.html EdgeNGramTokenFilterSide: type: string enum: - front - back x-ms-enum: name: EdgeNGramTokenFilterSide modelAsString: false values: - value: front name: Front description: >- Specifies that the n-gram should be generated from the front of the input. - value: back name: Back description: >- Specifies that the n-gram should be generated from the back of the input. description: Specifies which side of the input an n-gram should be generated from. 
EdgeNGramTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.EdgeNGramTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: minGram: type: integer format: int32 default: 1 description: >- The minimum n-gram length. Default is 1. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 description: The maximum n-gram length. Default is 2. side: $ref: '#/definitions/EdgeNGramTokenFilterSide' default: front description: >- Specifies which side of the input the n-gram should be generated from. Default is "front". description: >- Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html x-az-search-deprecated: true EdgeNGramTokenFilterV2: x-ms-discriminator-value: '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' allOf: - $ref: '#/definitions/TokenFilter' properties: minGram: type: integer format: int32 default: 1 maximum: 300 description: >- The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 maximum: 300 description: The maximum n-gram length. Default is 2. Maximum is 300. side: $ref: '#/definitions/EdgeNGramTokenFilterSide' default: front description: >- Specifies which side of the input the n-gram should be generated from. Default is "front". description: >- Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html ElisionTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.ElisionTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: articles: type: array items: type: string description: The set of articles to remove. description: >- Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html KeepTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.KeepTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: keepWords: type: array items: type: string description: The list of words to keep. keepWordsCase: x-ms-client-name: LowerCaseKeepWords type: boolean default: false description: >- A value indicating whether to lower case all words first. Default is false. required: - keepWords description: >- A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeepWordFilter.html KeywordMarkerTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: keywords: type: array items: type: string description: A list of words to mark as keywords. ignoreCase: type: boolean default: false description: >- A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. required: - keywords description: >- Marks terms as keywords. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilter.html LengthTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.LengthTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: min: x-ms-client-name: minLength type: integer format: int32 default: 0 maximum: 300 description: >- The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. max: x-ms-client-name: maxLength type: integer format: int32 default: 300 maximum: 300 description: The maximum length in characters. Default and maximum is 300. description: >- Removes words that are too long or too short. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html LimitTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.LimitTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: maxTokenCount: type: integer format: int32 default: 1 description: The maximum number of tokens to produce. Default is 1. consumeAllTokens: type: boolean default: false description: >- A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. description: >- Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html NGramTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.NGramTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: minGram: type: integer format: int32 default: 1 description: >- The minimum n-gram length. Default is 1. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 description: The maximum n-gram length. Default is 2. 
description: >- Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html x-az-search-deprecated: true NGramTokenFilterV2: x-ms-discriminator-value: '#Microsoft.Azure.Search.NGramTokenFilterV2' allOf: - $ref: '#/definitions/TokenFilter' properties: minGram: type: integer format: int32 default: 1 maximum: 300 description: >- The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. maxGram: type: integer format: int32 default: 2 maximum: 300 description: The maximum n-gram length. Default is 2. Maximum is 300. description: >- Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html PatternCaptureTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.PatternCaptureTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: patterns: type: array items: type: string description: A list of patterns to match against each token. preserveOriginal: type: boolean default: true description: >- A value indicating whether to return the original token even if one of the patterns matches. Default is true. required: - patterns description: >- Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternCaptureGroupTokenFilter.html PatternReplaceTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.PatternReplaceTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: pattern: type: string description: A regular expression pattern. 
replacement: type: string description: The replacement text. required: - pattern - replacement description: >- A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternReplaceFilter.html PhoneticEncoder: type: string enum: - metaphone - doubleMetaphone - soundex - refinedSoundex - caverphone1 - caverphone2 - cologne - nysiis - koelnerPhonetik - haasePhonetik - beiderMorse x-ms-enum: name: PhoneticEncoder modelAsString: false values: - value: metaphone name: Metaphone description: Encodes a token into a Metaphone value. - value: doubleMetaphone name: DoubleMetaphone description: Encodes a token into a double metaphone value. - value: soundex name: Soundex description: Encodes a token into a Soundex value. - value: refinedSoundex name: RefinedSoundex description: Encodes a token into a Refined Soundex value. - value: caverphone1 name: Caverphone1 description: Encodes a token into a Caverphone 1.0 value. - value: caverphone2 name: Caverphone2 description: Encodes a token into a Caverphone 2.0 value. - value: cologne name: Cologne description: Encodes a token into a Cologne Phonetic value. - value: nysiis name: Nysiis description: Encodes a token into a NYSIIS value. - value: koelnerPhonetik name: KoelnerPhonetik description: Encodes a token using the Kölner Phonetik algorithm. - value: haasePhonetik name: HaasePhonetik description: >- Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. - value: beiderMorse name: BeiderMorse description: Encodes a token into a Beider-Morse value. 
description: Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. PhoneticTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.PhoneticTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: encoder: $ref: '#/definitions/PhoneticEncoder' default: metaphone description: The phonetic encoder to use. Default is "metaphone". replace: x-ms-client-name: ReplaceOriginalTokens type: boolean default: true description: >- A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. description: >- Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. externalDocs: url: >- https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html ShingleTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.ShingleTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: maxShingleSize: type: integer format: int32 default: 2 minimum: 2 description: The maximum shingle size. Default and minimum value is 2. minShingleSize: type: integer format: int32 default: 2 minimum: 2 description: >- The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. outputUnigrams: type: boolean default: true description: >- A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. outputUnigramsIfNoShingles: type: boolean default: false description: >- A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. tokenSeparator: type: string default: ' ' description: >- The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). 
filterToken: type: string default: _ description: >- The string to insert for each position at which there is no token. Default is an underscore ("_"). description: >- Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html SnowballTokenFilterLanguage: type: string enum: - armenian - basque - catalan - danish - dutch - english - finnish - french - german - german2 - hungarian - italian - kp - lovins - norwegian - porter - portuguese - romanian - russian - spanish - swedish - turkish x-ms-enum: name: SnowballTokenFilterLanguage modelAsString: false values: - value: armenian name: Armenian description: Selects the Lucene Snowball stemming tokenizer for Armenian. - value: basque name: Basque description: Selects the Lucene Snowball stemming tokenizer for Basque. - value: catalan name: Catalan description: Selects the Lucene Snowball stemming tokenizer for Catalan. - value: danish name: Danish description: Selects the Lucene Snowball stemming tokenizer for Danish. - value: dutch name: Dutch description: Selects the Lucene Snowball stemming tokenizer for Dutch. - value: english name: English description: Selects the Lucene Snowball stemming tokenizer for English. - value: finnish name: Finnish description: Selects the Lucene Snowball stemming tokenizer for Finnish. - value: french name: French description: Selects the Lucene Snowball stemming tokenizer for French. - value: german name: German description: Selects the Lucene Snowball stemming tokenizer for German. - value: german2 name: German2 description: >- Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. - value: hungarian name: Hungarian description: Selects the Lucene Snowball stemming tokenizer for Hungarian. 
- value: italian name: Italian description: Selects the Lucene Snowball stemming tokenizer for Italian. - value: kp name: Kp description: >- Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. - value: lovins name: Lovins description: >- Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. - value: norwegian name: Norwegian description: Selects the Lucene Snowball stemming tokenizer for Norwegian. - value: porter name: Porter description: >- Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. - value: portuguese name: Portuguese description: Selects the Lucene Snowball stemming tokenizer for Portuguese. - value: romanian name: Romanian description: Selects the Lucene Snowball stemming tokenizer for Romanian. - value: russian name: Russian description: Selects the Lucene Snowball stemming tokenizer for Russian. - value: spanish name: Spanish description: Selects the Lucene Snowball stemming tokenizer for Spanish. - value: swedish name: Swedish description: Selects the Lucene Snowball stemming tokenizer for Swedish. - value: turkish name: Turkish description: Selects the Lucene Snowball stemming tokenizer for Turkish. description: The language to use for a Snowball token filter. SnowballTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.SnowballTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: language: $ref: '#/definitions/SnowballTokenFilterLanguage' description: The language to use. required: - language description: >- A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html StemmerTokenFilterLanguage: type: string enum: - arabic - armenian - basque - brazilian - bulgarian - catalan - czech - danish - dutch - dutchKp - english - lightEnglish - minimalEnglish - possessiveEnglish - porter2 - lovins - finnish - lightFinnish - french - lightFrench - minimalFrench - galician - minimalGalician - german - german2 - lightGerman - minimalGerman - greek - hindi - hungarian - lightHungarian - indonesian - irish - italian - lightItalian - sorani - latvian - norwegian - lightNorwegian - minimalNorwegian - lightNynorsk - minimalNynorsk - portuguese - lightPortuguese - minimalPortuguese - portugueseRslp - romanian - russian - lightRussian - spanish - lightSpanish - swedish - lightSwedish - turkish x-ms-enum: name: StemmerTokenFilterLanguage modelAsString: false values: - value: arabic name: Arabic description: Selects the Lucene stemming tokenizer for Arabic. - value: armenian name: Armenian description: Selects the Lucene stemming tokenizer for Armenian. - value: basque name: Basque description: Selects the Lucene stemming tokenizer for Basque. - value: brazilian name: Brazilian description: Selects the Lucene stemming tokenizer for Portuguese (Brazil). - value: bulgarian name: Bulgarian description: Selects the Lucene stemming tokenizer for Bulgarian. - value: catalan name: Catalan description: Selects the Lucene stemming tokenizer for Catalan. - value: czech name: Czech description: Selects the Lucene stemming tokenizer for Czech. - value: danish name: Danish description: Selects the Lucene stemming tokenizer for Danish. - value: dutch name: Dutch description: Selects the Lucene stemming tokenizer for Dutch. - value: dutchKp name: DutchKp description: >- Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. 
- value: english name: English description: Selects the Lucene stemming tokenizer for English. - value: lightEnglish name: LightEnglish description: >- Selects the Lucene stemming tokenizer for English that does light stemming. - value: minimalEnglish name: MinimalEnglish description: >- Selects the Lucene stemming tokenizer for English that does minimal stemming. - value: possessiveEnglish name: PossessiveEnglish description: >- Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. - value: porter2 name: Porter2 description: >- Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. - value: lovins name: Lovins description: >- Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. - value: finnish name: Finnish description: Selects the Lucene stemming tokenizer for Finnish. - value: lightFinnish name: LightFinnish description: >- Selects the Lucene stemming tokenizer for Finnish that does light stemming. - value: french name: French description: Selects the Lucene stemming tokenizer for French. - value: lightFrench name: LightFrench description: >- Selects the Lucene stemming tokenizer for French that does light stemming. - value: minimalFrench name: MinimalFrench description: >- Selects the Lucene stemming tokenizer for French that does minimal stemming. - value: galician name: Galician description: Selects the Lucene stemming tokenizer for Galician. - value: minimalGalician name: MinimalGalician description: >- Selects the Lucene stemming tokenizer for Galician that does minimal stemming. - value: german name: German description: Selects the Lucene stemming tokenizer for German. - value: german2 name: German2 description: >- Selects the Lucene stemming tokenizer that uses the German variant algorithm. - value: lightGerman name: LightGerman description: >- Selects the Lucene stemming tokenizer for German that does light stemming. 
- value: minimalGerman name: MinimalGerman description: >- Selects the Lucene stemming tokenizer for German that does minimal stemming. - value: greek name: Greek description: Selects the Lucene stemming tokenizer for Greek. - value: hindi name: Hindi description: Selects the Lucene stemming tokenizer for Hindi. - value: hungarian name: Hungarian description: Selects the Lucene stemming tokenizer for Hungarian. - value: lightHungarian name: LightHungarian description: >- Selects the Lucene stemming tokenizer for Hungarian that does light stemming. - value: indonesian name: Indonesian description: Selects the Lucene stemming tokenizer for Indonesian. - value: irish name: Irish description: Selects the Lucene stemming tokenizer for Irish. - value: italian name: Italian description: Selects the Lucene stemming tokenizer for Italian. - value: lightItalian name: LightItalian description: >- Selects the Lucene stemming tokenizer for Italian that does light stemming. - value: sorani name: Sorani description: Selects the Lucene stemming tokenizer for Sorani. - value: latvian name: Latvian description: Selects the Lucene stemming tokenizer for Latvian. - value: norwegian name: Norwegian description: Selects the Lucene stemming tokenizer for Norwegian (Bokmål). - value: lightNorwegian name: LightNorwegian description: >- Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. - value: minimalNorwegian name: MinimalNorwegian description: >- Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. - value: lightNynorsk name: LightNynorsk description: >- Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. - value: minimalNynorsk name: MinimalNynorsk description: >- Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. - value: portuguese name: Portuguese description: Selects the Lucene stemming tokenizer for Portuguese. 
- value: lightPortuguese name: LightPortuguese description: >- Selects the Lucene stemming tokenizer for Portuguese that does light stemming. - value: minimalPortuguese name: MinimalPortuguese description: >- Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. - value: portugueseRslp name: PortugueseRslp description: >- Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. - value: romanian name: Romanian description: Selects the Lucene stemming tokenizer for Romanian. - value: russian name: Russian description: Selects the Lucene stemming tokenizer for Russian. - value: lightRussian name: LightRussian description: >- Selects the Lucene stemming tokenizer for Russian that does light stemming. - value: spanish name: Spanish description: Selects the Lucene stemming tokenizer for Spanish. - value: lightSpanish name: LightSpanish description: >- Selects the Lucene stemming tokenizer for Spanish that does light stemming. - value: swedish name: Swedish description: Selects the Lucene stemming tokenizer for Swedish. - value: lightSwedish name: LightSwedish description: >- Selects the Lucene stemming tokenizer for Swedish that does light stemming. - value: turkish name: Turkish description: Selects the Lucene stemming tokenizer for Turkish. description: The language to use for a stemmer token filter. StemmerTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.StemmerTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: language: $ref: '#/definitions/StemmerTokenFilterLanguage' description: The language to use. required: - language description: >- Language specific stemming filter. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters StemmerOverrideTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: rules: type: array items: type: string description: >- A list of stemming rules in the following format: "word => stem", for example: "ran => run". required: - rules description: >- Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.html StopwordsList: type: string enum: - arabic - armenian - basque - brazilian - bulgarian - catalan - czech - danish - dutch - english - finnish - french - galician - german - greek - hindi - hungarian - indonesian - irish - italian - latvian - norwegian - persian - portuguese - romanian - russian - sorani - spanish - swedish - thai - turkish x-ms-enum: name: StopwordsList modelAsString: false values: - value: arabic name: Arabic description: Selects the stopword list for Arabic. - value: armenian name: Armenian description: Selects the stopword list for Armenian. - value: basque name: Basque description: Selects the stopword list for Basque. - value: brazilian name: Brazilian description: Selects the stopword list for Portuguese (Brazil). - value: bulgarian name: Bulgarian description: Selects the stopword list for Bulgarian. - value: catalan name: Catalan description: Selects the stopword list for Catalan. - value: czech name: Czech description: Selects the stopword list for Czech. - value: danish name: Danish description: Selects the stopword list for Danish. 
- value: dutch name: Dutch description: Selects the stopword list for Dutch. - value: english name: English description: Selects the stopword list for English. - value: finnish name: Finnish description: Selects the stopword list for Finnish. - value: french name: French description: Selects the stopword list for French. - value: galician name: Galician description: Selects the stopword list for Galician. - value: german name: German description: Selects the stopword list for German. - value: greek name: Greek description: Selects the stopword list for Greek. - value: hindi name: Hindi description: Selects the stopword list for Hindi. - value: hungarian name: Hungarian description: Selects the stopword list for Hungarian. - value: indonesian name: Indonesian description: Selects the stopword list for Indonesian. - value: irish name: Irish description: Selects the stopword list for Irish. - value: italian name: Italian description: Selects the stopword list for Italian. - value: latvian name: Latvian description: Selects the stopword list for Latvian. - value: norwegian name: Norwegian description: Selects the stopword list for Norwegian. - value: persian name: Persian description: Selects the stopword list for Persian. - value: portuguese name: Portuguese description: Selects the stopword list for Portuguese. - value: romanian name: Romanian description: Selects the stopword list for Romanian. - value: russian name: Russian description: Selects the stopword list for Russian. - value: sorani name: Sorani description: Selects the stopword list for Sorani. - value: spanish name: Spanish description: Selects the stopword list for Spanish. - value: swedish name: Swedish description: Selects the stopword list for Swedish. - value: thai name: Thai description: Selects the stopword list for Thai. - value: turkish name: Turkish description: Selects the stopword list for Turkish. description: Identifies a predefined list of language-specific stopwords. 
StopwordsTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.StopwordsTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: stopwords: type: array items: type: string description: >- The list of stopwords. This property and the stopwords list property cannot both be set. stopwordsList: $ref: '#/definitions/StopwordsList' default: english description: >- A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. ignoreCase: type: boolean default: false description: >- A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. removeTrailing: x-ms-client-name: RemoveTrailingStopWords type: boolean default: true description: >- A value indicating whether to ignore the last search term if it's a stop word. Default is true. description: >- Removes stop words from a token stream. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html SynonymTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.SynonymTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: synonyms: type: array items: type: string description: >- A list of synonyms in one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. ignoreCase: type: boolean default: false description: >- A value indicating whether to case-fold input for matching. Default is false. expand: type: boolean default: true description: >- A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. 
If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. required: - synonyms description: >- Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/synonym/SynonymFilter.html TruncateTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.TruncateTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: length: type: integer format: int32 default: 300 maximum: 300 description: >- The length at which terms will be truncated. Default and maximum is 300. description: >- Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html UniqueTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.UniqueTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: onlyOnSamePosition: type: boolean default: false description: >- A value indicating whether to remove duplicates only at the same position. Default is false. description: >- Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene. 
externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html WordDelimiterTokenFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.WordDelimiterTokenFilter' allOf: - $ref: '#/definitions/TokenFilter' properties: generateWordParts: type: boolean default: true description: >- A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. generateNumberParts: type: boolean default: true description: >- A value indicating whether to generate number subwords. Default is true. catenateWords: type: boolean default: false description: >- A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. catenateNumbers: type: boolean default: false description: >- A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. catenateAll: type: boolean default: false description: >- A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. splitOnCaseChange: type: boolean default: true description: >- A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. preserveOriginal: type: boolean default: false description: >- A value indicating whether original words will be preserved and added to the subword list. Default is false. splitOnNumerics: type: boolean default: true description: >- A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. 
stemEnglishPossessive: type: boolean default: true description: >- A value indicating whether to remove trailing "'s" for each subword. Default is true. protectedWords: type: array items: type: string description: A list of tokens to protect from being delimited. description: >- Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. externalDocs: url: >- http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilter.html CharFilter: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the char filter. name: type: string externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/custom-analyzers-in-azure-search#index-attribute-reference description: >- The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. required: - '@odata.type' - name description: Base type for character filters. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search MappingCharFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.MappingCharFilter' allOf: - $ref: '#/definitions/CharFilter' properties: mappings: type: array items: type: string description: >- A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). required: - mappings description: >- A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. 
externalDocs: url: >- https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/MappingCharFilter.html PatternReplaceCharFilter: x-ms-discriminator-value: '#Microsoft.Azure.Search.PatternReplaceCharFilter' allOf: - $ref: '#/definitions/CharFilter' properties: pattern: type: string description: A regular expression pattern. replacement: type: string description: The replacement text. required: - pattern - replacement description: >- A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. externalDocs: url: >- https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.html Similarity: discriminator: '@odata.type' properties: '@odata.type': type: string required: - '@odata.type' description: >- Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. externalDocs: url: https://docs.microsoft.com/azure/search/index-ranking-similarity ClassicSimilarity: x-ms-discriminator-value: '#Microsoft.Azure.Search.ClassicSimilarity' allOf: - $ref: '#/definitions/Similarity' description: >- Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. 
BM25Similarity: x-ms-discriminator-value: '#Microsoft.Azure.Search.BM25Similarity' allOf: - $ref: '#/definitions/Similarity' properties: k1: type: number format: double description: >- This property controls the scaling function between the term frequency of each matching term and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. b: type: number format: double description: >- This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. description: >- Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). DataSourceCredentials: properties: connectionString: externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Data-Source type: string description: The connection string for the datasource. description: Represents credentials that can be used to connect to a datasource. SearchIndexerDataContainer: properties: name: type: string description: >- The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. query: type: string description: >- A query that is applied to this data container. The syntax and meaning of this parameter are datasource-specific. Not supported by Azure SQL datasources. required: - name description: >- Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. 
DataChangeDetectionPolicy: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the data change detection policy. required: - '@odata.type' description: Base type for data change detection policies. HighWaterMarkChangeDetectionPolicy: description: >- Defines a data change detection policy that captures changes based on the value of a high water mark column. x-ms-discriminator-value: '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' allOf: - $ref: '#/definitions/DataChangeDetectionPolicy' properties: highWaterMarkColumnName: type: string description: The name of the high water mark column. required: - highWaterMarkColumnName SqlIntegratedChangeTrackingPolicy: description: >- Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. x-ms-discriminator-value: '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' allOf: - $ref: '#/definitions/DataChangeDetectionPolicy' DataDeletionDetectionPolicy: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the data deletion detection policy. required: - '@odata.type' description: Base type for data deletion detection policies. SoftDeleteColumnDeletionDetectionPolicy: description: >- Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. x-ms-discriminator-value: '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' allOf: - $ref: '#/definitions/DataDeletionDetectionPolicy' properties: softDeleteColumnName: type: string description: The name of the column to use for soft-deletion detection. softDeleteMarkerValue: type: string description: The marker value that identifies an item as deleted. 
SearchIndexerDataSourceType: type: string enum: - azuresql - cosmosdb - azureblob - azuretable - mysql x-ms-enum: name: SearchIndexerDataSourceType modelAsString: true values: - value: azuresql name: AzureSql description: Indicates an Azure SQL datasource. - value: cosmosdb name: CosmosDb description: Indicates a CosmosDB datasource. - value: azureblob name: AzureBlob description: Indicates an Azure Blob datasource. - value: azuretable name: AzureTable description: Indicates an Azure Table datasource. - value: mysql name: MySql description: Indicates a MySql datasource. description: Defines the type of a datasource. SearchIndexerDataSource: properties: name: externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules type: string description: The name of the datasource. description: type: string description: The description of the datasource. type: $ref: '#/definitions/SearchIndexerDataSourceType' description: The type of the datasource. credentials: $ref: '#/definitions/DataSourceCredentials' description: Credentials for the datasource. container: $ref: '#/definitions/SearchIndexerDataContainer' description: The data container for the datasource. dataChangeDetectionPolicy: $ref: '#/definitions/DataChangeDetectionPolicy' description: The data change detection policy for the datasource. dataDeletionDetectionPolicy: $ref: '#/definitions/DataDeletionDetectionPolicy' description: The data deletion detection policy for the datasource. '@odata.etag': x-ms-client-name: ETag type: string description: The ETag of the data source. required: - name - type - credentials - container description: >- Represents a datasource definition, which can be used to configure an indexer. ListDataSourcesResult: properties: value: x-ms-client-name: DataSources type: array readOnly: true items: $ref: '#/definitions/SearchIndexerDataSource' description: The datasources in the Search service. required: - value description: >- Response from a List Datasources request. 
If successful, it includes the full definitions of all datasources. IndexingSchedule: properties: interval: type: string format: duration description: The interval of time between indexer executions. startTime: type: string format: date-time description: The time when an indexer should start running. required: - interval description: Represents a schedule for indexer execution. IndexingParameters: properties: batchSize: type: integer format: int32 description: >- The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. maxFailedItems: type: integer format: int32 default: 0 description: >- The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. maxFailedItemsPerBatch: type: integer format: int32 default: 0 description: >- The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. configuration: type: object additionalProperties: type: object description: >- A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. description: Represents parameters for indexer execution. FieldMappingFunction: properties: name: type: string description: The name of the field mapping function. parameters: type: object additionalProperties: type: object description: >- A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. required: - name description: >- Represents a function that transforms a value from a data source before indexing. externalDocs: url: https://docs.microsoft.com/azure/search/search-indexer-field-mappings FieldMapping: properties: sourceFieldName: type: string description: The name of the field in the data source. 
targetFieldName: type: string description: >- The name of the target field in the index. Same as the source field name by default. mappingFunction: $ref: '#/definitions/FieldMappingFunction' description: A function to apply to each source field value before indexing. required: - sourceFieldName description: >- Defines a mapping between a field in a data source and a target field in an index. externalDocs: url: https://docs.microsoft.com/azure/search/search-indexer-field-mappings SearchIndexer: properties: name: externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules type: string description: The name of the indexer. description: type: string description: The description of the indexer. dataSourceName: type: string description: The name of the datasource from which this indexer reads data. skillsetName: type: string description: The name of the skillset executing with this indexer. targetIndexName: type: string description: The name of the index to which this indexer writes data. schedule: $ref: '#/definitions/IndexingSchedule' description: The schedule for this indexer. parameters: $ref: '#/definitions/IndexingParameters' description: Parameters for indexer execution. fieldMappings: type: array items: $ref: '#/definitions/FieldMapping' description: >- Defines mappings between fields in the data source and corresponding target fields in the index. externalDocs: url: >- https://docs.microsoft.com/azure/search/search-indexer-field-mappings outputFieldMappings: type: array items: $ref: '#/definitions/FieldMapping' description: >- Output field mappings are applied after enrichment and immediately before indexing. externalDocs: url: >- https://docs.microsoft.com/azure/search/search-indexer-field-mappings disabled: x-ms-client-name: IsDisabled type: boolean default: false description: A value indicating whether the indexer is disabled. Default is false. '@odata.etag': x-ms-client-name: ETag type: string description: The ETag of the indexer. 
required: - name - dataSourceName - targetIndexName externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Indexer-operations description: Represents an indexer. ListIndexersResult: properties: value: x-ms-client-name: Indexers type: array readOnly: true items: $ref: '#/definitions/SearchIndexer' description: The indexers in the Search service. required: - value description: >- Response from a List Indexers request. If successful, it includes the full definitions of all indexers. SearchIndexerError: properties: key: type: string readOnly: true description: The key of the item for which indexing failed. errorMessage: type: string readOnly: true description: >- The message describing the error that occurred while processing the item. statusCode: type: integer format: int32 x-nullable: false readOnly: true description: >- The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. name: type: string readOnly: true description: >- The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. details: type: string readOnly: true description: >- Additional, verbose details about the error to assist in debugging the indexer. This may not be always available. documentationLink: type: string readOnly: true description: >- A link to a troubleshooting guide for these classes of errors. This may not be always available. required: - errorMessage - statusCode description: Represents an item- or document-level indexing error. SearchIndexerWarning: properties: key: type: string readOnly: true description: The key of the item which generated a warning. 
message: type: string readOnly: true description: >- The message describing the warning that occurred while processing the item. name: type: string readOnly: true description: >- The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. details: type: string readOnly: true description: >- Additional, verbose details about the warning to assist in debugging the indexer. This may not be always available. documentationLink: type: string readOnly: true description: >- A link to a troubleshooting guide for these classes of warnings. This may not be always available. required: - message description: Represents an item-level warning. IndexerExecutionResult: properties: status: $ref: '#/definitions/IndexerExecutionStatus' readOnly: true description: The outcome of this indexer execution. errorMessage: type: string readOnly: true description: The error message indicating the top-level error, if any. startTime: type: string format: date-time readOnly: true description: The start time of this indexer execution. endTime: type: string format: date-time readOnly: true description: >- The end time of this indexer execution, if the execution has already completed. errors: type: array readOnly: true items: $ref: '#/definitions/SearchIndexerError' description: The item-level indexing errors. warnings: type: array readOnly: true items: $ref: '#/definitions/SearchIndexerWarning' description: The item-level indexing warnings. itemsProcessed: x-ms-client-name: ItemCount type: integer format: int32 x-nullable: false readOnly: true description: >- The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. 
itemsFailed: x-ms-client-name: FailedItemCount type: integer format: int32 x-nullable: false readOnly: true description: >- The number of items that failed to be indexed during this indexer execution. initialTrackingState: type: string readOnly: true description: Change tracking state with which an indexer execution started. finalTrackingState: type: string readOnly: true description: Change tracking state with which an indexer execution finished. required: - status - errors - warnings - itemsProcessed - itemsFailed description: Represents the result of an individual indexer execution. IndexerExecutionStatus: type: string enum: - transientFailure - success - inProgress - reset x-ms-enum: name: IndexerExecutionStatus modelAsString: false values: - value: transientFailure name: TransientFailure description: >- An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. - value: success name: Success description: Indexer execution completed successfully. - value: inProgress name: InProgress description: Indexer execution is in progress. - value: reset name: Reset description: Indexer has been reset. x-nullable: false description: Represents the status of an individual indexer execution. SearchIndexerStatus: properties: status: $ref: '#/definitions/IndexerStatus' readOnly: true description: Overall indexer status. lastResult: $ref: '#/definitions/IndexerExecutionResult' readOnly: true description: The result of the most recent or an in-progress indexer execution. executionHistory: type: array readOnly: true items: $ref: '#/definitions/IndexerExecutionResult' description: >- History of the recent indexer executions, sorted in reverse chronological order. limits: $ref: '#/definitions/SearchIndexerLimits' readOnly: true description: The execution limits for the indexer. required: - status - executionHistory - limits description: Represents the current status and execution history of an indexer. 
IndexerStatus:
  type: string
  enum:
    - unknown
    - error
    - running
  x-ms-enum:
    name: IndexerStatus
    modelAsString: false
    values:
      - value: unknown
        name: Unknown
        description: Indicates that the indexer is in an unknown state.
      - value: error
        name: Error
        description: >-
          Indicates that the indexer experienced an error that cannot be
          corrected without human intervention.
      - value: running
        name: Running
        description: Indicates that the indexer is running normally.
  x-nullable: false
  description: Represents the overall indexer status.
SearchIndexerLimits:
  properties:
    maxRunTime:
      type: string
      format: duration
      readOnly: true
      description: >-
        The maximum duration that the indexer is permitted to run for one
        execution.
    maxDocumentExtractionSize:
      # NOTE(review): 'type: number' paired with 'format: int64' is not a
      # standard OpenAPI 2.0 combination ('int64' belongs to 'type: integer');
      # confirm against the upstream Azure Search spec before changing.
      type: number
      format: int64
      readOnly: true
      description: >-
        The maximum size of a document, in bytes, which will be considered
        valid for indexing.
    maxDocumentContentCharactersToExtract:
      type: number
      format: int64
      readOnly: true
      description: >-
        The maximum number of characters that will be extracted from a
        document picked up for indexing.
SearchField:
  properties:
    name:
      type: string
      description: >-
        The name of the field, which must be unique within the fields
        collection of the index or parent field.
      externalDocs:
        url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules
    type:
      $ref: '#/definitions/SearchFieldDataType'
      description: The data type of the field.
      externalDocs:
        url: >-
          https://docs.microsoft.com/rest/api/searchservice/supported-data-types
    key:
      type: boolean
      description: >-
        A value indicating whether the field uniquely identifies documents in
        the index. Exactly one top-level field in each index must be chosen
        as the key field and it must be of type Edm.String. Key fields can be
        used to look up documents directly and update or delete specific
        documents. Default is false for simple fields and null for complex
        fields.
    retrievable:
      type: boolean
      description: >-
        A value indicating whether the field can be returned in a search
        result. You can disable this option if you want to use a field (for
        example, margin) as a filter, sorting, or scoring mechanism but do
        not want the field to be visible to the end user. This property must
        be true for key fields, and it must be null for complex fields. This
        property can be changed on existing fields. Enabling this property
        does not cause any increase in index storage requirements. Default is
        true for simple fields and null for complex fields.
    searchable:
      type: boolean
      description: >-
        A value indicating whether the field is full-text searchable. This
        means it will undergo analysis such as word-breaking during indexing.
        If you set a searchable field to a value like "sunny day", internally
        it will be split into the individual tokens "sunny" and "day". This
        enables full-text searches for these terms. Fields of type Edm.String
        or Collection(Edm.String) are searchable by default. This property
        must be false for simple fields of other non-string data types, and
        it must be null for complex fields. Note: searchable fields consume
        extra space in your index since Azure Cognitive Search will store an
        additional tokenized version of the field value for full-text
        searches. If you want to save space in your index and you don't need
        a field to be included in searches, set searchable to false.
    filterable:
      type: boolean
      description: >-
        A value indicating whether to enable the field to be referenced in
        $filter queries. filterable differs from searchable in how strings
        are handled. Fields of type Edm.String or Collection(Edm.String) that
        are filterable do not undergo word-breaking, so comparisons are for
        exact matches only. For example, if you set such a field f to "sunny
        day", $filter=f eq 'sunny' will find no matches, but $filter=f eq
        'sunny day' will. This property must be null for complex fields.
        Default is true for simple fields and null for complex fields.
    sortable:
      type: boolean
      description: >-
        A value indicating whether to enable the field to be referenced in
        $orderby expressions. By default Azure Cognitive Search sorts results
        by score, but in many experiences users will want to sort by fields
        in the documents. A simple field can be sortable only if it is
        single-valued (it has a single value in the scope of the parent
        document). Simple collection fields cannot be sortable, since they
        are multi-valued. Simple sub-fields of complex collections are also
        multi-valued, and therefore cannot be sortable. This is true whether
        it's an immediate parent field, or an ancestor field, that's the
        complex collection. Complex fields cannot be sortable and the
        sortable property must be null for such fields. The default for
        sortable is true for single-valued simple fields, false for
        multi-valued simple fields, and null for complex fields.
    facetable:
      type: boolean
      description: >-
        A value indicating whether to enable the field to be referenced in
        facet queries. Typically used in a presentation of search results
        that includes hit count by category (for example, search for digital
        cameras and see hits by brand, by megapixels, by price, and so on).
        This property must be null for complex fields. Fields of type
        Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be
        facetable. Default is true for all other simple fields.
    analyzer:
      externalDocs:
        url: https://docs.microsoft.com/rest/api/searchservice/Language-support
      $ref: '#/definitions/LexicalAnalyzerName'
      description: >-
        The name of the analyzer to use for the field. This option can be
        used only with searchable fields and it can't be set together with
        either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen,
        it cannot be changed for the field. Must be null for complex fields.
    searchAnalyzer:
      externalDocs:
        url: https://docs.microsoft.com/rest/api/searchservice/Language-support
      $ref: '#/definitions/LexicalAnalyzerName'
      description: >-
        The name of the analyzer used at search time for the field. This
        option can be used only with searchable fields. It must be set
        together with indexAnalyzer and it cannot be set together with the
        analyzer option. This property cannot be set to the name of a
        language analyzer; use the analyzer property instead if you need a
        language analyzer. This analyzer can be updated on an existing field.
        Must be null for complex fields.
    indexAnalyzer:
      externalDocs:
        url: https://docs.microsoft.com/rest/api/searchservice/Language-support
      $ref: '#/definitions/LexicalAnalyzerName'
      description: >-
        The name of the analyzer used at indexing time for the field. This
        option can be used only with searchable fields. It must be set
        together with searchAnalyzer and it cannot be set together with the
        analyzer option. This property cannot be set to the name of a
        language analyzer; use the analyzer property instead if you need a
        language analyzer. Once the analyzer is chosen, it cannot be changed
        for the field. Must be null for complex fields.
    synonymMaps:
      externalDocs:
        url: >-
          https://docs.microsoft.com/rest/api/searchservice/Synonym-Map-operations
      type: array
      items:
        type: string
      description: >-
        A list of the names of synonym maps to associate with this field.
        This option can be used only with searchable fields. Currently only
        one synonym map per field is supported. Assigning a synonym map to a
        field ensures that query terms targeting that field are expanded at
        query-time using the rules in the synonym map. This attribute can be
        changed on existing fields. Must be null or an empty collection for
        complex fields.
    fields:
      type: array
      items:
        $ref: '#/definitions/SearchField'
      description: >-
        A list of sub-fields if this is a field of type Edm.ComplexType or
        Collection(Edm.ComplexType). Must be null or empty for simple fields.
  required:
    - name
    - type
  externalDocs:
    url: https://docs.microsoft.com/rest/api/searchservice/Create-Index
  description: >-
    Represents a field in an index definition, which describes the name, data
    type, and search behavior of a field.
TextWeights:
  properties:
    weights:
      type: object
      additionalProperties:
        type: number
        format: double
        x-nullable: false
      description: >-
        The dictionary of per-field weights to boost document scoring. The
        keys are field names and the values are the weights for each field.
  required:
    - weights
  description: >-
    Defines weights on index fields for which matches should boost scoring
    in search queries.
ScoringFunction:
  discriminator: type
  properties:
    type:
      type: string
      description: >-
        Indicates the type of function to use. Valid values include
        magnitude, freshness, distance, and tag. The function type must be
        lower case.
    fieldName:
      type: string
      description: The name of the field used as input to the scoring function.
    boost:
      type: number
      format: double
      description: >-
        A multiplier for the raw score. Must be a positive number not equal
        to 1.0.
    interpolation:
      $ref: '#/definitions/ScoringFunctionInterpolation'
      description: >-
        A value indicating how boosting will be interpolated across document
        scores; defaults to "Linear".
  required:
    - type
    - fieldName
    - boost
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: Base type for functions that can modify document scores during ranking.
DistanceScoringFunction:
  x-ms-discriminator-value: distance
  allOf:
    - $ref: '#/definitions/ScoringFunction'
  properties:
    distance:
      x-ms-client-name: Parameters
      $ref: '#/definitions/DistanceScoringParameters'
      description: Parameter values for the distance scoring function.
  required:
    - distance
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: >-
    Defines a function that boosts scores based on distance from a geographic
    location.
DistanceScoringParameters:
  properties:
    referencePointParameter:
      type: string
      description: >-
        The name of the parameter passed in search queries to specify the
        reference location.
    boostingDistance:
      type: number
      format: double
      description: >-
        The distance in kilometers from the reference location where the
        boosting range ends.
  required:
    - referencePointParameter
    - boostingDistance
  description: Provides parameter values to a distance scoring function.
FreshnessScoringFunction:
  x-ms-discriminator-value: freshness
  allOf:
    - $ref: '#/definitions/ScoringFunction'
  properties:
    freshness:
      x-ms-client-name: Parameters
      $ref: '#/definitions/FreshnessScoringParameters'
      description: Parameter values for the freshness scoring function.
  required:
    - freshness
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: >-
    Defines a function that boosts scores based on the value of a date-time
    field.
FreshnessScoringParameters:
  properties:
    boostingDuration:
      type: string
      format: duration
      description: >-
        The expiration period after which boosting will stop for a particular
        document.
  required:
    - boostingDuration
  description: Provides parameter values to a freshness scoring function.
MagnitudeScoringFunction:
  x-ms-discriminator-value: magnitude
  allOf:
    - $ref: '#/definitions/ScoringFunction'
  properties:
    magnitude:
      x-ms-client-name: Parameters
      $ref: '#/definitions/MagnitudeScoringParameters'
      description: Parameter values for the magnitude scoring function.
  required:
    - magnitude
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: >-
    Defines a function that boosts scores based on the magnitude of a numeric
    field.
MagnitudeScoringParameters:
  properties:
    boostingRangeStart:
      type: number
      format: double
      description: The field value at which boosting starts.
    boostingRangeEnd:
      type: number
      format: double
      description: The field value at which boosting ends.
    constantBoostBeyondRange:
      x-ms-client-name: ShouldBoostBeyondRangeByConstant
      type: boolean
      description: >-
        A value indicating whether to apply a constant boost for field values
        beyond the range end value; default is false.
  required:
    - boostingRangeStart
    - boostingRangeEnd
  description: Provides parameter values to a magnitude scoring function.
TagScoringFunction:
  x-ms-discriminator-value: tag
  allOf:
    - $ref: '#/definitions/ScoringFunction'
  properties:
    tag:
      x-ms-client-name: Parameters
      $ref: '#/definitions/TagScoringParameters'
      description: Parameter values for the tag scoring function.
  required:
    - tag
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: >-
    Defines a function that boosts scores of documents with string values
    matching a given list of tags.
TagScoringParameters:
  properties:
    tagsParameter:
      type: string
      description: >-
        The name of the parameter passed in search queries to specify the
        list of tags to compare against the target field.
  required:
    - tagsParameter
  description: Provides parameter values to a tag scoring function.
ScoringFunctionInterpolation:
  type: string
  enum:
    - linear
    - constant
    - quadratic
    - logarithmic
  x-ms-enum:
    name: ScoringFunctionInterpolation
    modelAsString: false
    values:
      - value: linear
        name: Linear
        description: >-
          Boosts scores by a linearly decreasing amount. This is the default
          interpolation for scoring functions.
      - value: constant
        name: Constant
        description: Boosts scores by a constant factor.
      - value: quadratic
        name: Quadratic
        description: >-
          Boosts scores by an amount that decreases quadratically. Boosts
          decrease slowly for higher scores, and more quickly as the scores
          decrease. This interpolation option is not allowed in tag scoring
          functions.
      - value: logarithmic
        name: Logarithmic
        description: >-
          Boosts scores by an amount that decreases logarithmically. Boosts
          decrease quickly for higher scores, and more slowly as the scores
          decrease. This interpolation option is not allowed in tag scoring
          functions.
  description: >-
    Defines the function used to interpolate score boosting across a range of
    documents.
ScoringProfile:
  properties:
    name:
      externalDocs:
        url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules
      type: string
      description: The name of the scoring profile.
    text:
      x-ms-client-name: TextWeights
      $ref: '#/definitions/TextWeights'
      description: >-
        Parameters that boost scoring based on text matches in certain index
        fields.
    functions:
      type: array
      items:
        $ref: '#/definitions/ScoringFunction'
      description: The collection of functions that influence the scoring of documents.
    functionAggregation:
      $ref: '#/definitions/ScoringFunctionAggregation'
      description: >-
        A value indicating how the results of individual scoring functions
        should be combined. Defaults to "Sum". Ignored if there are no
        scoring functions.
  required:
    - name
  externalDocs:
    url: >-
      https://docs.microsoft.com/rest/api/searchservice/Add-scoring-profiles-to-a-search-index
  description: >-
    Defines parameters for a search index that influence scoring in search
    queries.
ScoringFunctionAggregation:
  type: string
  enum:
    - sum
    - average
    - minimum
    - maximum
    - firstMatching
  x-ms-enum:
    name: ScoringFunctionAggregation
    modelAsString: false
    values:
      - value: sum
        name: Sum
        description: Boost scores by the sum of all scoring function results.
      - value: average
        name: Average
        description: Boost scores by the average of all scoring function results.
      - value: minimum
        name: Minimum
        description: Boost scores by the minimum of all scoring function results.
      - value: maximum
        name: Maximum
        description: Boost scores by the maximum of all scoring function results.
      - value: firstMatching
        name: FirstMatching
        description: >-
          Boost scores using the first applicable scoring function in the
          scoring profile.
  description: >-
    Defines the aggregation function used to combine the results of all the
    scoring functions in a scoring profile.
CorsOptions: properties: allowedOrigins: type: array items: type: string description: >- The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). maxAgeInSeconds: type: integer format: int64 description: >- The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. required: - allowedOrigins externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Create-Index description: >- Defines options to control Cross-Origin Resource Sharing (CORS) for an index. Suggester: properties: name: type: string description: The name of the suggester. searchMode: type: string enum: - analyzingInfixMatching x-ms-enum: name: SuggesterSearchMode modelAsString: false values: - value: analyzingInfixMatching name: AnalyzingInfixMatching description: >- Matches consecutive whole terms and prefixes in a field. For example, for the field 'The fastest brown fox', the queries 'fast' and 'fastest brow' would both match. description: A value indicating the capabilities of the suggester. sourceFields: type: array items: type: string description: >- The list of field names to which the suggester applies. Each field must be searchable. required: - name - searchMode - sourceFields description: >- Defines how the Suggest API should apply to a group of fields in the index. SearchIndex: properties: name: externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules type: string description: The name of the index. fields: type: array items: $ref: '#/definitions/SearchField' description: The fields of the index. scoringProfiles: type: array items: $ref: '#/definitions/ScoringProfile' description: The scoring profiles for the index. defaultScoringProfile: type: string description: >- The name of the scoring profile to use if none is specified in the query. 
If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. corsOptions: $ref: '#/definitions/CorsOptions' description: Options to control Cross-Origin Resource Sharing (CORS) for the index. suggesters: type: array items: $ref: '#/definitions/Suggester' description: The suggesters for the index. analyzers: type: array items: $ref: '#/definitions/LexicalAnalyzer' description: The analyzers for the index. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search tokenizers: type: array items: $ref: '#/definitions/LexicalTokenizer' description: The tokenizers for the index. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search tokenFilters: type: array items: $ref: '#/definitions/TokenFilter' description: The token filters for the index. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search charFilters: type: array items: $ref: '#/definitions/CharFilter' description: The character filters for the index. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search encryptionKey: $ref: '#/definitions/SearchResourceEncryptionKey' description: >- A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
externalDocs: url: https://aka.ms/azure-search-encryption-with-cmk similarity: $ref: '#/definitions/Similarity' description: >- The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. externalDocs: url: https://docs.microsoft.com/azure/search/index-ranking-similarity '@odata.etag': x-ms-client-name: ETag type: string description: The ETag of the index. required: - name - fields description: >- Represents a search index definition, which describes the fields and search behavior of an index. GetIndexStatisticsResult: properties: documentCount: type: integer format: int64 x-nullable: false readOnly: true description: The number of documents in the index. storageSize: type: integer format: int64 x-nullable: false readOnly: true description: The amount of storage in bytes consumed by the index. required: - documentCount - storageSize description: >- Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. ListIndexesResult: properties: value: x-ms-client-name: Indexes type: array readOnly: true items: $ref: '#/definitions/SearchIndex' description: The indexes in the Search service. required: - value description: >- Response from a List Indexes request. If successful, it includes the full definitions of all indexes. SearchIndexerSkillset: properties: name: type: string description: The name of the skillset. description: type: string description: The description of the skillset. skills: type: array items: $ref: '#/definitions/SearchIndexerSkill' description: A list of skills in the skillset. cognitiveServices: x-ms-client-name: CognitiveServicesAccount $ref: '#/definitions/CognitiveServicesAccount' description: Details about cognitive services to be used when running skills. 
knowledgeStore: $ref: '#/definitions/SearchIndexerKnowledgeStore' description: >- Definition of additional projections to azure blob, table, or files, of enriched data. '@odata.etag': x-ms-client-name: ETag type: string description: The ETag of the skillset. required: - name - skills externalDocs: url: https://docs.microsoft.com/azure/search/cognitive-search-tutorial-blob description: A list of skills. CognitiveServicesAccount: discriminator: '@odata.type' properties: '@odata.type': type: string description: >- Identifies the concrete type of the cognitive service resource attached to a skillset. description: type: string description: Description of the cognitive service resource attached to a skillset. required: - '@odata.type' description: >- Base type for describing any cognitive service resource attached to a skillset. DefaultCognitiveServicesAccount: description: >- An empty object that represents the default cognitive service resource for a skillset. x-ms-discriminator-value: '#Microsoft.Azure.Search.DefaultCognitiveServices' allOf: - $ref: '#/definitions/CognitiveServicesAccount' CognitiveServicesAccountKey: description: >- A cognitive service resource provisioned with a key that is attached to a skillset. x-ms-discriminator-value: '#Microsoft.Azure.Search.CognitiveServicesByKey' allOf: - $ref: '#/definitions/CognitiveServicesAccount' properties: key: type: string description: >- The key used to provision the cognitive service resource attached to a skillset. required: - key SearchIndexerKnowledgeStore: properties: storageConnectionString: type: string description: >- The connection string to the storage account projections will be stored in. projections: type: array items: $ref: '#/definitions/SearchIndexerKnowledgeStoreProjection' x-nullable: false description: A list of additional projections to perform during indexing. 
required: - storageConnectionString - projections externalDocs: url: >- https://docs.microsoft.com/azure/search/knowledge-store-projection-overview description: >- Definition of additional projections to azure blob, table, or files, of enriched data. SearchIndexerKnowledgeStoreProjection: properties: tables: type: array items: $ref: '#/definitions/SearchIndexerKnowledgeStoreTableProjectionSelector' x-nullable: false description: Projections to Azure Table storage. objects: type: array items: $ref: '#/definitions/SearchIndexerKnowledgeStoreObjectProjectionSelector' x-nullable: false description: Projections to Azure Blob storage. files: type: array items: $ref: '#/definitions/SearchIndexerKnowledgeStoreFileProjectionSelector' x-nullable: false description: Projections to Azure File storage. description: Container object for various projection selectors. SearchIndexerKnowledgeStoreProjectionSelector: properties: referenceKeyName: type: string description: Name of reference key to different projection. generatedKeyName: type: string description: Name of generated key to store projection under. source: type: string description: Source data to project. sourceContext: type: string description: Source context for complex projections. inputs: type: array items: $ref: '#/definitions/InputFieldMappingEntry' description: Nested inputs for complex projections. description: Abstract class to share properties between concrete selectors. SearchIndexerKnowledgeStoreBlobProjectionSelector: properties: storageContainer: type: string description: Blob container to store projections in. allOf: - $ref: '#/definitions/SearchIndexerKnowledgeStoreProjectionSelector' required: - storageContainer description: Abstract class to share properties between concrete selectors. SearchIndexerKnowledgeStoreTableProjectionSelector: properties: tableName: type: string description: Name of the Azure table to store projected data in. 
required: - generatedKeyName - tableName allOf: - $ref: '#/definitions/SearchIndexerKnowledgeStoreProjectionSelector' description: Description for what data to store in Azure Tables. SearchIndexerKnowledgeStoreObjectProjectionSelector: allOf: - $ref: '#/definitions/SearchIndexerKnowledgeStoreBlobProjectionSelector' description: Projection definition for what data to store in Azure Blob. SearchIndexerKnowledgeStoreFileProjectionSelector: allOf: - $ref: '#/definitions/SearchIndexerKnowledgeStoreBlobProjectionSelector' description: Projection definition for what data to store in Azure Files. SearchIndexerSkill: discriminator: '@odata.type' properties: '@odata.type': type: string description: Identifies the concrete type of the skill. name: type: string description: >- The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. description: type: string description: >- The description of the skill which describes the inputs, outputs, and usage of the skill. context: type: string description: >- Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. inputs: type: array items: $ref: '#/definitions/InputFieldMappingEntry' description: >- Inputs of the skills could be a column in the source data set, or the output of an upstream skill. outputs: type: array items: $ref: '#/definitions/OutputFieldMappingEntry' description: >- The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. required: - '@odata.type' - inputs - outputs externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-predefined-skills description: Base type for skills. InputFieldMappingEntry: properties: name: type: string description: The name of the input. 
source: type: string description: The source of the input. sourceContext: type: string description: The source context used for selecting recursive inputs. inputs: type: array items: $ref: '#/definitions/InputFieldMappingEntry' description: The recursive inputs used when creating a complex type. required: - name description: Input field mapping for a skill. OutputFieldMappingEntry: properties: name: type: string description: The name of the output defined by the skill. targetName: type: string description: The target name of the output. It is optional and defaults to name. required: - name externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/naming-rules description: Output field mapping for a skill. ConditionalSkill: x-ms-discriminator-value: '#Microsoft.Skills.Util.ConditionalSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-conditional description: >- A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. KeyPhraseExtractionSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultLanguageCode: $ref: '#/definitions/KeyPhraseExtractionSkillLanguage' description: A value indicating which language code to use. Default is en. maxKeyPhraseCount: type: integer format: int32 x-nullable: true description: >- A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. modelVersion: type: string x-nullable: true description: >- The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. 
externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-keyphrases description: A skill that uses text analytics for key phrase extraction. OcrSkill: x-ms-discriminator-value: '#Microsoft.Skills.Vision.OcrSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultLanguageCode: $ref: '#/definitions/OcrSkillLanguage' description: A value indicating which language code to use. Default is en. detectOrientation: x-ms-client-name: ShouldDetectOrientation type: boolean default: false description: >- A value indicating to turn orientation detection on or not. Default is false. externalDocs: url: https://docs.microsoft.com/azure/search/cognitive-search-skill-ocr description: A skill that extracts text from image files. ImageAnalysisSkill: x-ms-discriminator-value: '#Microsoft.Skills.Vision.ImageAnalysisSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultLanguageCode: $ref: '#/definitions/ImageAnalysisSkillLanguage' description: A value indicating which language code to use. Default is en. visualFeatures: type: array items: $ref: '#/definitions/VisualFeature' x-nullable: false description: A list of visual features. details: type: array items: $ref: '#/definitions/ImageDetail' x-nullable: false description: A string indicating which domain-specific details to return. externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-image-analysis description: >- A skill that analyzes image files. It extracts a rich set of visual features based on the image content. LanguageDetectionSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.LanguageDetectionSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultCountryHint: type: string x-nullable: true description: >- A country code to use as a hint to the language detection model if it cannot disambiguate the language. 
modelVersion: type: string x-nullable: true description: >- The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-language-detection description: >- A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. ShaperSkill: x-ms-discriminator-value: '#Microsoft.Skills.Util.ShaperSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' externalDocs: url: https://docs.microsoft.com/azure/search/cognitive-search-skill-shaper description: >- A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). MergeSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.MergeSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: insertPreTag: type: string default: ' ' description: >- The tag indicates the start of the merged text. By default, the tag is an empty space. insertPostTag: type: string default: ' ' description: >- The tag indicates the end of the merged text. By default, the tag is an empty space. externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-textmerger description: >- A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. EntityRecognitionSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.EntityRecognitionSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: categories: type: array items: $ref: '#/definitions/EntityCategory' x-nullable: false description: A list of entity categories that should be extracted. 
defaultLanguageCode: $ref: '#/definitions/EntityRecognitionSkillLanguage' description: A value indicating which language code to use. Default is en. includeTypelessEntities: type: boolean x-nullable: true description: >- Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. minimumPrecision: type: number format: double x-nullable: true description: >- A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-entity-recognition description: Text analytics entity recognition. SentimentSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.SentimentSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultLanguageCode: $ref: '#/definitions/SentimentSkillLanguage' description: A value indicating which language code to use. Default is en. externalDocs: url: https://docs.microsoft.com/azure/search/cognitive-search-skill-sentiment description: >- Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. SplitSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.SplitSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultLanguageCode: $ref: '#/definitions/SplitSkillLanguage' description: A value indicating which language code to use. Default is en. textSplitMode: $ref: '#/definitions/TextSplitMode' x-nullable: false description: A value indicating which split mode to perform. maximumPageLength: type: integer format: int32 x-nullable: true description: The desired maximum page length. Default is 10000. 
externalDocs: url: https://docs.microsoft.com/azure/search/cognitive-search-skill-textsplit description: A skill to split a string into chunks of text. TextTranslationSkill: x-ms-discriminator-value: '#Microsoft.Skills.Text.TranslationSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: defaultToLanguageCode: $ref: '#/definitions/TextTranslationSkillLanguage' description: >- The language code to translate documents into for documents that don't specify the to language explicitly. defaultFromLanguageCode: $ref: '#/definitions/TextTranslationSkillLanguage' description: >- The language code to translate documents from for documents that don't specify the from language explicitly. suggestedFrom: $ref: '#/definitions/TextTranslationSkillLanguage' description: >- The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is en. required: - defaultToLanguageCode externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-skill-text-translation description: A skill to translate text from one language to another. WebApiSkill: x-ms-discriminator-value: '#Microsoft.Skills.Custom.WebApiSkill' allOf: - $ref: '#/definitions/SearchIndexerSkill' properties: uri: type: string description: The url for the Web API. httpHeaders: $ref: '#/definitions/WebApiHttpHeaders' description: The headers required to make the http request. httpMethod: type: string description: The method for the http request. timeout: type: string format: duration description: The desired timeout for the request. Default is 30 seconds. batchSize: type: integer format: int32 x-nullable: true description: The desired batch size which indicates number of documents. degreeOfParallelism: type: integer format: int32 x-nullable: true description: If set, the number of parallel calls that can be made to the Web API. 
required: - uri externalDocs: url: >- https://docs.microsoft.com/azure/search/cognitive-search-custom-skill-web-api description: >- A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. WebApiHttpHeaders: type: object additionalProperties: type: string description: A dictionary of http request headers. ListSkillsetsResult: properties: value: x-ms-client-name: Skillsets type: array readOnly: true items: $ref: '#/definitions/SearchIndexerSkillset' description: The skillsets defined in the Search service. required: - value description: >- Response from a list skillset request. If successful, it includes the full definitions of all skillsets. TextSplitMode: type: string enum: - pages - sentences x-ms-enum: name: TextSplitMode modelAsString: true values: - value: pages name: Pages description: Split the text into individual pages. - value: sentences name: Sentences description: Split the text into individual sentences. description: A value indicating which split mode to perform. VisualFeature: type: string enum: - adult - brands - categories - description - faces - objects - tags x-ms-enum: name: VisualFeature modelAsString: true values: - value: adult name: Adult description: Visual features recognized as adult persons. - value: brands name: Brands description: Visual features recognized as commercial brands. - value: categories name: Categories description: Categories. - value: description name: Description description: Description. - value: faces name: Faces description: Visual features recognized as people faces. - value: objects name: Objects description: Visual features recognized as objects. - value: tags name: Tags description: Tags. description: The strings indicating what visual feature types to return. ImageDetail: type: string enum: - celebrities - landmarks x-ms-enum: name: ImageDetail modelAsString: true values: - value: celebrities name: Celebrities description: Details recognized as celebrities. 
- value: landmarks name: Landmarks description: Details recognized as landmarks. description: A string indicating which domain-specific details to return. EntityCategory: type: string enum: - location - organization - person - quantity - datetime - url - email x-ms-enum: name: EntityCategory modelAsString: true values: - value: location name: Location description: Entities describing a physical location. - value: organization name: Organization description: Entities describing an organization. - value: person name: Person description: Entities describing a person. - value: quantity name: Quantity description: Entities describing a quantity. - value: datetime name: Datetime description: Entities describing a date and time. - value: url name: Url description: Entities describing a URL. - value: email name: Email description: Entities describing an email address. description: A string indicating what entity categories to return. SentimentSkillLanguage: type: string enum: - da - nl - en - fi - fr - de - el - it - 'no' - pl - pt-PT - ru - es - sv - tr x-ms-enum: name: SentimentSkillLanguage modelAsString: true values: - value: da description: Danish - value: nl description: Dutch - value: en description: English - value: fi description: Finnish - value: fr description: French - value: de description: German - value: el description: Greek - value: it description: Italian - value: 'no' description: Norwegian (Bokmaal) - value: pl description: Polish - value: pt-PT description: Portuguese (Portugal) - value: ru description: Russian - value: es description: Spanish - value: sv description: Swedish - value: tr description: Turkish description: The language codes supported for input text by SentimentSkill. 
KeyPhraseExtractionSkillLanguage: type: string enum: - da - nl - en - fi - fr - de - it - ja - ko - 'no' - pl - pt-PT - pt-BR - ru - es - sv x-ms-enum: name: KeyPhraseExtractionSkillLanguage modelAsString: true values: - value: da description: Danish - value: nl description: Dutch - value: en description: English - value: fi description: Finnish - value: fr description: French - value: de description: German - value: it description: Italian - value: ja description: Japanese - value: ko description: Korean - value: 'no' description: Norwegian (Bokmaal) - value: pl description: Polish - value: pt-PT description: Portuguese (Portugal) - value: pt-BR description: Portuguese (Brazil) - value: ru description: Russian - value: es description: Spanish - value: sv description: Swedish description: The language codes supported for input text by KeyPhraseExtractionSkill. OcrSkillLanguage: type: string enum: - zh-Hans - zh-Hant - cs - da - nl - en - fi - fr - de - el - hu - it - ja - ko - nb - pl - pt - ru - es - sv - tr - ar - ro - sr-Cyrl - sr-Latn - sk x-ms-enum: name: OcrSkillLanguage modelAsString: true values: - value: zh-Hans description: Chinese-Simplified - value: zh-Hant description: Chinese-Traditional - value: cs description: Czech - value: da description: Danish - value: nl description: Dutch - value: en description: English - value: fi description: Finnish - value: fr description: French - value: de description: German - value: el description: Greek - value: hu description: Hungarian - value: it description: Italian - value: ja description: Japanese - value: ko description: Korean - value: nb description: Norwegian (Bokmaal) - value: pl description: Polish - value: pt description: Portuguese - value: ru description: Russian - value: es description: Spanish - value: sv description: Swedish - value: tr description: Turkish - value: ar description: Arabic - value: ro description: Romanian - value: sr-Cyrl description: Serbian (Cyrillic, Serbia) - value: sr-Latn 
description: Serbian (Latin, Serbia) - value: sk description: Slovak description: The language codes supported for input by OcrSkill. SplitSkillLanguage: type: string enum: - da - de - en - es - fi - fr - it - ko - pt x-ms-enum: name: SplitSkillLanguage modelAsString: true values: - value: da description: Danish - value: de description: German - value: en description: English - value: es description: Spanish - value: fi description: Finnish - value: fr description: French - value: it description: Italian - value: ko description: Korean - value: pt description: Portuguese description: The language codes supported for input text by SplitSkill. EntityRecognitionSkillLanguage: type: string enum: - ar - cs - zh-Hans - zh-Hant - da - nl - en - fi - fr - de - el - hu - it - ja - ko - 'no' - pl - pt-PT - pt-BR - ru - es - sv - tr x-ms-enum: name: EntityRecognitionSkillLanguage modelAsString: true values: - value: ar description: Arabic - value: cs description: Czech - value: zh-Hans description: Chinese-Simplified - value: zh-Hant description: Chinese-Traditional - value: da description: Danish - value: nl description: Dutch - value: en description: English - value: fi description: Finnish - value: fr description: French - value: de description: German - value: el description: Greek - value: hu description: Hungarian - value: it description: Italian - value: ja description: Japanese - value: ko description: Korean - value: 'no' description: Norwegian (Bokmaal) - value: pl description: Polish - value: pt-PT description: Portuguese (Portugal) - value: pt-BR description: Portuguese (Brazil) - value: ru description: Russian - value: es description: Spanish - value: sv description: Swedish - value: tr description: Turkish description: The language codes supported for input text by EntityRecognitionSkill. 
TextTranslationSkillLanguage: type: string enum: - af - ar - bn - bs - bg - yue - ca - zh-Hans - zh-Hant - hr - cs - da - nl - en - et - fj - fil - fi - fr - de - el - ht - he - hi - mww - hu - is - id - it - ja - sw - tlh - ko - lv - lt - mg - ms - mt - nb - fa - pl - pt - otq - ro - ru - sm - sr-Cyrl - sr-Latn - sk - sl - es - sv - ty - ta - te - th - to - tr - uk - ur - vi - cy - yua x-ms-enum: name: TextTranslationSkillLanguage modelAsString: true values: - value: af description: Afrikaans - value: ar description: Arabic - value: bn description: Bangla - value: bs description: Bosnian (Latin) - value: bg description: Bulgarian - value: yue description: Cantonese (Traditional) - value: ca description: Catalan - value: zh-Hans description: Chinese Simplified - value: zh-Hant description: Chinese Traditional - value: hr description: Croatian - value: cs description: Czech - value: da description: Danish - value: nl description: Dutch - value: en description: English - value: et description: Estonian - value: fj description: Fijian - value: fil description: Filipino - value: fi description: Finnish - value: fr description: French - value: de description: German - value: el description: Greek - value: ht description: Haitian Creole - value: he description: Hebrew - value: hi description: Hindi - value: mww description: Hmong Daw - value: hu description: Hungarian - value: is description: Icelandic - value: id description: Indonesian - value: it description: Italian - value: ja description: Japanese - value: sw description: Kiswahili - value: tlh description: Klingon - value: ko description: Korean - value: lv description: Latvian - value: lt description: Lithuanian - value: mg description: Malagasy - value: ms description: Malay - value: mt description: Maltese - value: nb description: Norwegian - value: fa description: Persian - value: pl description: Polish - value: pt description: Portuguese - value: otq description: Queretaro Otomi - value: ro description: 
Romanian - value: ru description: Russian - value: sm description: Samoan - value: sr-Cyrl description: Serbian (Cyrillic) - value: sr-Latn description: Serbian (Latin) - value: sk description: Slovak - value: sl description: Slovenian - value: es description: Spanish - value: sv description: Swedish - value: ty description: Tahitian - value: ta description: Tamil - value: te description: Telugu - value: th description: Thai - value: to description: Tongan - value: tr description: Turkish - value: uk description: Ukrainian - value: ur description: Urdu - value: vi description: Vietnamese - value: cy description: Welsh - value: yua description: Yucatec Maya description: The language codes supported for input text by TextTranslationSkill. ImageAnalysisSkillLanguage: type: string enum: - en - es - ja - pt - zh x-ms-enum: name: ImageAnalysisSkillLanguage modelAsString: true values: - value: en description: English - value: es description: Spanish - value: ja description: Japanese - value: pt description: Portuguese - value: zh description: Chinese description: The language codes supported for input by ImageAnalysisSkill. SynonymMap: properties: name: externalDocs: url: https://docs.microsoft.com/rest/api/searchservice/Naming-rules type: string description: The name of the synonym map. format: type: string enum: - solr x-ms-enum: name: SynonymMapFormat modelAsString: false values: - value: solr name: Solr description: Selects the SOLR format for synonym maps. description: >- The format of the synonym map. Only the 'solr' format is currently supported. synonyms: type: string description: >- A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. externalDocs: url: >- https://docs.microsoft.com/rest/api/searchservice/Create-Synonym-Map#SynonymMapFormat encryptionKey: $ref: '#/definitions/SearchResourceEncryptionKey' description: >- A description of an encryption key that you create in Azure Key Vault. 
This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. externalDocs: url: https://aka.ms/azure-search-encryption-with-cmk '@odata.etag': x-ms-client-name: ETag type: string description: The ETag of the synonym map. required: - name - format - synonyms description: Represents a synonym map definition. ListSynonymMapsResult: properties: value: x-ms-client-name: SynonymMaps type: array readOnly: true items: $ref: '#/definitions/SynonymMap' description: The synonym maps in the Search service. required: - value description: >- Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. SearchResourceEncryptionKey: properties: keyVaultKeyName: x-ms-client-name: keyName type: string description: >- The name of your Azure Key Vault key to be used to encrypt your data at rest. keyVaultKeyVersion: x-ms-client-name: keyVersion type: string description: >- The version of your Azure Key Vault key to be used to encrypt your data at rest. keyVaultUri: x-ms-client-name: vaultUri type: string description: >- The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be https://my-keyvault-name.vault.azure.net. accessCredentials: $ref: '#/definitions/AzureActiveDirectoryApplicationCredentials' description: >- Optional Azure Active Directory credentials used for accessing your Azure Key Vault. 
Not required if using managed identity instead. externalDocs: url: https://aka.ms/azure-search-msi required: - keyVaultKeyName - keyVaultKeyVersion - keyVaultUri description: >- A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. AzureActiveDirectoryApplicationCredentials: properties: applicationId: type: string description: >- An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. applicationSecret: type: string description: The authentication key of the specified AAD application. required: - applicationId description: >- Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. ServiceStatistics: properties: counters: $ref: '#/definitions/ServiceCounters' description: Service level resource counters. limits: $ref: '#/definitions/ServiceLimits' description: Service level general limits. required: - counters - limits description: >- Response from a get service statistics request. If successful, it includes service level counters and limits. ServiceCounters: properties: documentCount: x-ms-client-name: documentCounter $ref: '#/definitions/ResourceCounter' description: Total number of documents across all indexes in the service. indexesCount: x-ms-client-name: indexCounter $ref: '#/definitions/ResourceCounter' description: Total number of indexes. indexersCount: x-ms-client-name: indexerCounter $ref: '#/definitions/ResourceCounter' description: Total number of indexers. dataSourcesCount: x-ms-client-name: dataSourceCounter $ref: '#/definitions/ResourceCounter' description: Total number of data sources. 
storageSize: x-ms-client-name: storageSizeCounter $ref: '#/definitions/ResourceCounter' description: Total size of used storage in bytes. synonymMaps: x-ms-client-name: synonymMapCounter $ref: '#/definitions/ResourceCounter' description: Total number of synonym maps. skillsetCount: x-ms-client-name: skillsetCounter $ref: '#/definitions/ResourceCounter' description: Total number of skillsets. required: - documentCount - indexesCount - indexersCount - dataSourcesCount - storageSize - synonymMaps description: Represents service-level resource counters and quotas. ServiceLimits: properties: maxFieldsPerIndex: type: integer format: int32 x-nullable: true description: The maximum allowed fields per index. maxFieldNestingDepthPerIndex: type: integer format: int32 x-nullable: true description: >- The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. maxComplexCollectionFieldsPerIndex: type: integer format: int32 x-nullable: true description: >- The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. maxComplexObjectsInCollectionsPerDocument: type: integer format: int32 x-nullable: true description: >- The maximum number of objects in complex collections allowed per document. description: Represents various service level limits. ResourceCounter: properties: usage: type: integer format: int64 x-nullable: false description: The resource usage amount. quota: type: integer format: int64 x-nullable: true description: The resource amount quota. required: - usage description: Represents a resource's usage and quota. SearchError: properties: code: type: string readOnly: true description: One of a server-defined set of error codes. message: type: string readOnly: true description: A human-readable representation of the error. 
details: type: array items: $ref: '#/definitions/SearchError' readOnly: true description: >- An array of details about specific errors that led to this reported error. required: - message description: Describes an error condition for the Azure Cognitive Search API. parameters: ApiVersionParameter: name: api-version in: query required: true type: string description: Client Api Version. ClientRequestIdParameter: name: x-ms-client-request-id in: header required: false type: string format: uuid description: The tracking ID sent with the request to help with debugging. x-ms-client-request-id: true x-ms-parameter-grouping: name: request-options x-ms-parameter-location: method IfMatchParameter: name: If-Match in: header required: false type: string description: >- Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. x-ms-parameter-location: method IfNoneMatchParameter: name: If-None-Match in: header required: false type: string description: >- Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. x-ms-parameter-location: method PreferHeaderParameter: name: Prefer in: header required: true type: string enum: - return=representation description: >- For HTTP PUT requests, instructs the service to return the created/updated resource on success. x-ms-parameter-location: method EndpointParameter: name: endpoint in: path required: true type: string x-ms-skip-url-encoding: true description: The endpoint URL of the search service. x-ms-parameter-location: client tags: - name: DataSources - name: Indexers - name: Indexes - name: Skillsets - name: SynonymMaps