{
    "cells": [
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "68e1c158",
            "metadata": {},
            "source": [
                "# Multiple Results\n"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "fb81bacd",
            "metadata": {},
            "source": [
                "In this notebook we show how you can in a single request, have the LLM model return multiple results per prompt. This is useful for running experiments where you want to evaluate the robustness of your prompt and the parameters of your config against a particular large language model.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "a77bdf89",
            "metadata": {},
            "outputs": [],
            "source": [
                "!python -m pip install semantic-kernel==1.0.3"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "3f4bfee4",
            "metadata": {},
            "outputs": [],
            "source": [
                "from services import Service\n",
                "\n",
                "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n",
                "selectedService = Service.OpenAI"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "508ad44f",
            "metadata": {},
            "outputs": [],
            "source": [
                "from semantic_kernel.contents import ChatHistory  # noqa: F401\n",
                "\n",
                "if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:\n",
                "    from semantic_kernel.connectors.ai.open_ai import (  # noqa: F401\n",
                "        AzureChatCompletion,\n",
                "        AzureChatPromptExecutionSettings,\n",
                "        AzureTextCompletion,\n",
                "        OpenAIChatCompletion,\n",
                "        OpenAIChatPromptExecutionSettings,\n",
                "        OpenAITextCompletion,\n",
                "        OpenAITextPromptExecutionSettings,\n",
                "    )\n",
                "if selectedService == Service.HuggingFace:\n",
                "    from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion  # noqa: F401"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "d8ddffc1",
            "metadata": {},
            "source": [
                "First, we will set up the text and chat services we will be submitting prompts to.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "8f8dcbc6",
            "metadata": {},
            "outputs": [],
            "source": [
                "from semantic_kernel import Kernel\n",
                "\n",
                "kernel = Kernel()\n",
                "\n",
                "# Configure Azure LLM service\n",
                "if selectedService == Service.AzureOpenAI:\n",
                "    azure_text_service = AzureTextCompletion(\n",
                "        service_id=\"aoai_text\"\n",
                "    )  # set the deployment name to the value of your text model (e.g. gpt-35-turbo-instruct)\n",
                "    azure_chat_service = AzureChatCompletion(\n",
                "        service_id=\"aoai_chat\"\n",
                "    )  # set the deployment name to the value of your chat model\n",
                "\n",
                "# Configure OpenAI service\n",
                "if selectedService == Service.OpenAI:\n",
                "    oai_text_service = OpenAITextCompletion(service_id=\"oai_text\", ai_model_id=\"gpt-3.5-turbo-instruct\")\n",
                "    oai_chat_service = OpenAIChatCompletion(service_id=\"oai_chat\", ai_model_id=\"gpt-3.5-turbo\")\n",
                "\n",
                "# Configure Hugging Face service\n",
                "if selectedService == Service.HuggingFace:\n",
                "    hf_text_service = HuggingFaceTextCompletion(service_id=\"hf_text\", ai_model_id=\"distilgpt2\", task=\"text-generation\")"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "50561d82",
            "metadata": {},
            "source": [
                "Next, we'll set up the completion request settings for text completion services.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "628c843e",
            "metadata": {},
            "outputs": [],
            "source": [
                "oai_text_prompt_execution_settings = OpenAITextPromptExecutionSettings(\n",
                "    service=\"oai_text\",\n",
                "    extension_data={\n",
                "        \"max_tokens\": 80,\n",
                "        \"temperature\": 0.7,\n",
                "        \"top_p\": 1,\n",
                "        \"frequency_penalty\": 0.5,\n",
                "        \"presence_penalty\": 0.5,\n",
                "        \"number_of_responses\": 3,\n",
                "    },\n",
                ")"
            ]
        },
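        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "1f0c2a7e",
            "metadata": {},
            "source": [
                "The `number_of_responses` field is what requests multiple results; in the OpenAI connector it is expected to map to the API's `n` parameter (and note that you are billed for the output tokens of every response). As a quick sanity check, the sketch below assumes the settings object exposes `prepare_settings_dict()` to show what will actually be sent.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "2b3c4d5e",
            "metadata": {},
            "outputs": [],
            "source": [
                "# A minimal sketch, assuming prepare_settings_dict() is available on the settings\n",
                "# object in this semantic-kernel version; it shows the keyword arguments the\n",
                "# connector will pass to the underlying client (number_of_responses -> n).\n",
                "print(oai_text_prompt_execution_settings.prepare_settings_dict())"
            ]
        },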
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "857a9c89",
            "metadata": {},
            "source": [
                "## Multiple Open AI Text Completions\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "e2979db8",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.OpenAI:\n",
                "    prompt = \"What is the purpose of a rubber duck?\"\n",
                "\n",
                "    results = await oai_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)\n",
                "    i = 1\n",
                "    for result in results:\n",
                "        print(f\"Result {i}: {result}\")\n",
                "        i += 1"
            ]
        },
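        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "3d9e1f20",
            "metadata": {},
            "source": [
                "Multiple results are only interesting when sampling is enabled: with `temperature` near 0 the completions will tend to collapse into the same text. The hedged sketch below (reusing the service and prompt from above) illustrates that contrast.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "4e5f6a7b",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.OpenAI:\n",
                "    # A minimal sketch: the same request with (near-)greedy decoding. Expect the\n",
                "    # three results to be identical or nearly so, unlike the sampled run above.\n",
                "    greedy_settings = OpenAITextPromptExecutionSettings(\n",
                "        service_id=\"oai_text\",\n",
                "        extension_data={\"max_tokens\": 80, \"temperature\": 0.0, \"number_of_responses\": 3},\n",
                "    )\n",
                "    greedy_results = await oai_text_service.get_text_contents(prompt=prompt, settings=greedy_settings)\n",
                "    for i, result in enumerate(greedy_results, start=1):\n",
                "        print(f\"Greedy result {i}: {result}\")"
            ]
        },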
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "4288d09f",
            "metadata": {},
            "source": [
                "## Multiple Azure Open AI Text Completions\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "5319f14d",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.AzureOpenAI:\n",
                "    prompt = \"provide me a list of possible meanings for the acronym 'ORLD'\"\n",
                "\n",
                "    results = await azure_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)\n",
                "    i = 1\n",
                "    for result in results:\n",
                "        print(f\"Result {i}: {result}\")\n",
                "        i += 1"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "eb548f9c",
            "metadata": {},
            "source": [
                "## Multiple Hugging Face Text Completions\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "4a148709",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.HuggingFace:\n",
                "    from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (\n",
                "        HuggingFacePromptExecutionSettings,\n",
                "    )\n",
                "\n",
                "    hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n",
                "        service_id=\"hf_text\", extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1}\n",
                "    )"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "9525e4f3",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.HuggingFace:\n",
                "    prompt = \"The purpose of a rubber duck is\"\n",
                "\n",
                "    results = await hf_text_service.get_text_contents(\n",
                "        prompt=prompt, prompt_execution_settings=hf_prompt_execution_settings\n",
                "    )\n",
                "    print(\"\".join(results))"
            ]
        },
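        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "5a6b7c8d",
            "metadata": {},
            "source": [
                "The Hugging Face settings above request a single completion. If this semantic-kernel version forwards the transformers `num_return_sequences` generation argument (an assumption, so treat this as a sketch), you can ask the local model for multiple sampled completions too.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "6b7c8d9e",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.HuggingFace:\n",
                "    # Hedged sketch: num_return_sequences is assumed to be passed through to the\n",
                "    # transformers generation call; sampling must stay enabled for varied outputs.\n",
                "    hf_multi_settings = HuggingFacePromptExecutionSettings(\n",
                "        service_id=\"hf_text\",\n",
                "        extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1, \"num_return_sequences\": 3},\n",
                "    )\n",
                "    results = await hf_text_service.get_text_contents(prompt=prompt, settings=hf_multi_settings)\n",
                "    for i, result in enumerate(results, start=1):\n",
                "        print(f\"Result {i}: {result}\")"
            ]
        },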
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "da632e12",
            "metadata": {},
            "source": [
                "Here, we're setting up the settings for Chat completions.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "e5f11e46",
            "metadata": {},
            "outputs": [],
            "source": [
                "oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(\n",
                "    service_id=\"oai_chat\",\n",
                "    max_tokens=80,\n",
                "    temperature=0.7,\n",
                "    top_p=1,\n",
                "    frequency_penalty=0.5,\n",
                "    presence_penalty=0.5,\n",
                "    number_of_responses=3,\n",
                ")"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "d6bf238e",
            "metadata": {},
            "source": [
                "## Multiple OpenAI Chat Completions\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "dabc6a4c",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.OpenAI:\n",
                "    chat = ChatHistory()\n",
                "    chat.add_user_message(\n",
                "        \"It's a beautiful day outside, birds are singing, flowers are blooming. On days like these, kids like you...\"\n",
                "    )\n",
                "    results = await oai_chat_service.get_chat_message_contents(\n",
                "        chat_history=chat, settings=oai_chat_prompt_execution_settings\n",
                "    )\n",
                "    i = 0\n",
                "    for result in results:\n",
                "        print(f\"Result {i+1}: {str(result)}\")\n",
                "        i += 1"
            ]
        },
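        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "7c8d9e0f",
            "metadata": {},
            "source": [
                "Once you have several variants of the same answer, a simple follow-up (plain Python, no extra APIs) is to compare them, for example by length, as a first rough signal when probing prompt robustness.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "8d9e0f1a",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.OpenAI:\n",
                "    # Illustrative only: sort the chat variants from the previous cell by length\n",
                "    # and print a short preview of each.\n",
                "    variants = sorted((str(result) for result in results), key=len)\n",
                "    for i, text in enumerate(variants, start=1):\n",
                "        print(f\"Variant {i} ({len(text)} chars): {text[:60]}...\")"
            ]
        },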
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "cdb8f740",
            "metadata": {},
            "source": [
                "## Multiple Azure OpenAI Chat Completions\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "66ba4767",
            "metadata": {},
            "outputs": [],
            "source": [
                "az_oai_prompt_execution_settings = AzureChatPromptExecutionSettings(\n",
                "    service_id=\"aoai_chat\",\n",
                "    max_tokens=80,\n",
                "    temperature=0.7,\n",
                "    top_p=1,\n",
                "    frequency_penalty=0.5,\n",
                "    presence_penalty=0.5,\n",
                "    number_of_responses=3,\n",
                ")"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "b74a64a9",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.AzureOpenAI:\n",
                "    content = (\n",
                "        \"Tomorrow is going to be a great day, I can feel it. I'm going to wake up early, go for a run, and then...\"\n",
                "    )\n",
                "    chat = ChatHistory()\n",
                "    chat.add_user_message(content)\n",
                "    results = await azure_chat_service.get_chat_message_contents(\n",
                "        chat_history=chat, settings=az_oai_prompt_execution_settings\n",
                "    )\n",
                "    i = 0\n",
                "    for result in results:\n",
                "        print(f\"Result {i+1}: {str(result)}\")\n",
                "        i += 1"
            ]
        },
        {
            "attachments": {},
            "cell_type": "markdown",
            "id": "98c8191d",
            "metadata": {},
            "source": [
                "## Streaming Multiple Results\n",
                "\n",
                "Here is an example pattern if you want to stream your multiple results. Note that this is not supported for Hugging Face text completions at this time.\n"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "id": "26a37702",
            "metadata": {},
            "outputs": [],
            "source": [
                "if selectedService == Service.OpenAI:\n",
                "    import os\n",
                "    import time\n",
                "\n",
                "    from IPython.display import clear_output\n",
                "\n",
                "    # Determine the clear command based on OS\n",
                "    clear_command = \"cls\" if os.name == \"nt\" else \"clear\"\n",
                "\n",
                "    chat = ChatHistory()\n",
                "    chat.add_user_message(\"what is the purpose of a rubber duck?\")\n",
                "\n",
                "    stream = oai_chat_service.get_streaming_chat_message_contents(\n",
                "        chat_history=chat, settings=oai_chat_prompt_execution_settings\n",
                "    )\n",
                "    number_of_responses = oai_chat_prompt_execution_settings.number_of_responses\n",
                "    texts = [\"\"] * number_of_responses\n",
                "\n",
                "    last_clear_time = time.time()\n",
                "    clear_interval = 0.5  # seconds\n",
                "\n",
                "    # Note: there are some quirks with displaying the output, which sometimes flashes and disappears.\n",
                "    # This could be influenced by a few factors specific to Jupyter notebooks and asynchronous processing.\n",
                "    # The following code attempts to buffer the results to avoid the output flashing on/off the screen.\n",
                "\n",
                "    async for results in stream:\n",
                "        current_time = time.time()\n",
                "\n",
                "        # Update texts with new results\n",
                "        for idx, result in enumerate(results):\n",
                "            if idx < number_of_responses:\n",
                "                texts[idx] += str(result)\n",
                "\n",
                "        # Clear and display output at intervals\n",
                "        if current_time - last_clear_time > clear_interval:\n",
                "            clear_output(wait=True)\n",
                "            for idx, text in enumerate(texts):\n",
                "                print(f\"Result {idx + 1}: {text}\")\n",
                "            last_clear_time = current_time\n",
                "\n",
                "    print(\"----------------------------------------\")"
            ]
        }
    ],
    "metadata": {
        "kernelspec": {
            "display_name": "Python 3 (ipykernel)",
            "language": "python",
            "name": "python3"
        },
        "language_info": {
            "codemirror_mode": {
                "name": "ipython",
                "version": 3
            },
            "file_extension": ".py",
            "mimetype": "text/x-python",
            "name": "python",
            "nbconvert_exporter": "python",
            "pygments_lexer": "ipython3",
            "version": "3.10.12"
        }
    },
    "nbformat": 4,
    "nbformat_minor": 5
}