{"cells": [{"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import openai\n", "import google.generativeai as palm\n", "import together"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["llm_api_options = [\"OpenAI API\",\"Azure OpenAI API\",\"Google PaLM API\", \"Llama 2\"]\n", "TEST_MESSAGE = \"Write an introductory paragraph to explain Generative AI to the reader of this content.\"\n", "openai_models = [\"gpt-4\", \"gpt-4-0613\", \"gpt-4-32k\", \"gpt-4-32k-0613\", \"gpt-3.5-turbo\",\n", " \"gpt-3.5-turbo-0613\", \"gpt-3.5-turbo-16k\", \"gpt-3.5-turbo-16k-0613\", \"text-davinci-003\",\n", " \"text-davinci-002\", \"text-curie-001\", \"text-babbage-001\", \"text-ada-001\"]"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["google_palm_models = [\"models/text-bison-001\", \"models/chat-bison-001\",\"models/embedding-gecko-001\"]"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["temperature = 0.7"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def openai_text_completion(openai_api_key: str, prompt: str, model: str):\n", " try:\n", " system_prompt: str = \"Explain in detail to help student understand the concept.\",\n", " assistant_prompt: str = None,\n", " messages = [\n", " {\"role\": \"user\", \"content\": f\"{prompt}\"},\n", " {\"role\": \"system\", \"content\": f\"{system_prompt}\"},\n", " {\"role\": \"assistant\", \"content\": f\"{assistant_prompt}\"}\n", " ]\n", " openai.api_key = openai_api_key\n", " openai.api_version = '2020-11-07'\n", " completion = openai.ChatCompletion.create(\n", " model = model,\n", " messages = messages,\n", " temperature = temperature\n", " )\n", " response = completion[\"choices\"][0][\"message\"].content\n", " return \"\", response\n", " except Exception as exception:\n", " print(f\"Exception Name: {type(exception).__name__}\")\n", " print(exception)\n", " return f\" openai_text_completion Error - {exception}\", \"\""]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def azure_openai_text_completion(azure_openai_api_key: str, azure_endpoint: str, azure_deployment_name: str, prompt: str, model: str):\n", " try:\n", " system_prompt: str = \"Explain in detail to help student understand the concept.\",\n", " assistant_prompt: str = None,\n", " messages = [\n", " {\"role\": \"user\", \"content\": f\"{prompt}\"},\n", " {\"role\": \"system\", \"content\": f\"{system_prompt}\"},\n", " {\"role\": \"assistant\", \"content\": f\"{assistant_prompt}\"}\n", " ]\n", " openai.api_key = azure_openai_api_key\n", " openai.api_type = \"azure\"\n", " openai.api_version = \"2023-05-15\"\n", " openai.api_base = f\"https://{azure_endpoint}.openai.azure.com\"\n", " completion = openai.ChatCompletion.create(\n", " model = model,\n", " engine = azure_deployment_name,\n", " messages = messages,\n", " temperature = temperature\n", " )\n", " response = completion[\"choices\"][0][\"message\"].content\n", " return \"\", response\n", " except Exception as exception:\n", " print(f\"Exception Name: {type(exception).__name__}\")\n", " print(exception)\n", " return f\" azure_openai_text_completion Error - {exception}\", \"\""]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def palm_text_completion(google_palm_key: str, prompt: str, model: str):\n", " 
try:\n", " candidate_count = 1\n", " top_k = 40\n", " top_p = 0.95\n", " max_output_tokens = 1024\n", " palm.configure(api_key=google_palm_key)\n", " defaults = {\n", " 'model': model,\n", " 'temperature': temperature,\n", " 'candidate_count': candidate_count,\n", " 'top_k': top_k,\n", " 'top_p': top_p,\n", " 'max_output_tokens': max_output_tokens,\n", " 'stop_sequences': [],\n", " 'safety_settings': [{\"category\":\"HARM_CATEGORY_DEROGATORY\",\"threshold\":1},{\"category\":\"HARM_CATEGORY_TOXICITY\",\"threshold\":1},{\"category\":\"HARM_CATEGORY_VIOLENCE\",\"threshold\":2},{\"category\":\"HARM_CATEGORY_SEXUAL\",\"threshold\":2},{\"category\":\"HARM_CATEGORY_MEDICAL\",\"threshold\":2},{\"category\":\"HARM_CATEGORY_DANGEROUS\",\"threshold\":2}],\n", " }\n", " response = palm.generate_text(\n", " **defaults,\n", " prompt=prompt\n", " )\n", " return \"\", response.result\n", " except Exception as exception:\n", " print(f\"Exception Name: {type(exception).__name__}\")\n", " print(exception)\n", " return f\" palm_text_completion Error - {exception}\", \"\""]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def test_handler(optionSelection,\n", " openai_key,\n", " azure_openai_key,\n", " azure_openai_api_base,\n", " azure_openai_deployment_name,\n", " google_generative_api_key,\n", "\t\t together_api_key,\n", " prompt: str = TEST_MESSAGE,\n", " openai_model_name: str =\"gpt-4\",\n", " google_model_name: str =\"models/text-bison-001\",\n", "\t\t together_model_name: str = \"togethercomputer/llama-2-70b-chat\"\n", "\t\t):\n", " match optionSelection:\n", " case \"OpenAI API\":\n", " message, response = openai_text_completion(openai_key, prompt,openai_model_name)\n", " return message, response\n", " case \"Azure OpenAI API\":\n", " message, response = azure_openai_text_completion(azure_openai_key, azure_openai_api_base, azure_openai_deployment_name, prompt,openai_model_name)\n", " return message, response\n", " case \"Google PaLM API\":\n", " message, response = palm_text_completion(google_generative_api_key, prompt,google_model_name)\n", " return message, response\n", " case \"Llama 2\":\n", " together.api_key = together_api_key\n", " model: str = together_model_name\n", " output = together.Complete.create(prompt, model=model,temperature=temperature)\n", "\t return \"Response from Together API\", output['output']['choices'][0]['text']\n", " case _:\n", " if optionSelection not in llm_api_options:\n", " return ValueError(\"Invalid choice!\"), \"\""]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with gr.Blocks() as LLMDemoTabbedScreen:\n", " with gr.Tab(\"Text-to-Text (Text Completion)\"):\n", " llm_options = gr.Radio(llm_api_options, label=\"Select one\", info=\"Which service do you want to use?\", value=\"OpenAI API\")\n", " with gr.Row():\n", " with gr.Column():\n", " test_string = gr.Textbox(label=\"Try String\", value=TEST_MESSAGE, lines=5)\n", " test_string_response = gr.Textbox(label=\"Response\", lines=5)\n", " test_string_output_info = gr.Label(value=\"Output Info\", label=\"Info\")\n", " test_button = gr.Button(\"Try it\")\n", " with gr.Tab(\"API Settings\"):\n", " with gr.Tab(\"Open AI\"):\n", " openai_model = gr.Dropdown(openai_models, value=\"gpt-4\", label=\"Model\", info=\"Select one, for Natural language\")\n", " openai_key = gr.Textbox(label=\"OpenAI API Key\", type=\"password\")\n", " with gr.Tab(\"Azure Open AI\"):\n", " with gr.Row():\n", " with gr.Column():\n", " azure_openai_key = 
gr.Textbox(label=\"Azure OpenAI API Key\", type=\"password\")\n", " azure_openai_api_base = gr.Textbox(label=\"Azure OpenAI API Endpoint\")\n", " azure_openai_deployment_name = gr.Textbox(label=\"Azure OpenAI API Deployment Name\")\n", " with gr.Tab(\"Google PaLM API\"):\n", " with gr.Row():\n", " with gr.Column():\n", " google_model_name = gr.Dropdown(google_palm_models, value=\"models/text-bison-001\", label=\"Model\", info=\"Select one, for Natural language\")\n", " google_generative_api_key = gr.Textbox(label=\"Google Generative AI API Key\", type=\"password\")\n", " with gr.Tab(\"Llama-2\"):\n", " with gr.Row():\n", " with gr.Column():\n", " together_model_name = gr.Dropdown(['togethercomputer/llama-2-70b-chat'], value=\"togethercomputer/llama-2-70b-chat\", label=\"Model\", info=\"Select one, for Natural language\")\n", " together_api_key = gr.Textbox(label=\"Together API Key\", type=\"password\")\n", " test_button.click(\n", " fn=test_handler,\n", " inputs=[llm_options,\n", " openai_key,\n", " azure_openai_key,\n", " azure_openai_api_base,\n", " azure_openai_deployment_name,\n", " google_generative_api_key,\n", " together_api_key,\n", " test_string,\n", " openai_model,\n", " google_model_name,\n", "\t\t together_model_name],\n", " outputs=[test_string_output_info, test_string_response]\n", " )"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["if __name__ == \"__main__\":\n", " LLMDemoTabbedScreen.launch()"]}], "metadata": {"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.4"}}, "nbformat": 4, "nbformat_minor": 2}