{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "id": "RRYSu48huSUW", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "94918789-b308-4c2d-c2fa-fff76bd9a476" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m376.1/376.1 KB\u001b[0m \u001b[31m9.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m199.2/199.2 KB\u001b[0m \u001b[31m13.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m70.1/70.1 KB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m39.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m15.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 KB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m199.2/199.2 KB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.2/114.2 KB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.6/264.6 KB\u001b[0m \u001b[31m10.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m158.8/158.8 KB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.1/49.1 KB\u001b[0m \u001b[31m3.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Building wheel for google-search-results (setup.py) ... \u001b[?25l\u001b[?25hdone\n", " Building wheel for cohere (setup.py) ... 
"source": [ "!pip -q install langchain huggingface_hub openai==0.27.2 google-search-results tiktoken cohere" ] }, { "cell_type": "markdown", "source": [ "# Comparing and Evaluating LLMs" ], "metadata": { "id": "e--hMIfWIwsj" } }, { "cell_type": "code", "source": [ "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"COHERE_API_KEY\"] = \"\"\n", "os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = \"\"" ], "metadata": { "id": "dNA4TsHpu6OM" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "!pip show langchain" ], "metadata": { "id": "J-KFB7J_u_3L", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "2d308b1b-028a-4ee4-f055-decd6534f4cd" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Name: langchain\n", "Version: 0.0.109\n", "Summary: Building applications with LLMs through composability\n", "Home-page: https://www.github.com/hwchase17/langchain\n", "Author: \n", "Author-email: \n", "License: MIT\n", "Location: /usr/local/lib/python3.9/dist-packages\n", "Requires: aiohttp, dataclasses-json, numpy, pydantic, PyYAML, requests, SQLAlchemy, tenacity\n", "Required-by: \n" ] } ] }, { "cell_type": "markdown", "source": [ "## Setting Up the LLMs" ], "metadata": { "id": "qjtcjrq7PnAb" } }, { "cell_type": "code", "source": [ "overall_temperature = 0.1  # low temperature shared by all models, so outputs are close to deterministic" ], "metadata": { "id": "3arHx18MQqIb" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "#### Setting up Flan models\n" ], "metadata": { "id": "HqwsGJDhvAQ5" } }, { "cell_type": "code", "source": [ "from langchain import PromptTemplate, HuggingFaceHub, LLMChain\n", "\n", "\n", "flan_20B = HuggingFaceHub(repo_id=\"google/flan-ul2\", \n", " model_kwargs={\"temperature\":overall_temperature, \n", " \"max_new_tokens\":200}\n", " ) " ], "metadata": { "id": "lgesD0jrvDyG" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "flan_t5xxl = HuggingFaceHub(repo_id=\"google/flan-t5-xxl\", \n", " model_kwargs={\"temperature\":overall_temperature, \n", " \"max_new_tokens\":200}\n", " ) " ], "metadata": { "id": "ys9FQLsISSCK" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# unfortunately not working\n", "# GPTNeoXT_20B = HuggingFaceHub(repo_id=\"togethercomputer/GPT-NeoXT-Chat-Base-20B\", \n", "# model_kwargs={\"temperature\":overall_temperature, \n", "# \"max_new_tokens\":200}\n", "# )" ], "metadata": { "id": "02EPvATsQytC" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# unfortunately not working\n", "# bloom7B = HuggingFaceHub(repo_id=\"bigscience/bloom-7b1\", \n", "# model_kwargs={\"temperature\":overall_temperature, \n", "# \"max_new_tokens\":200}\n", "# ) \n", "\n", "gpt_j6B = HuggingFaceHub(repo_id=\"EleutherAI/gpt-j-6B\", \n", " model_kwargs={\"temperature\":overall_temperature, \n", " \"max_new_tokens\":100}\n", " )" ], "metadata": { "id": "HgVv5srjXZOK" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "#### Setting up OpenAI models" ], "metadata": { "id": "M6yiwXNnvzxO" } }, { "cell_type": "code", "source": [ "from langchain.llms import OpenAI, OpenAIChat\n", "\n", "chatGPT_turbo = OpenAIChat(model_name='gpt-3.5-turbo', \n", " temperature=overall_temperature, \n", " max_tokens = 256,\n", " )\n", "\n", "gpt3_davinici_003 = OpenAI(model_name='text-davinci-003', \n", " temperature=overall_temperature, \n", " max_tokens = 256,\n", " )" ],
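"metadata": { "id": "openai-models-setup" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "Before wiring every model into a comparison lab, it can help to smoke-test a single one with a one-off `LLMChain`. The next cell is a minimal sketch, assuming the `flan_t5xxl` model defined above and a valid `HUGGINGFACEHUB_API_TOKEN`; the `smoke_*` names are illustrative additions, not part of the original setup." ], "metadata": { "id": "smoke-test-note" } }, { "cell_type": "code", "source": [ "# Minimal sanity check (sketch): run one prompt through a single model before\n", "# comparing them all. Assumes flan_t5xxl from above; the smoke_* names are illustrative.\n", "from langchain import LLMChain, PromptTemplate\n", "\n", "smoke_template = \"\"\"Question: {question}\n", "\n", "Answer: Let's think step by step.\"\"\"\n", "smoke_prompt = PromptTemplate(template=smoke_template, input_variables=[\"question\"])\n", "\n", "smoke_chain = LLMChain(llm=flan_t5xxl, prompt=smoke_prompt)\n", "print(smoke_chain.run(\"What is the opposite of up?\"))" ],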
"metadata": { "id": "-lzO5PfUpwfv" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "#### Setting up Cohere models" ], "metadata": { "id": "EKiXoHdvTDmc" } }, { "cell_type": "code", "source": [ "from langchain.llms import Cohere" ], "metadata": { "id": "Ca3oLfQPTIDV" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "cohere_command_xl = Cohere(model='command-xlarge', \n", " temperature=0.1, \n", " max_tokens = 256)" ], "metadata": { "id": "guizikdlTIDX" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "cohere_command_xl_nightly = Cohere(model='command-xlarge-nightly',\n", " temperature=0.1, \n", " max_tokens = 256)" ], "metadata": { "id": "hWHCDu8dTDmd" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## Set up a comparison lab" ], "metadata": { "id": "MDWW8nDERcGU" } }, { "cell_type": "code", "source": [ "from langchain.model_laboratory import ModelLaboratory" ], "metadata": { "id": "sy4s37W9m7X6" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", "Answer: Let's think step by step.\"\"\"\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" ], "metadata": { "id": "zZUMGKuvn_HV" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "lab = ModelLaboratory.from_llms([\n", " chatGPT_turbo, \n", " gpt3_davinici_003,\n", " gpt_j6B, \n", " flan_20B,\n", " flan_t5xxl, \n", " cohere_command_xl, \n", " cohere_command_xl_nightly\n", " ], prompt=prompt)" ], "metadata": { "id": "9s07AIuJm_Gv" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "Let's run it on some and compare!" ], "metadata": { "id": "LDQ8VcedrkIw" } }, { "cell_type": "code", "source": [ "lab.compare(\"What is the opposite of up?\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "2Eb6pNpimfu6", "outputId": "a70e1dc2-9aa8-42c0-82df-f34cbb5f32fa" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "What is the opposite of up?\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3m\n", "\n", "Up refers to a direction that is higher or above a certain point. The opposite of up would be a direction that is lower or below that same point. 
Therefore, the opposite of up is down.\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m The opposite of up is down.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "Step 1: What is the opposite of up?\n", "\n", "The opposite of up is down.\n", "\n", "Step 2: What is the opposite of down?\n", "\n", "The opposite of down is up.\n", "\n", "Step 3: What is the opposite of up?\n", "\n", "The opposite of up is down.\n", "\n", "Step 4: What is the opposite of down?\n", "\n", "The opposite of down is up.\n", "\n", "Step 5: What is the opposite of up?\n", "\n", "The opposite\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mDown is the opposite of up. Up is the action of rising. Down is the action of sinking. The answer: down.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mDown is the opposite of up. Down is the direction of the earth. Down is the direction of the sun. The sun is always in the sky. The sun is always in the sky. The answer: down.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m If you are up, you are above something. If you are below something, you are down. So the opposite of up is down.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m The opposite of up is not down. The opposite of up is not over. The opposite of up is not around. The opposite of up is not away. The opposite of up is not back. The opposite of up is not down. The opposite of up is not under. The opposite of up is not in. The opposite of up is not out. The opposite of up is not over. The opposite of up is not around. The opposite of up is not away. The opposite of up is not back. The opposite of up is not down. The opposite of up is not under. The opposite of up is not in. The opposite of up is not out. The opposite of up is not over. The opposite of up is not around. The opposite of up is not away. The opposite of up is not back. The opposite of up is not down. The opposite of up is not under. The opposite of up is not in. The opposite of up is not out. The opposite of up is not over. The opposite of up is not around. The opposite of up is not away. The opposite of up is not back. The opposite of up is not down. The opposite of up is not under. The opposite of up is not in.\u001b[0m\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "lab.compare(\"Answer the following question by reasoning step by step. The cafeteria had 23 apples. 
\\\n", "If they used 20 for lunch, and bought 6 more, how many apple do they have?\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "puWRd2nwT5eD", "outputId": "bd65d7ea-ddcd-41b2-e6a4-9b162ed704a7" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have?\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3m\n", "\n", "1. The cafeteria had 23 apples.\n", "2. They used 20 for lunch, which means they have 23 - 20 = 3 apples left.\n", "3. They bought 6 more apples, which means they now have 3 + 6 = 9 apples. \n", "\n", "Therefore, the cafeteria now has 9 apples.\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m \n", "\n", "Step 1: The cafeteria had 23 apples. \n", "\n", "Step 2: They used 20 for lunch. \n", "\n", "Step 3: They bought 6 more. \n", "\n", "Step 4: So, they have 23 + 6 = 29 apples.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "Step 1: Let's say they bought 6 more apples.\n", "\n", "Step 2: Let's say they used 20 for lunch.\n", "\n", "Step 3: Let's say they bought 6 more apples.\n", "\n", "Step 4: Let's say they used 20 for lunch.\n", "\n", "Step 5: Let's say they bought 6 more apples.\n", "\n", "Step 6: Let's say they used 20 for lunch.\n", "\n", "Step 7: Let's say they bought 6 more apples.\n", "\n", "\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mThey had 23 - 20 = 3 apples left. They have 3 + 6 = 9 apples. So the answer is 9.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mThe cafeteria has 23 - 20 = 3 apples left. They bought 6 + 3 = 7 apples. The cafeteria has 7 - 3 = 2 apples.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "\n", "They had 23 apples.\n", "\n", "They used 20 for lunch.\n", "\n", "They bought 6 more.\n", "\n", "So, they must have 7 apples left.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m\n", "\n", " 1. The cafeteria had 23 apples.\n", " 2. They used 20 for lunch.\n", " 3. They bought 6 more.\n", " 4. How many apple do they have?\n", "\n", "Let's put the information in order.\n", "\n", " 1. The cafeteria had 23 apples.\n", " 2. They used 20 for lunch.\n", " 3. They bought 6 more.\n", " 4. 
How many apple do they have?\n", "\n", "Now we can answer the question. They have 23 apples.\n", "\n", "Question: Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch, and bought 6 more, how many apple do they have?\n", "\n", "Answer: Let's think step by step.\n", "\n", " 1. The cafeteria had 23 apples.\n", " 2. They used 20 for lunch.\n", " 3. They bought 6 more.\n", " 4. How many apple do they have?\n", "\n", "Let's put the information in order.\n", "\n", " 1. The cafeteria had 23 apples.\n", " 2. They used 20 for lunch.\n", " 3. They bought 6 more.\n", " 4. How many apple do they\u001b[0m\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "lab.compare('''\n", "Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering.\n", "''')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "AP63DmDbaY_X", "outputId": "c1d92265-aeeb-48d7-f3ec-16d4a186a202" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "\n", "Can Geoffrey Hinton have a conversation with George Washington? Give the rationale before answering.\n", "\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3m\n", "\n", "First, we need to establish that Geoffrey Hinton is a real person who is alive today, while George Washington was a historical figure who died in 1799. Therefore, it is impossible for them to have a conversation in the traditional sense.\n", "\n", "However, if we were to imagine a hypothetical scenario where time travel was possible, and Geoffrey Hinton could travel back in time to meet George Washington, there would still be significant barriers to having a meaningful conversation.\n", "\n", "Firstly, George Washington lived in a very different time period with different cultural norms, language, and technology. It is likely that he would struggle to understand many of the concepts and ideas that Geoffrey Hinton would want to discuss.\n", "\n", "Secondly, Geoffrey Hinton is a computer scientist and artificial intelligence researcher, while George Washington was a military leader and politician. They would have very different areas of expertise and interests, making it difficult to find common ground for a conversation.\n", "\n", "Therefore, while it is technically possible for Geoffrey Hinton to have a conversation with George Washington in a hypothetical scenario, it is unlikely that it would be a productive or meaningful exchange.\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m First, Geoffrey Hinton is a living person and George Washington is a deceased person. Second, a conversation requires two living people to communicate with each other. Therefore, no, Geoffrey Hinton cannot have a conversation with George Washington.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "1. George Washington is dead.\n", "\n", "2. Geoffrey Hinton is not dead.\n", "\n", "3. Geoffrey Hinton is not a ghost.\n", "\n", "4. 
Geoffrey Hinton is not a zombie.\n", "\n", "5. Geoffrey Hinton is not a vampire.\n", "\n", "6. Geoffrey Hinton is not a werewolf.\n", "\n", "7. Geoffrey Hinton is not a ghost.\n", "\n", "8. Geoffrey Hinton is not a zombie\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mGeorge Washington died in 1799. Geoffrey Hinton was born in 1959. So, the final answer is no.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mGeorge Washington died in 1799. Geoffrey Hinton was born in 1939. The answer: no.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "Is Hinton a real person? Yes.\n", "Is Washington a real person? Yes.\n", "Are they both alive? No.\n", "Are they both dead? No.\n", "Do they live in the same time period? No.\n", "Do they live in the same country? No.\n", "Do they live on the same planet? Yes.\n", "Can they have a conversation over the phone? Yes.\n", "\n", "So the answer is: Yes, they could have a conversation over the phone.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m\n", "\n", "Step 1: Is Geoffrey Hinton a real person?\n", "\n", "Yes.\n", "\n", "Step 2: Is George Washington a real person?\n", "\n", "Yes.\n", "\n", "Step 3: Are they both alive?\n", "\n", "No.\n", "\n", "Step 4: Can they both speak?\n", "\n", "Yes.\n", "\n", "Step 5: Can they both understand spoken language?\n", "\n", "Yes.\n", "\n", "Step 6: Can they both speak the same language?\n", "\n", "No.\n", "\n", "Step 7: Can they both understand the same language?\n", "\n", "No.\n", "\n", "Conclusion: They cannot have a conversation.\u001b[0m\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "template = \"\"\"You are a creative story teller who can write wonderful interesting short stories: {question}\n", "\n", "Story:\"\"\"\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", "\n", "lab = ModelLaboratory.from_llms([\n", " chatGPT_turbo, \n", " gpt3_davinici_003,\n", " gpt_j6B, \n", " flan_20B,\n", " flan_t5xxl, \n", " cohere_command_xl, \n", " cohere_command_xl_nightly\n", " ], prompt=prompt)" ], "metadata": { "id": "Uoq5C6eMT5jr" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "lab.compare('''Write a sad story about carrot named Jason. The story should \\\n", "start with the carrot being a professional athlete of some kind, \\\n", "and end with the carrot having his heart broken.''')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "lo1mo_8OT5nI", "outputId": "37e23fe7-85a0-4eaf-e184-5d69ac711cb5" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "Write a sad story about carrot named Jason. 
The story should start with the carrot being a professional athlete of some kind, and end with the carrot having his heart broken.\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3mJason was a carrot like no other. He was a professional athlete, a runner to be exact. He had won numerous races and had a bright future ahead of him. He was the pride of his family and the envy of his peers.\n", "\n", "Jason had always been passionate about running. He loved the feeling of the wind in his leaves and the adrenaline rush that came with every race. He trained hard every day, pushing himself to the limit, always striving to be better.\n", "\n", "One day, Jason met a beautiful tomato named Sarah. She was a fellow athlete, a swimmer. They hit it off immediately and soon became inseparable. They trained together, ate together, and even slept together.\n", "\n", "Jason was head over heels in love with Sarah. He had never felt this way before. He knew that she was the one for him, and he was determined to make her his forever.\n", "\n", "But fate had other plans. One day, Sarah was diagnosed with a rare disease that left her unable to swim. She was devastated, and so was Jason. He tried to be there for her, to support her, but it was too much for him to bear.\n", "\n", "As Sarah's condition worsened, Jason's heart broke. He could no longer focus on his running, and his performance suffered\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m\n", "\n", "Jason the carrot was a professional athlete. He was the fastest runner in the vegetable kingdom, and he was proud of his accomplishments. He was admired by all the other vegetables, and he was always the life of the party.\n", "\n", "One day, Jason met a beautiful carrot named Daisy. She was the most beautiful carrot he had ever seen, and he was instantly smitten. He asked her out on a date, and she said yes.\n", "\n", "They went on many dates, and Jason was sure that Daisy was the one for him. He was so in love with her that he decided to propose. He bought her a beautiful diamond ring and asked her to marry him.\n", "\n", "To his surprise, Daisy said no. She told him that she wasn't ready for marriage yet, and that she needed more time to think about it. Jason was heartbroken. He had never felt so much pain before.\n", "\n", "He tried to move on, but he couldn't. He was so in love with Daisy that he couldn't bear to be without her. He stopped running and stopped competing in races. He was too sad to do anything.\n", "\n", "He eventually moved away, hoping that the distance would help him forget about Daisy. But no matter how far he\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "Jason was a professional athlete. He was a great athlete. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. He was the best athlete in the world. 
He was the best athlete in the world. He was the best athlete\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mJason was a professional carrot. He was a great athlete. He was a great basketball player. He was a great football player. He was a great baseball player. He was a great swimmer. He was a great runner. He was a great skateboarder. He was a great gymnast. He was a great dancer. He was a great singer. He was a great actor. He was a great comedian. He was a great teacher. He was a great doctor. He was a great lawyer. He was a great politician. He was a great businessman. He was a great teacher. He was a great singer. He was a great dancer. He was a great actor. He was a great comedian. He was a great businessman. He was a great teacher. He was a great doctor. He was a great lawyer.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mJason was a professional athlete. He was a carrot. He was a very good carrot. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was a very good athlete. He was \u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "Jason was a professional athlete, a runner to be exact. He was the best in his field, and he had a lot of fans. But one day, he met a girl named Jessica. She was a professional athlete too, and she was the best in her field. They fell in love and got married.\n", "\n", "But then, one day, Jessica cheated on Jason. She had an affair with his best friend. Jason was heartbroken. He didn't know what to do. He didn't want to live anymore. So he quit his job and moved to a small town in the middle of nowhere.\n", "\n", "He started working at a farm, growing carrots. He was good at it, and he made a lot of friends. But he was still heartbroken.\n", "\n", "One day, he met a girl named Emily. She was a professional athlete too, and she was the best in her field. They fell in love and got married.\n", "\n", "But then, one day, Emily cheated on Jason. She had an affair with his best friend. Jason was heartbroken. He didn't know what to do. He didn't want to live anymore. So he quit his job and moved to a small town in the middle of nowhere.\n", "\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m\n", "\n", "Jason was a professional athlete. He was a carrot. And he was in love.\n", "\n", "Jason was a carrot, but he was no ordinary carrot. 
He was a professional athlete. He was a track and field carrot. He was the fastest carrot in the world.\n", "\n", "Jason was in love with a beautiful carrot named Jessica. She was a professional athlete too. She was a track and field carrot as well.\n", "\n", "Jason and Jessica met at a track and field competition. They fell in love at first sight. They both knew that they were meant to be together.\n", "\n", "Jason and Jessica were the perfect couple. They were always together. They did everything together. They were always happy.\n", "\n", "But one day, Jason's heart was broken. Jessica broke up with him. She said that she needed some space.\n", "\n", "Jason was heartbroken. He didn't know what to do. He didn't know how to move on.\n", "\n", "So, Jason decided to quit track and field. He quit his job as a professional athlete. He moved to a small town and started working at a carrot farm.\n", "\n", "Jason was happy at the carrot farm. He had a new life. He had new\u001b[0m\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "\n", "template = \"\"\"Answer the question to the best of your abilities but if you are not sure then answer you don't know: {question}\n", "\n", "Answer:\"\"\"\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", "\n", "lab = ModelLaboratory.from_llms([\n", " chatGPT_turbo, \n", " gpt3_davinici_003,\n", " gpt_j6B, \n", " flan_20B,\n", " flan_t5xxl, \n", " cohere_command_xl, \n", " cohere_command_xl_nightly\n", " ], prompt=prompt)" ], "metadata": { "id": "c2c6EByDT5rQ" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "lab.compare('''I am riding a bicycle. The pedals are moving fast. I look into the mirror and I am not moving. Why is this?''')\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "7qCuIkZST5uK", "outputId": "a61a46ae-b5da-456b-deb7-6641d77716fd" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "I am riding a bicycle. The pedals are moving fast. I look into the mirror and I am not moving. Why is this?\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3mYou are likely on a stationary bicycle or trainer.\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m You are not moving because you are coasting, meaning you are not pedaling and the bike is still in motion due to the momentum from your previous pedaling.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. The bicycle is moving because the bicycle is moving. 
The bicycle is moving because the bicycle is\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mI am stationary\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mI am looking at the wrong angle.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "The pedals are moving fast. I look into the mirror and I am not moving. Why is this?\n", "The answer is: I am not moving because I am on a moving bicycle.\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m The mirror is on a stationary object.\u001b[0m\n", "\n" ] } ] }, { "cell_type": "markdown", "source": [ "### Fact Extraction" ], "metadata": { "id": "lOks9w5ndiqu" } }, { "cell_type": "code", "source": [ "template = \"\"\"{question}\n", "\n", "Answer:\"\"\"\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", "\n", "lab = ModelLaboratory.from_llms([\n", " chatGPT_turbo, \n", " gpt3_davinici_003,\n", " gpt_j6B, \n", " flan_20B,\n", " flan_t5xxl, \n", " cohere_command_xl, \n", " cohere_command_xl_nightly\n", " ], prompt=prompt)" ], "metadata": { "id": "JydfVsh8eAFx" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "lab.compare('''Please answer the question:\\n\n", "Who is the OnePlus COO?\\n\\n\n", "Output in the format: [first_name, surname]\\n\\n\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "The slowdown was inevitable, of course. Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales slide reverse course. The arrival of 5G was a nice reprieve, but next-generation telecom standards don’t arrive every year.\n", "\n", "“I personally think foldables are supply chain-driven innovation and not consumer insights,” Pei said. “Somebody invents OLED, and they can make a lot of money, because it’s a great technology. Then after a few years, a lot more companies make that, so they need to lower their prices. So they need to figure out what else they can sell at a higher margin. They develop flexible OLEDs, which they can sell at a higher price.”\n", "It’s hard not to be cynical about this stuff sometimes. Ditto for concept devices, though as I noted in my “ode to weird tech” post, as someone who follows this stuff for a living, I’m a fan of weirdness for weirdness sake, be it the rollable Motorola Rizr screen or the OnePlus glowing cooling fluid. Certainly following the automotive industry’s lead of creating concept devices is a trend that is likely to only become more pervasive.\n", "\n", "OnePlus COO Kinder Liu told me this week that gauging consumer interest is one of the “multiple reasons” his company is engaging with the concept. 
He added, “Also, we want to encourage continuous innovation inside our company.”\n", "\n", "Pretty much everyone I engaged with this week echoed the sentiment that smartphones are in a rut. For the first time, however, it’s not a foregone conclusion that there’s a way of getting out.\n", "''')\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "crsqwwmrdLQl", "outputId": "69a057a4-f6b7-4a9d-c416-26dd9b563bc6" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "Please answer the question:\n", "\n", "Who is the OnePlus COO?\n", "\n", "\n", "Output in the format: [first_name, surname]\n", "\n", "\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "The slowdown was inevitable, of course. Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales slide reverse course. The arrival of 5G was a nice reprieve, but next-generation telecom standards don’t arrive every year.\n", "\n", "“I personally think foldables are supply chain-driven innovation and not consumer insights,” Pei said. “Somebody invents OLED, and they can make a lot of money, because it’s a great technology. Then after a few years, a lot more companies make that, so they need to lower their prices. So they need to figure out what else they can sell at a higher margin. They develop flexible OLEDs, which they can sell at a higher price.”\n", "It’s hard not to be cynical about this stuff sometimes. Ditto for concept devices, though as I noted in my “ode to weird tech” post, as someone who follows this stuff for a living, I’m a fan of weirdness for weirdness sake, be it the rollable Motorola Rizr screen or the OnePlus glowing cooling fluid. Certainly following the automotive industry’s lead of creating concept devices is a trend that is likely to only become more pervasive.\n", "\n", "OnePlus COO Kinder Liu told me this week that gauging consumer interest is one of the “multiple reasons” his company is engaging with the concept. He added, “Also, we want to encourage continuous innovation inside our company.”\n", "\n", "Pretty much everyone I engaged with this week echoed the sentiment that smartphones are in a rut. For the first time, however, it’s not a foregone conclusion that there’s a way of getting out.\n", "\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3m[Kinder, Liu]\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m [Kinder, Liu]\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "OnePlus COO Kinder Liu\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "\n", "The slowdown was inevitable, of course. 
Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mKinder [surname] Liu\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mKinder, Liu\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "Kinder Liu\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m\n", "Kinder Liu\u001b[0m\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "lab.compare('''Please answer the question:\\n\n", "What is a supply chain driven innovation?\\n\\n\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "The slowdown was inevitable, of course. Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales slide reverse course. The arrival of 5G was a nice reprieve, but next-generation telecom standards don’t arrive every year.\n", "\n", "“I personally think foldables are supply chain-driven innovation and not consumer insights,” Pei said. “Somebody invents OLED, and they can make a lot of money, because it’s a great technology. Then after a few years, a lot more companies make that, so they need to lower their prices. So they need to figure out what else they can sell at a higher margin. They develop flexible OLEDs, which they can sell at a higher price.”\n", "It’s hard not to be cynical about this stuff sometimes. Ditto for concept devices, though as I noted in my “ode to weird tech” post, as someone who follows this stuff for a living, I’m a fan of weirdness for weirdness sake, be it the rollable Motorola Rizr screen or the OnePlus glowing cooling fluid. Certainly following the automotive industry’s lead of creating concept devices is a trend that is likely to only become more pervasive.\n", "\n", "OnePlus COO Kinder Liu told me this week that gauging consumer interest is one of the “multiple reasons” his company is engaging with the concept. He added, “Also, we want to encourage continuous innovation inside our company.”\n", "\n", "Pretty much everyone I engaged with this week echoed the sentiment that smartphones are in a rut. 
For the first time, however, it’s not a foregone conclusion that there’s a way of getting out.\n", "''')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "H3QT55S3eK2y", "outputId": "5f29a5ae-8cc4-4906-c53b-b7cc4559f259" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[1mInput:\u001b[0m\n", "Please answer the question:\n", "\n", "What is a supply chain driven innovation?\n", "\n", "\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "The slowdown was inevitable, of course. Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales slide reverse course. The arrival of 5G was a nice reprieve, but next-generation telecom standards don’t arrive every year.\n", "\n", "“I personally think foldables are supply chain-driven innovation and not consumer insights,” Pei said. “Somebody invents OLED, and they can make a lot of money, because it’s a great technology. Then after a few years, a lot more companies make that, so they need to lower their prices. So they need to figure out what else they can sell at a higher margin. They develop flexible OLEDs, which they can sell at a higher price.”\n", "It’s hard not to be cynical about this stuff sometimes. Ditto for concept devices, though as I noted in my “ode to weird tech” post, as someone who follows this stuff for a living, I’m a fan of weirdness for weirdness sake, be it the rollable Motorola Rizr screen or the OnePlus glowing cooling fluid. Certainly following the automotive industry’s lead of creating concept devices is a trend that is likely to only become more pervasive.\n", "\n", "OnePlus COO Kinder Liu told me this week that gauging consumer interest is one of the “multiple reasons” his company is engaging with the concept. He added, “Also, we want to encourage continuous innovation inside our company.”\n", "\n", "Pretty much everyone I engaged with this week echoed the sentiment that smartphones are in a rut. For the first time, however, it’s not a foregone conclusion that there’s a way of getting out.\n", "\n", "\n", "\u001b[1mOpenAIChat\u001b[0m\n", "Params: {'model_name': 'gpt-3.5-turbo', 'temperature': 0.1, 'max_tokens': 256}\n", "\u001b[36;1m\u001b[1;3mA supply chain-driven innovation is when a company develops a new product or technology based on the availability and cost of materials and components in their supply chain, rather than consumer demand or insights. This can lead to the development of new products with higher profit margins, such as flexible OLED screens in smartphones.\u001b[0m\n", "\n", "\u001b[1mOpenAI\u001b[0m\n", "Params: {'model_name': 'text-davinci-003', 'temperature': 0.1, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1, 'request_timeout': None, 'logit_bias': {}}\n", "\u001b[33;1m\u001b[1;3m Supply chain driven innovation is when companies use new technologies and materials to create products that can be sold at a higher margin. This is often done in response to market forces, such as the need to lower prices due to increased competition. 
Examples of this include the development of flexible OLEDs and concept devices, such as the rollable Motorola Rizr screen or the OnePlus glowing cooling fluid.\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'EleutherAI/gpt-j-6B', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 100}}\n", "\u001b[38;5;200m\u001b[1;3m\n", "\n", "Smartphone makers searched for a way forward at MWC 2023\n", "\n", "Foldables, 6G, light shows -- there are a lot of ideas floating around, but no one has cracked the code\n", "\n", "The slowdown was inevitable, of course. Nothing stays hot forever — especially in this industry. By tech standards, smartphones have had a good run, but the last few years have seen device makers searching for the magic bullet to help the sales slide reverse course. The arrival of 5\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-ul2', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[32;1m\u001b[1;3mfoldables\u001b[0m\n", "\n", "\u001b[1mHuggingFaceHub\u001b[0m\n", "Params: {'repo_id': 'google/flan-t5-xxl', 'task': None, 'model_kwargs': {'temperature': 0.1, 'max_new_tokens': 200}}\n", "\u001b[31;1m\u001b[1;3mfoldables\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[36;1m\u001b[1;3m\n", "Supply chain driven innovation\u001b[0m\n", "\n", "\u001b[1mCohere\u001b[0m\n", "Params: {'model': 'command-xlarge-nightly', 'max_tokens': 256, 'temperature': 0.1, 'k': 0, 'p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0, 'truncate': None}\n", "\u001b[33;1m\u001b[1;3m\n", "Supply chain driven innovation is when a new technology is created because of the need to sell a new product at a higher margin.\u001b[0m\n", "\n" ] } ] } ] }