{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import tweepy\n",
    "import time\n",
    "from tweepy import OAuthHandler\n",
    "\n",
    "\n",
    "consumer_key = '*******************************'\n",
    "consumer_secret = '*******************************'\n",
    "access_token = '*******************************'\n",
    "access_token_secret = '*******************************'\n",
    " \n",
    "auth = OAuthHandler(consumer_key, consumer_secret)\n",
    "auth.set_access_token(access_token, access_token_secret)\n",
    " \n",
    "#api = tweepy.API(auth)\n",
    "api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=3, retry_delay=60)"
   ]
  },
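  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A safer alternative to hardcoding keys is loading them from a file kept out of version control. The cell below is a minimal sketch, assuming a hypothetical `twitter_credentials.json` holding the four keys; the filename and key names are illustrative, not part of the original setup."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: load the OAuth credentials from a JSON file instead of\n",
    "# hardcoding them. 'twitter_credentials.json' is a hypothetical filename.\n",
    "with open('twitter_credentials.json') as f:\n",
    "    creds = json.load(f)\n",
    "\n",
    "auth = OAuthHandler(creds['consumer_key'], creds['consumer_secret'])\n",
    "auth.set_access_token(creds['access_token'], creds['access_token_secret'])\n",
    "api = tweepy.API(auth, wait_on_rate_limit=True)"
   ]
  },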
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Name: uzay00\n",
      "ID: 14519511\n",
      "Location: \n",
      "Friends: 404\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Creates the user object. The me() method returns the user whose authentication keys were used.\n",
    "user = api.me()\n",
    " \n",
    "print('Name: ' + user.name)\n",
    "print('ID: ' + str(user.id))\n",
    "print('Location: ' + user.location)\n",
    "print('Friends: ' + str(user.friends_count))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Name: Kadir\n",
      "ID: 2332464662\n",
      "Location: Türkiye\n",
      "Friends: 143\n"
     ]
    }
   ],
   "source": [
    "user = api.get_user('kadirakgulll')\n",
    "\n",
    "print('Name: ' + user.name)\n",
    "print('ID: ' + str(user.id))\n",
    "print('Location: ' + user.location)\n",
    "print('Friends: ' + str(user.friends_count))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Burayı okuyan kaldı mı ?'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "user.description"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "datetime.datetime(2014, 2, 7, 22, 2, 54)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "user.created_at"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2018-10-12 06:26:34 RT @ProfDemirtas: Geri kalmış ülkelere: GEÇMİŞ OLSUN https://t.co/jfDZwzYFqB\n",
      "2018-10-11 21:48:11 RT @ckucukozmen: Dünyanın ilk enflasyon polisi bizde. Ama haberi sunanlar neye gülüyor anlamadım. https://t.co/4a4WIUObWL\n",
      "2018-10-11 15:08:18 RT @coolstuffcheap: HDMI https://t.co/ZIW1Mtdp9U\n",
      "2018-10-10 20:52:31 -Knock Knock\n",
      "-Who's there?\n",
      "-Let the devil in\n",
      "-What?\n",
      " Eminem - Venom https://t.co/npjZ5DmX34 @YouTube aracılığıyla\n",
      "2018-10-10 20:04:10 RT @educatedear: Arda’nın tüm bunlara rağmen 5-10 sene sonra, belki çok daha yakın, bu ülkede teknik direktör, sportif direktör falan olaca…\n",
      "2018-10-09 22:09:56 RT @ozgurugzo: ben izlerken ruhumu teslim ettim neyine gülüyosunuz acaba ya https://t.co/D9IhtfNwBS\n",
      "2018-10-09 17:13:51 RT @ASRomaEN: International football is important but club football is importanter\n",
      "2018-10-08 17:04:54 RT @canwaves: keşke Servet-i Fünun dönemine geri dönebilseydik ya https://t.co/jUArFBQ890\n",
      "2018-10-07 15:35:36 RT @siyasifenomen: Siyasetçilerin, eşlerini ya da akrabalarını kamuda işe almasını yasaklayan kanun; Parlamento’da “oy birliğiye” kabul edi…\n",
      "2018-10-07 10:52:26 RT @TuhafAmaGercek: İnsan tükürüğü, morfinden 6 kat daha güçlü bir ağrı kesici olan Opiorphin adlı kimyasal bir bileşim içerir. https://t.c…\n",
      "2018-10-07 10:50:43 RT @takostate: mont mu alsam yoksa tofaş mı bir turlu karar veremiyorum\n",
      "2018-10-06 19:02:13 RT @BBahadirErdem: İŞKURun 1500 kişilik temizlik işçisi kadrosuna 6000 Üniversite 8000 Yüksek Okul mezunu başvuruyorsa ülkede düşünülmesi v…\n",
      "2018-10-06 14:56:13 RT @Roxabbe: Patates kızartması ve su sevmeyen insan hiç görmedim bi de tom hardy\n",
      "2018-10-05 20:52:28 RT @CirkinIstanbul: Büfe mi bisiklet yolunun üzerine yapılmış, bisiklet yolu mu büfenin altından geçirilmiş? @uskudarbld #tavukmuyumurtudan…\n",
      "2018-10-05 18:00:45 RT @theRA_official: Şu sağdaki Burcu mayonez. Soldaki Hellmann's. Yerli malını sikiyim. https://t.co/WAhja7IybP\n",
      "2018-10-05 18:00:12 RT @demarkegaming: #5EkimDünyaÖğretmenlerGünü https://t.co/5hGSlkeYR5\n",
      "2018-10-04 19:19:49 RT @archillect: https://t.co/F8ua5NgnlU\n",
      "2018-10-04 13:56:51 RT @sonat_isik: arkadaşlar nude atma akımının karşıtı gibi gözükmek istemem ama lut kavminde de her şeyin toplu nudelaşma ile başladığına e…\n",
      "2018-10-04 10:46:25 RT @Ucupak: bugün \"kasten adam öldürme, uyuşturucu ticareti, tecavüz ve çocuk istismarı\" suçlarına af isteyen aziz halkımız, 2-3 ay önce bu…\n",
      "2018-10-04 10:45:25 RT @kul0s: narkotik uyuma https://t.co/72WMOLy4Zy\n"
     ]
    }
   ],
   "source": [
    "# To print out the last 20 tweets by @VeriDefteri\n",
    "\n",
    "VeriDefteri_tweets = api.user_timeline('kadirakgulll')\n",
    "for tweet in VeriDefteri_tweets:\n",
    "    print( tweet.created_at, tweet.text)"
   ]
  },
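  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`user_timeline` returns a single page (20 tweets by default). To walk further back in a timeline, `tweepy.Cursor` handles the pagination. A minimal sketch, reusing the authenticated `api` object; the cap of 50 tweets is arbitrary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: page through a timeline with tweepy.Cursor.\n",
    "# items(50) caps how many tweets are fetched in total.\n",
    "for tweet in tweepy.Cursor(api.user_timeline, id='kadirakgulll').items(50):\n",
    "    print(tweet.created_at, tweet.text)"
   ]
  },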
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'RT @kul0s: narkotik uyuma https://t.co/72WMOLy4Zy'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tweet.text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2332464662"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tweet.author.id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "False"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tweet.favorited"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RT @cagrimbakirci: @teslajnr @MeteAtature İnsanlar gerçek hayatta karşılaşsalar insanlara yapamayacakları şeyleri internette yapabiliyorlar…\n",
      "RT @deaneckles: How are biological and social contagion affected by changes to network structure? Recent work has claimed a \"weakness of lo…\n",
      "\"Hey onbeşli onbeşli\" ağıtını düğünlerde göbek havası olarak çalmakta sorun yokmuş, ama Çav Bella ile kalça kıvırma… https://t.co/jzLs3ypmaW\n",
      "İçişleri Bakanlığı, yolcuyu mağdur eden taksici sorununa el koydu https://t.co/Gd54MHvaa0\n",
      "UK to cut hybrid car subsidies https://t.co/pkfuc0Hu59\n",
      "@say_cem @BahaOkar @NalanMahsereci @BilimveGelecek Odanıza gelip bizzat imzalatabiliyor muyuz hocam?  :)\n",
      "Graphics processing units that were designed to make video games look better are now being deployed to power everyt… https://t.co/pEuIp1v5DL\n",
      "Have you ever wondered what would happen if a drone hit an airplane wing? Researchers at the University of Dayton t… https://t.co/qIGvFyiK02\n",
      "How to set up your home Wi-Fi https://t.co/5D9c9pBmcp https://t.co/wxcNx6WnPt\n",
      "Yargıtay Başkanı: Türk yargısında 5 kara delik var https://t.co/oNC2WwfoLX\n",
      "RT @ebskisafilm: #Eğitim konusunda söyleyecek sözün varsa filmini çek, toplamda 40 bin TL olan bu ödüllerden birini kazan. \n",
      "\n",
      "SON BAŞVURU TA…\n",
      "\"Commuting patterns: the flow and jump model and supporting data\"  https://t.co/K3EI0zpcl7\n",
      "\"The Statistical Physics of Real-World Networks. (arXiv:1810.05095v1 [physics.soc-ph])\"  https://t.co/6GGKSgSRTn\n",
      "\"Network localization is unalterable by infections in bursts. (arXiv:1810.04880v1 [physics.soc-ph])\"  https://t.co/FtZkWPffoI\n",
      "\"Leveraging local network communities to predict academic performance. (arXiv:1810.04730v1 [https://t.co/lwVVolmoyC… https://t.co/0eqelxr8e5\n",
      "\"Detecting Core-Periphery Structures by Surprise. (arXiv:1810.04717v1 [physics.soc-ph])\"  https://t.co/TIc6DpP7Uk\n",
      "RT @Phil_Baty: It has happened: China has overtaken the US to become the world’s largest producer of scientific research papers - well ahea…\n",
      "RT @eglerean: Excellent blog post by @OnervaKorhonen. I especially liked figure 2 https://t.co/b6GTYZHBuy https://t.co/Fy4jeqNZvN\n"
     ]
    }
   ],
   "source": [
    "#Recent tweets from accounts you follow:\n",
    "tweets = api.home_timeline()\n",
    "for tweet in tweets:\n",
    "    print(tweet.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tweet = api.update_status('Made with Tweepy yeah!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_friends(user_id):\n",
    "    users = []\n",
    "    page_count = 0\n",
    "    for user in tweepy.Cursor(api.friends, id=user_id, count=10).pages():\n",
    "        page_count += 1\n",
    "        print ('Getting page {} for friends'.format(page_count))\n",
    "        users.extend(user)\n",
    "    return users"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Getting page 1 for friends\n",
      "Getting page 2 for friends\n",
      "Getting page 3 for friends\n",
      "Getting page 4 for friends\n",
      "Getting page 5 for friends\n",
      "Getting page 6 for friends\n",
      "Getting page 7 for friends\n",
      "Getting page 8 for friends\n",
      "Getting page 9 for friends\n",
      "Getting page 10 for friends\n",
      "Getting page 11 for friends\n",
      "Getting page 12 for friends\n",
      "Getting page 13 for friends\n",
      "Getting page 14 for friends\n",
      "Getting page 15 for friends\n"
     ]
    }
   ],
   "source": [
    "friends = get_friends(user.id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "for f in friends:\n",
    "    # print(f._json['name'], f._json['id'])\n",
    "    pass"
   ]
  },
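  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Since `json` is already imported at the top, the collected friend objects can be persisted for later analysis. A minimal sketch; `friends.json` is an illustrative filename, not part of the original workflow."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: save basic friend info with the json module.\n",
    "# 'friends.json' is an illustrative output filename.\n",
    "friend_records = [{'id': f.id, 'name': f.name, 'screen_name': f.screen_name}\n",
    "                  for f in friends]\n",
    "with open('friends.json', 'w') as fp:\n",
    "    json.dump(friend_records, fp, ensure_ascii=False, indent=2)"
   ]
  },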
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Source:\n",
    " - https://www.geeksforgeeks.org/twitter-sentiment-analysis-using-python/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import tweepy\n",
    "from tweepy import OAuthHandler\n",
    "from textblob import TextBlob\n",
    " \n",
    "class TwitterClient(object):\n",
    "    '''\n",
    "    Generic Twitter Class for sentiment analysis.\n",
    "    '''\n",
    "    def __init__(self):\n",
    "        '''\n",
    "        Class constructor or initialization method.\n",
    "        '''\n",
    "        # keys and tokens from the Twitter Dev Console\n",
    "\n",
    "        consumer_key = '*******************************'\n",
    "        consumer_secret = '*******************************'\n",
    "        access_token = '*******************************'\n",
    "        access_token_secret = '*******************************'\n",
    "        \n",
    "        # attempt authentication\n",
    "        try:\n",
    "            # create OAuthHandler object\n",
    "            self.auth = OAuthHandler(consumer_key, consumer_secret)\n",
    "            # set access token and secret\n",
    "            self.auth.set_access_token(access_token, access_token_secret)\n",
    "            # create tweepy API object to fetch tweets\n",
    "            self.api = tweepy.API(self.auth)\n",
    "        except:\n",
    "            print(\"Error: Authentication Failed\")\n",
    " \n",
    "    def clean_tweet(self, tweet):\n",
    "        '''\n",
    "        Utility function to clean tweet text by removing links, special characters\n",
    "        using simple regex statements.\n",
    "        '''\n",
    "        return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n",
    " \n",
    "    def get_tweet_sentiment(self, tweet):\n",
    "        '''\n",
    "        Utility function to classify sentiment of passed tweet\n",
    "        using textblob's sentiment method\n",
    "        '''\n",
    "        # create TextBlob object of passed tweet text\n",
    "        analysis = TextBlob(self.clean_tweet(tweet))\n",
    "        # set sentiment\n",
    "        if analysis.sentiment.polarity > 0:\n",
    "            return 'positive'\n",
    "        elif analysis.sentiment.polarity == 0:\n",
    "            return 'neutral'\n",
    "        else:\n",
    "            return 'negative'\n",
    " \n",
    "    def get_tweets(self, query, count = 10):\n",
    "        '''\n",
    "        Main function to fetch tweets and parse them.\n",
    "        '''\n",
    "        # empty list to store parsed tweets\n",
    "        tweets = []\n",
    " \n",
    "        try:\n",
    "            # call twitter api to fetch tweets\n",
    "            fetched_tweets = self.api.search(q = query, count = count)\n",
    " \n",
    "            # parsing tweets one by one\n",
    "            for tweet in fetched_tweets:\n",
    "                # empty dictionary to store required params of a tweet\n",
    "                parsed_tweet = {}\n",
    " \n",
    "                # saving text of tweet\n",
    "                parsed_tweet['text'] = tweet.text\n",
    "                # saving sentiment of tweet\n",
    "                parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n",
    " \n",
    "                # appending parsed tweet to tweets list\n",
    "                if tweet.retweet_count > 0:\n",
    "                    # if tweet has retweets, ensure that it is appended only once\n",
    "                    if parsed_tweet not in tweets:\n",
    "                        tweets.append(parsed_tweet)\n",
    "                else:\n",
    "                    tweets.append(parsed_tweet)\n",
    " \n",
    "            # return parsed tweets\n",
    "            return tweets\n",
    " \n",
    "        except tweepy.TweepError as e:\n",
    "            # print error (if any)\n",
    "            print(\"Error : \" + str(e))"
   ]
  },
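  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The classification above hinges on TextBlob's polarity score, a float in [-1, 1]: positive above 0, neutral at 0, negative below. A quick standalone check with a made-up sentence shows the raw score those thresholds act on."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: inspect the polarity score TwitterClient thresholds on.\n",
    "# The sample sentence is made up for illustration.\n",
    "from textblob import TextBlob\n",
    "\n",
    "sample = 'Deep learning is great, but debugging it is awful.'\n",
    "print(TextBlob(sample).sentiment.polarity)"
   ]
  },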
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main(query = 'Donald Trump', count = 10):\n",
    "    # creating object of TwitterClient Class\n",
    "    api = TwitterClient()\n",
    "    # calling function to get tweets\n",
    "    tweets = api.get_tweets(query, count)\n",
    " \n",
    "    # picking positive tweets from tweets\n",
    "    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']\n",
    "    # percentage of positive tweets\n",
    "    print(\"Positive tweets percentage:\")\n",
    "    print(100*len(ptweets)/len(tweets))\n",
    "    \n",
    "    # picking negative tweets from tweets\n",
    "    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']\n",
    "    # percentage of negative tweets\n",
    "    print(\"Negative tweets percentage:\")\n",
    "    print(100*len(ntweets)/len(tweets))\n",
    "    \n",
    "    # percentage of neutral tweets\n",
    "    print(\"Neutral tweets percentage:\")\n",
    "    print(100 - 100*len(ntweets)/len(tweets) - 100*len(ptweets)/len(tweets))\n",
    " \n",
    "    # printing first 5 positive tweets\n",
    "    print(\"\\n\\nPositive tweets:\")\n",
    "    for tweet in ptweets[:10]:\n",
    "        print(tweet['text'])\n",
    " \n",
    "    # printing first 5 negative tweets\n",
    "    print(\"\\n\\nNegative tweets:\")\n",
    "    for tweet in ntweets[:10]:\n",
    "        print(tweet['text'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Positive tweets percentage:\n",
      "22.727272727272727\n",
      "Negative tweets percentage:\n",
      "9.090909090909092\n",
      "Neutral tweets percentage:\n",
      "68.18181818181819\n",
      "\n",
      "\n",
      "Positive tweets:\n",
      "RT @math_rachel: Is your company interested in sponsoring diversity fellowships for our upcoming Practical Deep Learning for Coders course…\n",
      "RT @DataScienceNIG: Dr. Abiodun Modupe kick-started the session on \"Deep Learning for Natural Language Processing ( NLP)\" by laying a good…\n",
      "RT @SwissCognitive: Real World Application of Multi-Agent Deep Reinforcement Learning: Autonomous Traffic Flow Management at GTC Europe 201…\n",
      "I’ve just come up with a great idea for the application of AI and Deep Learning to what currently appears to be an… https://t.co/pf9MHK1TVj\n",
      "Humans will take forever just to get a single information while these AI will only take minutes. As we become more… https://t.co/ZvZtAy10bS\n",
      "A new developmental reinforcement learning approach for sensorimotor space enlargement https://t.co/yUFMRH57wL\n",
      "Real World Application of Multi-Agent Deep Reinforcement Learning: Autonomous Traffic Flow Management at GTC Europe… https://t.co/55ccFtq2k0\n",
      "RT @PyDataFFM: Thanks for all the 'thank yous' :) Seems everyone had a great time at PyData Frankfurt #1 - we will be back next month with…\n",
      "RT @Robertson_SJ: NEW: Machine and deep learning for sport-specific movement recognition: a systematic review of model development and perf…\n",
      "RT @OpenAI: Apply for our Winter 2019 OpenAI Scholars Program, open to individuals from underrepresented groups in STEM interested in becom…\n",
      "\n",
      "\n",
      "Negative tweets:\n",
      "RT @jeremyphoward: Learn:\n",
      "- Intro Machine Learning https://t.co/gkefwjTwGh\n",
      "- Practical Deep Learning https://t.co/rue2Hahfv5 \n",
      "- Cutting Edg…\n",
      "RT @teamrework: We sat down with @RolandMemisevic from @twentybn to discuss his experiences at RE•WORK summits and gave us a sneak peek as…\n",
      "RT @SwissCognitive: How Artificial Intelligence Is Helping #Pharmaceuticals Develop Drugs\n",
      "#Bot #Cloud_Computing #CTO #Deep #Deep_Learning #…\n",
      "RT @jimhaseloff: Extensive review of \"Opportunities and obstacles for deep learning in biology and medicine” J. R. Soc. Interface 15: 20170…\n",
      "https://t.co/3jgZpDEIxc\n",
      "D Kartik et. al.\n",
      "Policy Design for Active Sequential Hypothesis Testing using Deep\n",
      "  Learni… https://t.co/9RjfsnncUY\n",
      "RT @fchollet: You can start training Keras models on TPUs, from the comfort of your browser, in a few seconds. Try it: https://t.co/G95rxp7…\n"
     ]
    }
   ],
   "source": [
    "main(query = 'deep learning', count = 100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "# empty list to store parsed tweets\n",
    "tweets = []\n",
    "fetched_tweets = api.search(q = 'CMPE 251', count = 10)\n",
    "# parsing tweets one by one\n",
    "for tweet in fetched_tweets:\n",
    "    tweets.append((tweet.id_str, tweet.author.id_str, tweet.author.name , tweet.text))"
   ]
  },
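  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`api.search` returns at most one page of results. For a larger pull the same `Cursor` pattern applies; a minimal sketch, reusing the query above with an arbitrary cap of 30 tweets."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch: paginate a search with tweepy.Cursor.\n",
    "# items(30) caps the total number of tweets fetched.\n",
    "for tweet in tweepy.Cursor(api.search, q='CMPE 251').items(30):\n",
    "    print(tweet.author.screen_name, '-', tweet.text)"
   ]
  },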
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('1050652767985913856',\n",
       "  '1585577172',\n",
       "  'Enes Gül',\n",
       "  'Cmpe 251 falan filan intermilan'),\n",
       " ('1050652749417738243',\n",
       "  '338222489',\n",
       "  'Hasan Kemik',\n",
       "  'CMPE 251, course tryout!'),\n",
       " ('1050652697097990144', '2332464662', 'Kadir', 'CMPE 251 data science ?!'),\n",
       " ('1050652682220797953',\n",
       "  '318510807',\n",
       "  'Selin Yeşilselve',\n",
       "  'Cmpe 251 is a great lesson!!!'),\n",
       " ('1050652679997779969', '14519511', 'uzay00', 'CMPE 251 is fun!! haha'),\n",
       " ('1050652671361728512', '217888241', 'Bartu Işıklar', 'CMPE 251 is not fun')]"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tweets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'CMPE 251 is not fun'"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tweets[-1]"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}