{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "5beea67c", "metadata": {}, "outputs": [ { "data": { "application/javascript": [ "IPython.OutputArea.prototype._should_scroll = function(lines) {\n", " return false;\n", "}\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "%%javascript\n", "IPython.OutputArea.prototype._should_scroll = function(lines) {\n", " return false;\n", "}" ] }, { "cell_type": "code", "execution_count": 2, "id": "99b92926", "metadata": { "scrolled": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "ename": "FileNotFoundError", "evalue": "[Errno 2] No such file or directory: 'euro-daily-hist_1999_2020.csv'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/var/folders/bz/5cj1tzbj5xn319rpqrw6gpmh0000gn/T/ipykernel_33875/3007294150.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;31m# open file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0mx_rates\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"euro-daily-hist_1999_2020.csv\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;31m# head, tail, and basic info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/util/_decorators.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 309\u001b[0m \u001b[0mstacklevel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstacklevel\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 310\u001b[0m )\n\u001b[0;32m--> 311\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 312\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 313\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36mread_csv\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, error_bad_lines, warn_bad_lines, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options)\u001b[0m\n\u001b[1;32m 584\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwds_defaults\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 585\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 586\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 587\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 588\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 480\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0;31m# Create the parser.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 482\u001b[0;31m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTextFileReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 483\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 484\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mchunksize\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0miterator\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, f, engine, **kwds)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"has_index_names\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"has_index_names\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_make_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/readers.py\u001b[0m in \u001b[0;36m_make_engine\u001b[0;34m(self, engine)\u001b[0m\n\u001b[1;32m 1038\u001b[0m )\n\u001b[1;32m 1039\u001b[0m \u001b[0;31m# error: Too many arguments for \"ParserBase\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1040\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmapping\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mengine\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[call-arg]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1041\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1042\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_failover_to_python\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, src, **kwds)\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0;31m# open handles\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_open_handles\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msrc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 52\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandles\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/parsers/base_parser.py\u001b[0m in \u001b[0;36m_open_handles\u001b[0;34m(self, src, kwds)\u001b[0m\n\u001b[1;32m 220\u001b[0m \u001b[0mLet\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mreaders\u001b[0m \u001b[0mopen\u001b[0m \u001b[0mIOHandles\u001b[0m \u001b[0mafter\u001b[0m \u001b[0mthey\u001b[0m \u001b[0mare\u001b[0m \u001b[0mdone\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtheir\u001b[0m \u001b[0mpotential\u001b[0m \u001b[0mraises\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 221\u001b[0m \"\"\"\n\u001b[0;32m--> 222\u001b[0;31m self.handles = get_handle(\n\u001b[0m\u001b[1;32m 223\u001b[0m \u001b[0msrc\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 224\u001b[0m \u001b[0;34m\"r\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/anaconda3/envs/geo_env/lib/python3.9/site-packages/pandas/io/common.py\u001b[0m in \u001b[0;36mget_handle\u001b[0;34m(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)\u001b[0m\n\u001b[1;32m 700\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencoding\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m\"b\"\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 701\u001b[0m \u001b[0;31m# Encoding\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 702\u001b[0;31m handle = open(\n\u001b[0m\u001b[1;32m 703\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 704\u001b[0m \u001b[0mioargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'euro-daily-hist_1999_2020.csv'" ] } ], "source": [ "# import libraries and set display options\n", "import pandas as pd\n", "import pprint\n", "\n", "# set frame width and center output\n", "from IPython.core.display import display, HTML\n", "display(HTML(\"\"\"\n", "\n", "\"\"\"))\n", "\n", "# pretty print options\n", "pd.options.display.float_format = '{:20,.4f}'.format\n", "pd.set_option('display.max_rows', None)\n", "pd.set_option('display.max_columns', None)\n", "pd.set_option('display.width', 3000)\n", "pd.set_option('display.colheader_justify', 'center')\n", "pd.set_option('display.precision', 3)\n", "\n", "# open file\n", "x_rates = pd.read_csv(\"euro-daily-hist_1999_2020.csv\")\n", "\n", "# head, tail, and basic info\n", "# display(x_rates.head())\n", "# display(x_rates.tail())\n", "# display(x_rates.info())" ] }, { "cell_type": "code", "execution_count": null, "id": "eed88132", "metadata": { "scrolled": false }, "outputs": [], "source": [ "# strip brackets and trailing space from country names\n", "# replace remaining space with underscore\n", "# lower case all column names\n", "x_rates.columns = x_rates.columns.str.replace(\"[\",\"\").str.replace(\"]\",\"\")\n", "x_rates.columns = x_rates.columns.str.rstrip()\n", "x_rates.columns = x_rates.columns.str.replace(\" \",\"_\")\n", "x_rates.columns = x_rates.columns.str.lower()\n", "\n", "# rename columns\n", "x_rates.rename(columns={\"period\\\\unit:\":\"date\",\n", " \"chinese_yuan_renminbi\":\"chinese_yuan\",\n", " \"uk_pound_sterling\":\"uk_pound\"}, inplace=True)\n", "\n", "# convert datetime\n", "x_rates[\"date\"] = pd.to_datetime(x_rates[\"date\"])\n", "\n", "# resort and reindex\n", "x_rates.sort_values(\"date\", inplace=True)\n", "x_rates.reset_index(drop=True, inplace=True)\n", "\n", "# convert hyphens in currency columns to NaN\n", "import numpy as np\n", "x_rates = x_rates.replace(\"-\", np.nan)\n", "\n", "# convert exchange rate values to float\n", "x_rates.iloc[:,1:] = x_rates.iloc[:,1:].astype(float)" ] }, { "cell_type": "code", "execution_count": null, "id": "53c4ed0a", "metadata": { "scrolled": false }, "outputs": [], "source": [ "# create a us dollar to euro dataframe and add 30 day rolling mean column\n", "us_dollar = x_rates[[\"date\",\"us_dollar\"]].copy()\n", "us_dollar[\"rolling_mean_30\"] = us_dollar[\"us_dollar\"].rolling(30).mean()\n", "\n", "# line graph for dollar to euro exchange rate\n", "import matplotlib.pyplot as plt\n", "import matplotlib.dates as mdates\n", "plt.style.use('ggplot') # set style for graphs\n" ] }, { "cell_type": "code", "execution_count": null, "id": "1153ca91", "metadata": { "scrolled": false }, "outputs": [], "source": [ "# create nan group and get dates\n", "nan_dollar_group = us_dollar[us_dollar[\"us_dollar\"].isnull()]\n", "nan_days = nan_dollar_group[\"date\"]\n", "\n", "# create a dollar to euro dataframe with NaN dates removed\n", "US_dollar_nonull = x_rates[x_rates[\"us_dollar\"].notna()].copy()\n", "US_dollar_nonull[\"rolling_mean_30\"] = US_dollar_nonull[\"us_dollar\"].rolling(30).mean()\n", "\n", "# line graph showing dollar to euro showing NaN values with and without highlight\n" ] }, { "cell_type": "code", "execution_count": null, "id": "27c42da0", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# import colormap and set up color iteration\n", "from matplotlib.pyplot import cm\n", "colors = iter([plt.cm.tab10(i) for i in range(8)])\n", "\n", "# create a list of data frames for each currency with log rate of the exchange rate, 30 day rolling mean, and year\n", "df_dict = {}\n", "for currency in x_rates.columns[1:]:\n", " df_name = currency\n", " df = x_rates[[\"date\", currency]].copy()\n", " df = df[df[currency].notna()]\n", " df[\"log_rate\"] = np.log(df.iloc[:,1]/df.iloc[:,1].shift()) # getting the log of the exchange rate # double check this is the correct way to get log\n", " df[\"rolling_mean_30\"] = df[currency].rolling(30).mean()\n", " df[\"year\"] = df[\"date\"].dt.year\n", " df_dict[currency] = df\n", "\n", "# currencies for comparison\n", "high_freq = [df_dict[\"us_dollar\"], df_dict[\"japanese_yen\"], df_dict[\"uk_pound\"], df_dict[\"australian_dollar\"]]\n", "low_freq = [df_dict[\"israeli_shekel\"], df_dict[\"philippine_peso\"], df_dict[\"malaysian_ringgit\"], df_dict[\"romanian_leu\"]]\n", "\n", "# line graph showing Euro to each high frequency trading pairs\n" ] }, { "cell_type": "code", "execution_count": null, "id": "3c79d5a6", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# line graph showing Euro to four high frequency trading pairs overlapped\n" ] }, { "cell_type": "code", "execution_count": null, "id": "f9c862e9", "metadata": { "scrolled": false }, "outputs": [], "source": [ "# function to create df with year and annual volatility for every row\n", "volatility_df_list = []\n", "def volatizer(currency):\n", " annual_df_list = [currency[currency['year'] == y] for y in currency['year'].unique()] # list of annual dfs\n", " c_name = currency.columns[1]\n", " row_dict = {} # dictionary with year:annual_volatility as key:value \n", " for frame in annual_df_list:\n", " year_name = frame.iat[0,4] # first cell of the \"year\" column, becomes the \"year\" key for row_dict\n", " annual_volatility = frame[\"log_rate\"].std()*253**.5 # volatility measured by standard deviation * 253 trading days per year raised to the 0.5 power\n", " row_dict[year_name] = annual_volatility # dictionary with year:annual_volatility as key:value \n", " df = pd.DataFrame.from_dict(row_dict, orient=\"index\", columns=[c_name]) # new df from dictionary indexing on year\n", " volatility_df_list.append(df)\n", "\n", "# apply volatizer to each currency df\n", "for key in df_dict:\n", " df_dict[key] = volatizer(df_dict[key])\n", " \n", "# create an annual volatility data frame with all 40 currencies\n", "annual_volatility = pd.concat(volatility_df_list, axis=1)\n", "annual_volatility[\"year\"] = annual_volatility.index\n", "# move year to position 0 and set index as int" ] }, { "cell_type": "code", "execution_count": null, "id": "09a24666", "metadata": { "scrolled": true }, "outputs": [], "source": [ "# bar charts comparing volatility for four high and low frequency pairs\n", "high_freq = [annual_volatility[\"us_dollar\"], annual_volatility[\"japanese_yen\"], annual_volatility[\"uk_pound\"], annual_volatility[\"australian_dollar\"]]\n", "low_freq = [annual_volatility[\"israeli_shekel\"], annual_volatility[\"philippine_peso\"], annual_volatility[\"malaysian_ringgit\"], annual_volatility[\"romanian_leu\"]]\n", "years = annual_volatility.iloc[:,40]\n", "colors = iter([plt.cm.tab10(i) for i in range(8)])\n", "\n", "# high_freq bar charts\n", "\n", "\n", "# low_freq bar charts\n", "\n", "# plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "ef0e4c27", "metadata": { "scrolled": false }, "outputs": [], "source": [ "# uncommenting this cell and running in a geopandas environemnt\n", "# will produce a series of downloaded map png files\n", "\n", "# this is a very, very kuldge way for me to get the info from\n", "# the annual volatility datasat to the map dataset\n", "av_transposed = annual_volatility.transpose()\n", "av_transposed = av_transposed.drop(\"year\")\n", "iso_list = [\"AU\",\"BG\",\"BR\",\"CA\",\"CH\",\"CN\",\"CY\",\"CZ\",\"DK\",\"EE\",\"GB\",\"GR\",\n", " \"CN\",\"HR\",\"HU\",\"ID\",\"IL\",\"IN\",\"IS\",\"JP\",\"KR\",\"LT\",\"LV\",\"MT\",\n", " \"MX\",\"MY\",\"NO\",\"NZ\",\"PH\",\"PL\",\"RO\",\"RU\",\"SE\",\"SG\",\"SI\",\"SK\",\n", " \"TH\",\"TR\",\"US\",\"ZA\"]\n", "av_transposed.insert(loc=0, column='iso', value=iso_list)\n", "av_transposed.reset_index(inplace=True)\n", "av_transposed = av_transposed.rename(columns = {'index':'currency'})\n", "\n", "# import geopandas and other libraries\n", "# consider re-writing using geopandas map\n", "# geo_map = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n", "import geopandas as gpd\n", "import seaborn as sns\n", "import os\n", "map_data = gpd.read_file(\"maps/World_Countries__Generalized_.shp\")\n", "map_data[\"Volatility\"] = None\n", "concat_map = map_data.merge(av_transposed, left_on=\"ISO\", right_on=\"iso\", how=\"left\")\n", "map_data = concat_map\n", "\n", "# make maps for the gif\n", "# replaced -> map_data.plot(column=column, figsize=(18,12), cmap=\"plasma\", missing_kwds=missing_kwds, legend=True, legend_kwds={'label': \"Annual Volatility\", 'orientation':\"horizontal\"})\n", "for column in concat_map.iloc[:,11:34]:\n", " map_name = \"map_\"+str(column)\n", " map_data.plot(column=column, figsize=(18,12), cmap=\"plasma\", legend=True,\n", " legend_kwds={'label': \"Annual Volatility\", 'orientation':\"horizontal\"})\n", " plt.title(\" \"*5 + str(column) + \" Euro to National Currency Exchange Rate\" + \" \"*5,\n", " color = \"#f0f0f0\", backgroundcolor = \"#4d4d4d\", fontsize=30, weight=\"bold\")\n", " plt.savefig(/vmaps/map_name)" ] }, { "cell_type": "markdown", "id": "b7706257", "metadata": {}, "source": [ "" ] } ], "metadata": { "hide_input": false, "kernelspec": { "display_name": "geo_env", "language": "python", "name": "geo_env" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" } }, "nbformat": 4, "nbformat_minor": 5 }