{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Download hourly EPA emissions\n", "This notebook downloads zipped CEMS data from the EPA ftp server, extracts the data, corrects column names, combines the data, and exports one data file per year.\n", "\n", "## Instructions\n", "This code is the least complete out of all sections. The parts that identify new ftp files for download (using `already_downloaded` for existing files and ftp server upload dates) may require re-writing.\n", "\n", "Because not all data needs to be re-downloaded for every update, check to make sure that the correct years are used in loops for downloading and processing data." ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:27:55.791334Z", "start_time": "2017-10-31T15:27:55.779204Z" }, "collapsed": true }, "outputs": [], "source": [ "import io, time, json\n", "import pandas as pd\n", "import urllib#, urllib2\n", "import re\n", "import os\n", "from os.path import join\n", "import numpy as np\n", "import ftplib\n", "from ftplib import FTP\n", "import timeit\n", "import sys\n", "from joblib import Parallel, delayed\n", "\n", "cwd = os.getcwd()\n", "data_path = join(cwd, '..', 'Data storage')" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:20:18.676162Z", "start_time": "2017-10-31T15:20:18.662949Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "json 2.0.9\n", "pandas 0.21.1\n", "re 2.2.1\n", "numpy 1.13.3\n", "CPython 3.6.3\n", "IPython 6.2.1\n" ] } ], "source": [ "%load_ext watermark\n", "%watermark -iv -v" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:19:47.830026Z", "start_time": "2017-10-31T15:19:47.789798Z" }, "collapsed": true }, "outputs": [], "source": [ "# Load the \"autoreload\" extension\n", "%load_ext autoreload\n", "\n", "# always reload modules marked with \"%aimport\"\n", "%autoreload 1\n", "\n", "# add the 'src' directory as one where we can import modules\n", "src_dir = join(os.getcwd(), os.pardir, 'src')\n", "sys.path.append(src_dir)" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:20:10.111890Z", "start_time": "2017-10-31T15:20:10.105877Z" }, "collapsed": true }, "outputs": [], "source": [ "%aimport Data.data_extraction\n", "from Data.data_extraction import import_clean_epa" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Final script\n", "This downloads hourly data from the ftp server over a range of years, and saves all of the file names/last update times in a list. The downloads can take some time depending on how much data is being retrieved.\n", "\n", "Some of the code below assumes that we only need to retrieve new or modified files. If you are retrieving this data for the first time, create an empty dataframe named `already_downloaded` with column names `file name` and `last updated`." ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T14:57:47.062319Z", "start_time": "2017-10-31T14:57:47.056647Z" }, "collapsed": true }, "outputs": [], "source": [ "# Replace the filename with whatever csv stores already downloaded file info\n", "# path = join('EPA downloads', 'name_time 2015-2016.csv')\n", "# already_downloaded = pd.read_csv(path, parse_dates=['last updated'])\n", "\n", "# Uncomment the line below to create an empty dataframe\n", "already_downloaded = pd.DataFrame(columns=['file name', 'last updated'])" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T14:57:48.862973Z", "start_time": "2017-10-31T14:57:48.848613Z" } }, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
file namelast updated
\n", "
" ], "text/plain": [ "Empty DataFrame\n", "Columns: [file name, last updated]\n", "Index: []" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "already_downloaded.head()" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:14:52.185403Z", "start_time": "2017-10-31T15:10:39.876339Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Opening ftp connection\n", "2017\n", "Change directory to 2017\n", "Fetch filenames\n", "Find filenames without _HLD and time last updated\n", "Store names and update times\n", "Downloading data\n", "[WinError 10054] An existing connection was forcibly closed by the remote host\n", "[WinError 10054] An existing connection was forcibly closed by the remote host\n", "[WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond\n", "[WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond\n", "Download finished\n", "17.42 min so far\n" ] } ], "source": [ "# Years of data to download\n", "years = [2017]\n", "\n", "# Make the EPA download directory if it doesn't exist\n", "try:\n", " os.mkdir(join(data_path, 'EPA downloads'))\n", "except:\n", " pass\n", "\n", "# Timestamp\n", "start_time = timeit.default_timer()\n", "name_time_list = []\n", "# Open ftp connection and navigate to the correct folder\n", "print('Opening ftp connection')\n", "ftp = FTP('newftp.epa.gov')\n", "ftp.login()\n", "ftp.cwd('/dmdnload/emissions/hourly/monthly')\n", "\n", "for year in years: \n", " print(year)\n", "\n", " year_str = str(year)\n", " print('Change directory to {}'.format(year_str))\n", " try:\n", " ftp.cwd(year_str)\n", " except ftplib.all_errors as e:\n", " print(e)\n", " break\n", " \n", " # Use ftplib to get the list of filenames\n", " print('Fetch filenames')\n", " fnames = ftp.nlst()\n", " \n", " # Create new directory path if it doesn't exist\n", " new_path = join(data_path, 'EPA downloads', year_str)\n", " try:\n", " os.mkdir(new_path)\n", " print('New local folder created for {}'.format(year_str))\n", " except:\n", " pass\n", " \n", " \n", " # Look for files without _HLD in the name\n", " name_list = []\n", " time_list = []\n", " print('Find filenames without _HLD and time last updated')\n", " for name in fnames:\n", " if '_HLD' not in name:\n", " try:\n", " # The ftp command \"MDTM\" asks what time a file was last modified\n", " # It returns a code and the date/time\n", " # If the file name isn't already downloaded, or the time isn't the same\n", " tm = ftp.sendcmd('MDTM '+ name).split()[-1]\n", " dt = pd.to_datetime(tm[:8], format='%Y%m%d')\n", " if name not in already_downloaded['file name'].values:\n", " \n", " time_list.append(dt)\n", " name_list.append(name)\n", " elif (already_downloaded\n", " .loc[already_downloaded['file name']==name,\n", " 'last updated'].values[0] != tm):\n", " tm = ftp.sendcmd('MDTM '+ name).split()[-1]\n", " dt = pd.to_datetime(tm[:8], format='%Y%m%d')\n", "\n", " time_list.append(dt)\n", " name_list.append(name)\n", " except ftplib.all_errors as e:\n", " print(e)\n", " # If ftp.sendcmd didn't work, assume the connection was lost\n", " ftp = FTP('newftp.epa.gov')\n", " ftp.login()\n", " ftp.cwd('/dmdnload/emissions/hourly/monthly')\n", " ftp.cwd(year_str)\n", "# tm = ftp.sendcmd('MDTM '+ name)\n", "# time_list.append(pd.to_datetime(tm.split()[-1]))\n", " name_list.append(name)\n", " \n", " \n", " # Store all filenames and update times\n", " print('Store names and update times')\n", " name_time_list.extend(zip(name_list, time_list))\n", " \n", " # Download and store data\n", " print('Downloading data')\n", " for name in name_list:\n", " try:\n", " with open(join(data_path, 'EPA downloads', year_str, name),\n", " 'wb') as f:\n", " ftp.retrbinary('RETR %s' % name, f.write)\n", " except ftplib.all_errors as e:\n", " print(e)\n", " try:\n", " ftp.quit()\n", " except ftplib.all_errors as e:\n", " print(e)\n", " pass\n", " ftp = FTP('newftp.epa.gov')\n", " ftp.login()\n", " ftp.cwd('/dmdnload/emissions/hourly/monthly')\n", " ftp.cwd(year_str)\n", " with open(join(data_path, 'EPA downloads', year_str, name),\n", " 'wb') as f:\n", " ftp.retrbinary('RETR %s' % name, f.write)\n", "\n", " print('Download finished')\n", " print(round((timeit.default_timer() - start_time)/60.0,2), 'min so far')\n", " \n", " # Go back up a level on the ftp server\n", " ftp.cwd('..')\n", " \n", "# Timestamp\n", "elapsed = round((timeit.default_timer() - start_time)/60.0,2)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "ExecuteTime": { "end_time": "2017-03-15T12:20:05.033000", "start_time": "2017-03-15T12:20:05.029000" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Data download completed in 42.2 mins\n" ] } ], "source": [ "print 'Data download completed in %s mins' %(elapsed)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Export file names and update timestamp\n", "This is just a sample file with 2017 filenames" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:17:09.718257Z", "start_time": "2017-10-31T15:17:09.712279Z" }, "collapsed": true }, "outputs": [], "source": [ "name_time_df = pd.DataFrame(name_time_list, columns=['file name', 'last updated'])" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:17:10.850814Z", "start_time": "2017-10-31T15:17:10.839552Z" } }, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
file namelast updated
\n", "
" ], "text/plain": [ "Empty DataFrame\n", "Columns: [file name, last updated]\n", "Index: []" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "name_time_df.head()" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "ExecuteTime": { "end_time": "2017-10-31T15:17:37.669061Z", "start_time": "2017-10-31T15:17:37.659756Z" }, "collapsed": true }, "outputs": [], "source": [ "path = join(data_path, 'EPA downloads', 'name_time example.csv')\n", "name_time_df.to_csv(path, index=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Check number of columns and column names\n", "The results below are from 2001-2016 data. They show how column names have changed over the years." ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import csv\n", "import zipfile\n", "from io import BytesIO\n", "from collections import Counter" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This takes ~100 ms per file. " ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "collapsed": true }, "outputs": [], "source": [ "base_path = join(data_path, 'EPA downloads')\n", "num_cols = {}\n", "col_names = {}\n", "for year in range(2001, 2018):\n", "\n", " n_cols_list = []\n", " col_name_list = []\n", " path = join(base_path, str(year))\n", " fnames = os.listdir(path)\n", " \n", " for name in fnames:\n", " fullpath = os.path.join(path, name)\n", " columns = pd.read_csv(fullpath, nrows=5).columns\n", " \n", " # Add the column names to the large list\n", " col_name_list.extend(columns)\n", " # Add the number of columns to the list\n", " n_cols_list.append(len(columns))\n", " \n", " col_names[year] = Counter(col_name_list)\n", " num_cols[year] = Counter(n_cols_list)\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "From the table below, recent years always have the units after an emission name. Before 2009 some files have the units and some don't. UNITID is consistent through all years, but UNIT_ID was added in after 2008 (not the same thing)." ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "scrolled": false }, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
20012002200320042005200620072008200920102011201220132014201520162017
CO2_MASS8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
CO2_MASS (tons)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
CO2_MASS_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
CO2_RATE8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
CO2_RATE (tons/mmBtu)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
CO2_RATE_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
FACILITY_NAME196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
FAC_IDNaNNaNNaNNaNNaNNaNNaN98588.0588.0588.0588.0588.0588.0591.0588.0588.0
GLOAD8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
GLOAD (MW)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
HEAT_INPUT8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
HEAT_INPUT (mmBtu)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
NOX_MASS8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
NOX_MASS (lbs)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
NOX_MASS_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
NOX_RATE8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
NOX_RATE (lbs/mmBtu)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
NOX_RATE_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
OP_DATE196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
OP_HOUR196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
OP_TIME196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
ORISPL_CODE196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
SLOAD8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
SLOAD (1000 lbs)164.0159.0134.0126.060.01.010.054363.0NaNNaNNaNNaNNaNNaNNaNNaN
SLOAD (1000lb/hr)24.029.030.032.030.032.043.053225.0588.0588.0588.0588.0588.0591.0588.0588.0
SO2_MASS8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
SO2_MASS (lbs)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
SO2_MASS_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
SO2_RATE8.08.032.038.0106.0163.0143.089NaNNaNNaNNaNNaNNaNNaNNaNNaN
SO2_RATE (lbs/mmBtu)188.0188.0164.0158.090.033.053.0107588.0588.0588.0588.0588.0588.0591.0588.0588.0
SO2_RATE_MEASURE_FLG196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
STATE196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
UNITID196.0196.0196.0196.0196.0196.0196.0196588.0588.0588.0588.0588.0588.0591.0588.0588.0
UNIT_IDNaNNaNNaNNaNNaNNaNNaN98588.0588.0588.0588.0588.0588.0591.0588.0588.0
\n", "
" ], "text/plain": [ " 2001 2002 2003 2004 2005 2006 2007 2008 \\\n", "CO2_MASS 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "CO2_MASS (tons) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "CO2_MASS_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "CO2_RATE 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "CO2_RATE (tons/mmBtu) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "CO2_RATE_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "FACILITY_NAME 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "FAC_ID NaN NaN NaN NaN NaN NaN NaN 98 \n", "GLOAD 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "GLOAD (MW) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "HEAT_INPUT 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "HEAT_INPUT (mmBtu) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "NOX_MASS 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "NOX_MASS (lbs) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "NOX_MASS_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "NOX_RATE 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "NOX_RATE (lbs/mmBtu) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "NOX_RATE_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "OP_DATE 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "OP_HOUR 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "OP_TIME 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "ORISPL_CODE 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "SLOAD 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "SLOAD (1000 lbs) 164.0 159.0 134.0 126.0 60.0 1.0 10.0 54 \n", "SLOAD (1000lb/hr) 24.0 29.0 30.0 32.0 30.0 32.0 43.0 53 \n", "SO2_MASS 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "SO2_MASS (lbs) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "SO2_MASS_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "SO2_RATE 8.0 8.0 32.0 38.0 106.0 163.0 143.0 89 \n", "SO2_RATE (lbs/mmBtu) 188.0 188.0 164.0 158.0 90.0 33.0 53.0 107 \n", "SO2_RATE_MEASURE_FLG 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "STATE 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "UNITID 196.0 196.0 196.0 196.0 196.0 196.0 196.0 196 \n", "UNIT_ID NaN NaN NaN NaN NaN NaN NaN 98 \n", "\n", " 2009 2010 2011 2012 2013 2014 2015 2016 \\\n", "CO2_MASS NaN NaN NaN NaN NaN NaN NaN NaN \n", "CO2_MASS (tons) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "CO2_MASS_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "CO2_RATE NaN NaN NaN NaN NaN NaN NaN NaN \n", "CO2_RATE (tons/mmBtu) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "CO2_RATE_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "FACILITY_NAME 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "FAC_ID 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "GLOAD NaN NaN NaN NaN NaN NaN NaN NaN \n", "GLOAD (MW) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "HEAT_INPUT NaN NaN NaN NaN NaN NaN NaN NaN \n", "HEAT_INPUT (mmBtu) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "NOX_MASS NaN NaN NaN NaN NaN NaN NaN NaN \n", "NOX_MASS (lbs) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "NOX_MASS_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "NOX_RATE NaN NaN NaN NaN NaN NaN NaN NaN \n", "NOX_RATE (lbs/mmBtu) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "NOX_RATE_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "OP_DATE 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "OP_HOUR 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "OP_TIME 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "ORISPL_CODE 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "SLOAD NaN NaN NaN NaN NaN NaN NaN NaN \n", "SLOAD (1000 lbs) 363.0 NaN NaN NaN NaN NaN NaN NaN \n", "SLOAD (1000lb/hr) 225.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "SO2_MASS NaN NaN NaN NaN NaN NaN NaN NaN \n", "SO2_MASS (lbs) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "SO2_MASS_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "SO2_RATE NaN NaN NaN NaN NaN NaN NaN NaN \n", "SO2_RATE (lbs/mmBtu) 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "SO2_RATE_MEASURE_FLG 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "STATE 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "UNITID 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "UNIT_ID 588.0 588.0 588.0 588.0 588.0 588.0 591.0 588.0 \n", "\n", " 2017 \n", "CO2_MASS NaN \n", "CO2_MASS (tons) 588.0 \n", "CO2_MASS_MEASURE_FLG 588.0 \n", "CO2_RATE NaN \n", "CO2_RATE (tons/mmBtu) 588.0 \n", "CO2_RATE_MEASURE_FLG 588.0 \n", "FACILITY_NAME 588.0 \n", "FAC_ID 588.0 \n", "GLOAD NaN \n", "GLOAD (MW) 588.0 \n", "HEAT_INPUT NaN \n", "HEAT_INPUT (mmBtu) 588.0 \n", "NOX_MASS NaN \n", "NOX_MASS (lbs) 588.0 \n", "NOX_MASS_MEASURE_FLG 588.0 \n", "NOX_RATE NaN \n", "NOX_RATE (lbs/mmBtu) 588.0 \n", "NOX_RATE_MEASURE_FLG 588.0 \n", "OP_DATE 588.0 \n", "OP_HOUR 588.0 \n", "OP_TIME 588.0 \n", "ORISPL_CODE 588.0 \n", "SLOAD NaN \n", "SLOAD (1000 lbs) NaN \n", "SLOAD (1000lb/hr) 588.0 \n", "SO2_MASS NaN \n", "SO2_MASS (lbs) 588.0 \n", "SO2_MASS_MEASURE_FLG 588.0 \n", "SO2_RATE NaN \n", "SO2_RATE (lbs/mmBtu) 588.0 \n", "SO2_RATE_MEASURE_FLG 588.0 \n", "STATE 588.0 \n", "UNITID 588.0 \n", "UNIT_ID 588.0 " ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pd.DataFrame(col_names)" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Index([u'CO2_MASS', u'CO2_MASS (tons)', u'CO2_MASS_MEASURE_FLG', u'CO2_RATE',\n", " u'CO2_RATE (tons/mmBtu)', u'CO2_RATE_MEASURE_FLG', u'FACILITY_NAME',\n", " u'FAC_ID', u'GLOAD', u'GLOAD (MW)', u'HEAT_INPUT',\n", " u'HEAT_INPUT (mmBtu)', u'NOX_MASS', u'NOX_MASS (lbs)',\n", " u'NOX_MASS_MEASURE_FLG', u'NOX_RATE', u'NOX_RATE (lbs/mmBtu)',\n", " u'NOX_RATE_MEASURE_FLG', u'OP_DATE', u'OP_HOUR', u'OP_TIME',\n", " u'ORISPL_CODE', u'SLOAD', u'SLOAD (1000 lbs)', u'SLOAD (1000lb/hr)',\n", " u'SO2_MASS', u'SO2_MASS (lbs)', u'SO2_MASS_MEASURE_FLG', u'SO2_RATE',\n", " u'SO2_RATE (lbs/mmBtu)', u'SO2_RATE_MEASURE_FLG', u'STATE', u'UNITID',\n", " u'UNIT_ID'],\n", " dtype='object')" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pd.DataFrame(col_names).index" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
2001200220032004200520062007200820092010201120122013201420152016
22196.0196.0196.0196.0196.0196.0196.098NaNNaNNaNNaNNaNNaNNaNNaN
24NaNNaNNaNNaNNaNNaNNaN98588.0588.0588.0588.0588.0588.0591.0441.0
\n", "
" ], "text/plain": [ " 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 \\\n", "22 196.0 196.0 196.0 196.0 196.0 196.0 196.0 98 NaN NaN \n", "24 NaN NaN NaN NaN NaN NaN NaN 98 588.0 588.0 \n", "\n", " 2011 2012 2013 2014 2015 2016 \n", "22 NaN NaN NaN NaN NaN NaN \n", "24 588.0 588.0 588.0 588.0 591.0 441.0 " ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pd.DataFrame(num_cols)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Correct column names and export all files (1 file per year)\n", "Also convert the `OP_DATE` column to a datetime object" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Using [joblib](https://pythonhosted.org/joblib/parallel.html) for this.\n", "\n", "Joblib on windows requires the if __name__ == '__main__': statement. And in a Jupyter notebook the function needs to be imported from an external script. I probably should have done the parallel part at a higher level - the longest part is saving the csv files. Could use [this method](http://www.lucainvernizzi.net/blog/2015/08/03/8x-speed-up-for-python-s-csv-dictwriter/) - disable a check - to speed up the process.\n", "\n", "Joblib has to be at least version 10.0, which is only available through pip - got some errors when using the version installed by conda." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Create a dictionary mapping column names. Any values on the left (keys) should be replaced by values on the right (values). " ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "ExecuteTime": { "end_time": "2017-03-15T13:10:57.876000", "start_time": "2017-03-15T13:10:57.868000" }, "collapsed": true }, "outputs": [], "source": [ "col_name_map = {'CO2_MASS' : 'CO2_MASS (tons)',\n", " 'CO2_RATE' : 'CO2_RATE (tons/mmBtu)',\n", " 'GLOAD' : 'GLOAD (MW)',\n", " 'HEAT_INPUT' : 'HEAT_INPUT (mmBtu)',\n", " 'NOX_MASS' : 'NOX_MASS (lbs)',\n", " 'NOX_RATE' : 'NOX_RATE (lbs/mmBtu)',\n", " 'SLOAD' : 'SLOAD (1000lb/hr)',\n", " 'SLOAD (1000 lbs)' : 'SLOAD (1000lb/hr)',\n", " 'SO2_MASS' : 'SO2_MASS (lbs)',\n", " 'SO2_RATE' : 'SO2_RATE (lbs/mmBtu)'\n", " }" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Be sure to change the start and end years" ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "ExecuteTime": { "end_time": "2017-03-15T13:34:53.126000", "start_time": "2017-03-15T13:12:03.130000" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Starting 2016\n", "Combining data\n", "Saving file\n", "16.46 min so far\n", "Starting 2017\n", "Combining data\n", "Saving file\n", "31.72 min so far\n" ] } ], "source": [ "years = [2016, 2017]\n", "\n", "if __name__ == '__main__':\n", " start_time = timeit.default_timer()\n", " base_path = join(data_path, 'EPA downloads')\n", " for year in years:\n", " print('Starting', str(year))\n", " df_list = []\n", " path = join(base_path, str(year))\n", " fnames = os.listdir(path)\n", "\n", " df_list = Parallel(n_jobs=-1)(delayed(import_clean_epa)\n", " (path, name, col_name_map) \n", " for name in fnames)\n", "\n", " print('Combining data')\n", " df = pd.concat(df_list)\n", " print('Saving file')\n", " path_out = join(data_path, 'EPA emissions',\n", " 'EPA emissions {}.csv'.format(str(year)))\n", " df.to_csv(path_out, index=False)\n", " \n", " # File IO is much faster with the feather file format\n", "# df.to_feather(path_out)\n", "\n", " print(round((timeit.default_timer() - start_time)/60.0,2), 'min so far')\n", " \n", " # Timestamp\n", " elapsed = round((timeit.default_timer() - start_time)/60.0,2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "psci", "language": "python", "name": "psci" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.4" }, "varInspector": { "cols": { "lenName": 16, "lenType": 16, "lenVar": 40 }, "kernels_config": { "python": { "delete_cmd_postfix": "", "delete_cmd_prefix": "del ", "library": "var_list.py", "varRefreshCmd": "print(var_dic_list())" }, "r": { "delete_cmd_postfix": ") ", "delete_cmd_prefix": "rm(", "library": "var_list.r", "varRefreshCmd": "cat(var_dic_list()) " } }, "types_to_exclude": [ "module", "function", "builtin_function_or_method", "instance", "_Feature" ], "window_display": false } }, "nbformat": 4, "nbformat_minor": 2 }