#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# hatlc.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Jan 2016
# License: MIT - see LICENSE for the full text.

'''This contains functions to read HAT sqlite ("sqlitecurves") and CSV light
curves generated by the new HAT data server.

The most useful functions in this module are::

    read_csvlc(lcfile):

        This reads a CSV light curve produced by the HAT data server into an
        lcdict.

        lcfile is the HAT gzipped CSV LC (with a .hatlc.csv.gz extension)

And::

    read_and_filter_sqlitecurve(lcfile, columns=None, sqlfilters=None,
                                raiseonfail=False, forcerecompress=False):

        This reads a sqlitecurve file and optionally filters it, returns an
        lcdict. Returns columns requested in columns. If None, then returns
        all columns present in the latest columnlist in the lightcurve. See
        COLUMNDEFS for the full list of HAT LC columns.

        If sqlfilters is not None, it must be a list of text SQL filters that
        apply to the columns in the lightcurve.

        This returns an lcdict with an added 'lcfiltersql' key that indicates
        what the parsed SQL filter string was.

        If forcerecompress = True, will recompress the un-gzipped sqlitecurve
        even if the gzipped form exists on disk already.

Finally::

    describe(lcdict):

        This describes the metadata of the light curve.

Command line usage
------------------

You can call this module directly from the command line:

If you just have this file alone::

    $ chmod +x hatlc.py
    $ ./hatlc.py --help

If astrobase is installed with pip, etc., this will be on your path already::

    $ hatlc --help

These should give you the following::

    usage: hatlc.py [-h] [--describe] hatlcfile

    read a HAT LC of any format and output to stdout

    positional arguments:
      hatlcfile   path to the light curve you want to read and pipe to stdout

    optional arguments:
      -h, --help  show this help message and exit
      --describe  don't dump the columns, show only object info and LC
                  metadata

Either one will dump any HAT LC recognized to stdout (or just dump the
description if requested).

Other useful functions
----------------------

Two other functions that might be useful::

    normalize_lcdict(lcdict, timecol='rjd', magcols='all', mingap=4.0,
                     normto='sdssr', debugmode=False):

        This normalizes magnitude columns (specified in the magcols keyword
        argument) in an lcdict obtained from reading a HAT light curve. This
        normalization is done by finding 'timegroups' in each magnitude
        column, assuming that these belong to different 'eras' separated by a
        specified gap in the mingap keyword argument, and thus may be offset
        vertically from one another. Measurements within a timegroup are
        normalized to zero using the median magnitude of the timegroup. Once
        all timegroups have been processed this way, the whole time series is
        then re-normalized to the specified value in the normto keyword
        argument.
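
For example, a typical read-and-normalize session might look like this (a
minimal sketch; the file name is hypothetical, and the import assumes the
usual astrobase package layout)::

    from astrobase.hatsurveys import hatlc

    lcdict, msg = hatlc.read_and_filter_sqlitecurve(
        'HAT-123-0001234-V0-DR0-hatlc.sqlite.gz'
    )
    lcdict = hatlc.normalize_lcdict(lcdict, magcols='epdtfa', normto='sdssr')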
And::

    normalize_lcdict_byinst(lcdict, magcols='all', normto='sdssr',
                            normkeylist=('stf','ccd','flt','fld','prj','exp'),
                            debugmode=False)

        This normalizes magnitude columns (specified in the magcols keyword
        argument) in an lcdict obtained from reading a HAT light curve. This
        normalization is done by generating a normalization key using columns
        in the lcdict that specify various instrument properties. The default
        normalization key (specified in the normkeylist kwarg) is a
        combination of:

        - HAT station IDs ('stf')
        - camera position ID ('ccd'; useful for HATSouth observations)
        - camera filters ('flt')
        - observed HAT field names ('fld')
        - HAT project IDs ('prj')
        - camera exposure times ('exp')

        with the assumption that measurements with identical normalization
        keys belong to a single 'era'. Measurements within an era are
        normalized to zero using the median magnitude of the era. Once all
        eras have been processed this way, the whole time series is then
        re-normalized to the specified value in the normto keyword argument.

There's an IPython notebook describing the use of this module and
accompanying modules from the astrobase package at:

https://github.com/waqasbhatti/astrobase-notebooks/blob/master/lightcurve-work.ipynb

'''

# put this in here because hatlc can be used as a standalone module
__version__ = '0.5.3'


#############
## LOGGING ##
#############

# the basic logging styles common to all astrobase modules
log_sub = '{'
log_fmt = '[{levelname:1.1} {asctime} {module}:{lineno}] {message}'
log_date_fmt = '%y%m%d %H:%M:%S'

import logging

DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO

LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)

LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception


####################
## SYSTEM IMPORTS ##
####################

import os.path
import os
import gzip
import shutil
import re
import sqlite3 as sql
import json
from pprint import pformat
import sys
import textwrap

import numpy as np
from numpy import nan


#################
## DEFINITIONS ##
#################

# LC column definitions
# the first elem is the column description, the second is the format to use
# when writing a CSV LC column, the third is the type to use when parsing a
# CSV LC column
COLUMNDEFS = {
    # TIME
    'rjd':('time of observation in Reduced Julian date (JD = 2400000.0 + RJD)',
           '%.7f',
           float),
    'bjd':(('time of observation in Barycentric Julian date '
            '(note: this is BJD_TDB)'),
           '%.7f',
           float),
    # FRAME METADATA
    'net':('network of telescopes observing this target',
           '%s',
           str),
    'stf':('station ID of the telescope observing this target',
           '%i',
           int),
    'cfn':('camera frame serial number',
           '%i',
           int),
    'cfs':('camera subframe id',
           '%s',
           str),
    'ccd':('camera CCD position number',
           '%i',
           int),
    'prj':('project ID of this observation',
           '%s',
           str),
    'fld':('observed field name',
           '%s',
           str),
    'frt':('image frame type (flat, object, etc.)',
           '%s',
           str),
    # FILTER CONFIG
    'flt':('filter ID from the filters table',
           '%i',
           int),
    'flv':('filter version used',
           '%i',
           int),
    # CAMERA CONFIG
    'cid':('camera ID',
           '%i',
           int),
    'cvn':('camera version',
           '%i',
           int),
    'cbv':('camera bias-frame version',
           '%i',
           int),
    'cdv':('camera dark-frame version',
           '%i',
           int),
    'cfv':('camera flat-frame version',
           '%i',
           int),
    'exp':('exposure time for this observation in seconds',
           '%.3f',
           float),
    # TELESCOPE CONFIG
    'tid':('telescope ID',
           '%i',
           int),
    'tvn':('telescope version',
           '%i',
           int),
    'tfs':('telescope focus setting',
           '%i',
           int),
    'ttt':('telescope tube temperature [deg]',
           '%.3f',
           float),
    'tms':('telescope mount state (tracking, drizzling, etc.)',
           '%s',
           str),
    'tmi':('telescope mount ID',
           '%i',
           int),
    'tmv':('telescope mount version',
           '%i',
           int),
    'tgs':('telescope guider status (MGen)',
           '%s',
           str),
    # ENVIRONMENT
    'mph':('moon phase at this observation',
           '%.2f',
           float),
    'iha':('hour angle of object at this observation',
           '%.3f',
           float),
    'izd':('zenith distance of object at this observation',
           '%.3f',
           float),
    # APERTURE PHOTOMETRY METADATA
    'xcc':('x coordinate on CCD chip',
           '%.3f',
           float),
    'ycc':('y coordinate on CCD chip',
           '%.3f',
           float),
    'bgv':('sky background measurement around object in ADU',
           '%.3f',
           float),
    'bge':('error in sky background measurement in ADU',
           '%.3f',
           float),
    'fsv':('source extraction S parameter [1/(the PSF spatial RMS)^2]',
           '%.5f',
           float),
    'fdv':('source extraction D parameter (the PSF ellipticity in xy)',
           '%.5f',
           float),
    'fkv':('source extraction K parameter (the PSF diagonal ellipticity)',
           '%.5f',
           float),
    # APERTURE PHOTOMETRY COLUMNS (NOTE: these are per aperture)
    'aim':('aperture photometry raw instrumental magnitude in aperture %s',
           '%.5f',
           float),
    'aie':('aperture photometry raw instrumental mag error in aperture %s',
           '%.5f',
           float),
    'aiq':('aperture photometry raw instrumental mag quality flag, aperture %s',
           '%s',
           str),
    'arm':('aperture photometry fit magnitude in aperture %s',
           '%.5f',
           float),
    'aep':('aperture photometry EPD magnitude in aperture %s',
           '%.5f',
           float),
    'atf':('aperture photometry TFA magnitude in aperture %s',
           '%.5f',
           float),
    # PSF FIT PHOTOMETRY METADATA
    'psv':('PSF fit S parameter (the PSF spatial RMS)',
           '%.5f',
           float),
    'pdv':('PSF fit D parameter (the PSF spatial ellipticity in xy)',
           '%.5f',
           float),
    'pkv':('PSF fit K parameter (the PSF spatial diagonal ellipticity)',
           '%.5f',
           float),
    'ppx':('PSF fit number of pixels used for fit',
           '%i',
           int),
    'psn':('PSF fit signal-to-noise ratio',
           '%.3f',
           float),
    'pch':('PSF fit chi-squared value',
           '%.5f',
           float),
    # PSF FIT PHOTOMETRY COLUMNS
    'psim':('PSF fit instrumental raw magnitude',
            '%.5f',
            float),
    'psie':('PSF fit instrumental raw magnitude error',
            '%.5f',
            float),
    'psiq':('PSF fit instrumental raw magnitude quality flag',
            '%s',
            str),
    'psrm':('PSF fit final magnitude after mag-fit',
            '%.5f',
            float),
    'psrr':('PSF fit residual',
            '%.5f',
            float),
    'psrn':('PSF fit number of sources used',
            '%i',
            int),
    'psep':('PSF fit EPD magnitude',
            '%.5f',
            float),
    'pstf':('PSF fit TFA magnitude',
            '%.5f',
            float),
    # IMAGE SUBTRACTION PHOTOMETRY METADATA
    'xic':('x coordinate on CCD chip after image-subtraction frame warp',
           '%.3f',
           float),
    'yic':('y coordinate on CCD chip after image-subtraction frame warp',
           '%.3f',
           float),
    # IMAGE SUBTRACTION PHOTOMETRY COLUMNS
    'irm':('image subtraction fit magnitude in aperture %s',
           '%.5f',
           float),
    'ire':('image subtraction fit magnitude error in aperture %s',
           '%.5f',
           float),
    'irq':('image subtraction fit magnitude quality flag for aperture %s',
           '%s',
           str),
    'iep':('image subtraction EPD magnitude in aperture %s',
           '%.5f',
           float),
    'itf':('image subtraction TFA magnitude in aperture %s',
           '%.5f',
           float),
}
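
# A quick illustration of how these 3-tuples are meant to be used (a sketch;
# the value here is made up): the second element formats a value when writing
# a CSV LC, and the third casts the text back when parsing one.
#
#   >>> desc, fmt, caster = COLUMNDEFS['rjd']
#   >>> fmt % 56789.1234567
#   '56789.1234567'
#   >>> caster('56789.1234567')
#   56789.1234567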

LC_MAG_COLUMNS = ('aim','arm','aep','atf',
                  'psim','psrm','psep','pstf',
                  'irm','iep','itf')

LC_ERR_COLUMNS = ('aie','psie','ire')

LC_FLAG_COLUMNS = ('aiq','psiq','irq')

# used to validate the filter string
# http://www.sqlite.org/lang_keywords.html
SQLITE_ALLOWED_WORDS = ['and','between','in',
                        'is','isnull','like','not',
                        'notnull','null','or',
                        '=','<','>','<=','>=','!=','%']


#######################
## UTILITY FUNCTIONS ##
#######################

# this is from Tornado's source (MIT License):
# http://www.tornadoweb.org/en/stable/_modules/tornado/escape.html#squeeze
def _squeeze(value):
    """Replace all sequences of whitespace chars with a single space."""
    return re.sub(r"[\x00-\x20]+", " ", value).strip()
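
# For instance (a quick sketch):
#
#   >>> _squeeze('aep_000 <  12.0\tand\nrjd > 56000')
#   'aep_000 < 12.0 and rjd > 56000'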

#######################################
## SQLITECURVE COMPRESSION FUNCTIONS ##
#######################################

def _pycompress_sqlitecurve(sqlitecurve, force=False):
    '''This just compresses the sqlitecurve. Should be independent of OS.

    '''

    outfile = '%s.gz' % sqlitecurve

    try:

        if os.path.exists(outfile) and not force:
            os.remove(sqlitecurve)
            return outfile

        else:
            with open(sqlitecurve,'rb') as infd:
                with gzip.open(outfile,'wb') as outfd:
                    shutil.copyfileobj(infd, outfd)

            if os.path.exists(outfile):
                os.remove(sqlitecurve)
                return outfile

    except Exception:
        return None


def _pyuncompress_sqlitecurve(sqlitecurve, force=False):
    '''This just uncompresses the sqlitecurve. Should be independent of OS.

    '''

    outfile = sqlitecurve.replace('.gz','')

    try:

        if os.path.exists(outfile) and not force:
            return outfile

        else:
            with gzip.open(sqlitecurve,'rb') as infd:
                with open(outfile,'wb') as outfd:
                    shutil.copyfileobj(infd, outfd)

            # do not remove the input file yet
            if os.path.exists(outfile):
                return outfile

    except Exception:
        return None


_compress_sqlitecurve = _pycompress_sqlitecurve
_uncompress_sqlitecurve = _pyuncompress_sqlitecurve
GZIPTEST = None


###################################
## READING SQLITECURVE FUNCTIONS ##
###################################

def _validate_sqlitecurve_filters(filterstring, lccolumns):
    '''This validates the sqlitecurve filter string.

    This MUST be valid SQL but not contain any commands.

    '''

    # first, lowercase, then _squeeze to single spaces
    stringelems = _squeeze(filterstring).lower()

    # replace shady characters
    stringelems = stringelems.replace('(','')
    stringelems = stringelems.replace(')','')
    stringelems = stringelems.replace(',','')
    stringelems = stringelems.replace("'",'"')
    stringelems = stringelems.replace('\n',' ')
    stringelems = stringelems.replace('\t',' ')
    stringelems = _squeeze(stringelems)

    # split into words
    stringelems = stringelems.split(' ')
    stringelems = [x.strip() for x in stringelems]

    # get rid of all numbers
    stringwords = []
    for x in stringelems:
        try:
            float(x)
        except ValueError:
            stringwords.append(x)

    # get rid of everything within quotes
    stringwords2 = []
    for x in stringwords:
        if not(x.startswith('"') and x.endswith('"')):
            stringwords2.append(x)
    stringwords2 = [x for x in stringwords2 if len(x) > 0]

    # check the filterstring words against the allowed words
    wordset = set(stringwords2)

    # generate the allowed word set for these LC columns
    allowedwords = SQLITE_ALLOWED_WORDS + lccolumns
    checkset = set(allowedwords)

    validatecheck = list(wordset - checkset)

    # if there are words left over, then this filter string is suspicious
    if len(validatecheck) > 0:

        # check if validatecheck contains an elem with % in it
        LOGWARNING("provided SQL filter string '%s' "
                   "contains non-allowed keywords" % filterstring)
        return None

    else:
        return filterstring
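
# For example (a sketch; the column names are hypothetical):
#
#   >>> _validate_sqlitecurve_filters('aep_000 < 12.0 and rjd > 56000',
#   ...                               ['rjd','aep_000'])
#   'aep_000 < 12.0 and rjd > 56000'
#
# while something containing SQL commands, e.g. 'drop table lightcurve',
# fails the allowed-word check and returns None.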

def read_and_filter_sqlitecurve(lcfile,
                                columns=None,
                                sqlfilters=None,
                                raiseonfail=False,
                                returnarrays=True,
                                forcerecompress=False,
                                quiet=True):
    '''This reads a HAT sqlitecurve and optionally filters it.

    Parameters
    ----------

    lcfile : str
        The path to the HAT sqlitecurve file.

    columns : list
        A list of columns to extract from the light curve file. If None, then
        returns all columns present in the latest `columnlist` in the light
        curve.

    sqlfilters : list of str
        If not None, it must be a list of text SQL filters that apply to the
        columns in the lightcurve.

    raiseonfail : bool
        If this is True, an Exception when reading the LC will crash the
        function instead of failing silently and returning None as the
        result.

    returnarrays : bool
        If this is True, the output lcdict contains columns as np.arrays
        instead of lists. You generally want this to be True.

    forcerecompress : bool
        If True, the sqlitecurve will be recompressed even if a compressed
        version of it is found. This usually happens when sqlitecurve opening
        is interrupted by the OS for some reason, leaving behind a gzipped
        and un-gzipped copy. By default, this function refuses to overwrite
        the existing gzipped version so if the un-gzipped version is corrupt
        but that one isn't, it can be safely recovered.

    quiet : bool
        If True, will not warn about any problems, even if the light curve
        reading fails (the only clue then will be the return value of
        None). Useful for batch processing of many many light curves.

    Returns
    -------

    tuple : (lcdict, status_message)
        A two-element tuple is returned, with the first element being the
        lcdict.

    '''

    # we're proceeding with reading the LC...
    try:

        # if this file is a gzipped sqlite3 db, then gunzip it
        if '.gz' in lcfile[-4:]:
            lcf = _uncompress_sqlitecurve(lcfile)
        else:
            lcf = lcfile

        db = sql.connect(lcf)
        cur = db.cursor()

        # get the objectinfo from the sqlitecurve
        query = ("select * from objectinfo")
        cur.execute(query)
        objectinfo = cur.fetchone()

        # get the lcinfo from the sqlitecurve
        query = ("select * from lcinfo "
                 "order by version desc limit 1")
        cur.execute(query)
        lcinfo = cur.fetchone()

        (lcversion, lcdatarelease, lccols, lcsortcol,
         lcapertures, lcbestaperture,
         objinfocols, objidcol,
         lcunixtime, lcgitrev, lccomment) = lcinfo

        # load the JSON dicts
        lcapertures = json.loads(lcapertures)
        lcbestaperture = json.loads(lcbestaperture)

        objectinfokeys = objinfocols.split(',')
        objectinfodict = {x:y for (x,y) in zip(objectinfokeys, objectinfo)}
        objectid = objectinfodict[objidcol]

        # need to generate the objectinfo dict and the objectid from the
        # lcinfo columns

        # get the filters from the sqlitecurve
        query = ("select * from filters")
        cur.execute(query)
        filterinfo = cur.fetchall()

        # validate the requested columns
        if columns and all(x in lccols.split(',') for x in columns):
            LOGINFO('retrieving columns %s' % columns)
            proceed = True
        elif columns is None:
            columns = lccols.split(',')
            proceed = True
        else:
            proceed = False

        # bail out if there's a problem and tell the user what happened
        if not proceed:
            # recompress the lightcurve at the end
            if '.gz' in lcfile[-4:] and lcf:
                _compress_sqlitecurve(lcf, force=forcerecompress)
            LOGERROR('requested columns are invalid!')
            return None, "requested columns are invalid"

        # create the lcdict with the object, lc, and filter info
        lcdict = {'objectid':objectid,
                  'objectinfo':objectinfodict,
                  'objectinfokeys':objectinfokeys,
                  'lcversion':lcversion,
                  'datarelease':lcdatarelease,
                  'columns':columns,
                  'lcsortcol':lcsortcol,
                  'lcapertures':lcapertures,
                  'lcbestaperture':lcbestaperture,
                  'lastupdated':lcunixtime,
                  'lcserver':lcgitrev,
                  'comment':lccomment,
                  'filters':filterinfo}

        # validate the SQL filters for this LC
        if ((sqlfilters is not None) and
            isinstance(sqlfilters,str)):

            # give the validator the sqlfilters string and a list of lccols
            # in the lightcurve
            validatedfilters = _validate_sqlitecurve_filters(
                sqlfilters,
                lccols.split(',')
            )

            if validatedfilters is not None:
                LOGINFO('filtering LC using: %s' % validatedfilters)
                filtersok = True
            else:
                filtersok = False

        else:
            validatedfilters = None
            filtersok = None

        # now read all the required columns in the order indicated
        # we use the validated SQL filter string here
        if validatedfilters is not None:

            query = (
                "select {columns} from lightcurve where {sqlfilter} "
                "order by {sortcol} asc"
            ).format(
                columns=','.join(columns),  # columns is always a list
                sqlfilter=validatedfilters,
                sortcol=lcsortcol
            )

            lcdict['lcfiltersql'] = validatedfilters

        else:

            query = ("select %s from lightcurve order by %s asc") % (
                ','.join(columns),
                lcsortcol
            )

        cur.execute(query)
        lightcurve = cur.fetchall()

        if lightcurve and len(lightcurve) > 0:

            lightcurve = list(zip(*lightcurve))
            lcdict.update({x:y for (x,y) in zip(lcdict['columns'],
                                                lightcurve)})
            lcok = True

            # update the ndet after filtering
            lcdict['objectinfo']['ndet'] = len(lightcurve[0])

        else:

            LOGWARNING('LC for %s has no detections' % lcdict['objectid'])

            # fill the lightcurve with empty lists to indicate that it is
            # empty
            lcdict.update({x:y for (x,y) in
                           zip(lcdict['columns'],
                               [[] for x in lcdict['columns']])})
            lcok = False

        # generate the returned lcdict and status message
        if filtersok is True and lcok:
            statusmsg = 'SQL filters OK, LC OK'
        elif filtersok is None and lcok:
            statusmsg = 'no SQL filters, LC OK'
        elif filtersok is False and lcok:
            statusmsg = 'SQL filters invalid, LC OK'
        else:
            statusmsg = 'LC retrieval failed'

        returnval = (lcdict, statusmsg)

        # recompress the lightcurve at the end
        if '.gz' in lcfile[-4:] and lcf:
            _compress_sqlitecurve(lcf, force=forcerecompress)

        # return ndarrays if that's set
        if returnarrays:

            for column in lcdict['columns']:
                lcdict[column] = np.array([x if x is not None else np.nan
                                           for x in lcdict[column]])

    except Exception:

        if not quiet:
            LOGEXCEPTION('could not open sqlitecurve %s' % lcfile)

        returnval = (None, 'error while reading lightcurve file')

        # recompress the lightcurve at the end
        if '.gz' in lcfile[-4:] and lcf:
            _compress_sqlitecurve(lcf, force=forcerecompress)

        if raiseonfail:
            raise

    return returnval
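
# A typical call looks like this (a sketch; the file name and filter are
# hypothetical):
#
#   lcdict, msg = read_and_filter_sqlitecurve(
#       'HAT-123-0001234-V0-DR0-hatlc.sqlite.gz',
#       columns=['rjd','aep_000'],
#       sqlfilters='aep_000 < 12.0'
#   )
#
# note that a filter is only applied when sqlfilters is a single SQL
# WHERE-clause string, since that's what the isinstance check above accepts.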

############################
## DESCRIBING THE COLUMNS ##
############################

DESCTEMPLATE = '''\
OBJECT
------
objectid = {objectid}
hatid = {hatid}; twomassid = {twomassid}
network = {network}; stations = {stations}; ndet = {ndet}
ra = {ra}; decl = {decl}
pmra = {pmra}; pmra_err = {pmra_err}
pmdecl = {pmdecl}; pmdecl_err = {pmdecl_err}
jmag = {jmag}; hmag = {hmag}; kmag = {kmag}; bmag = {bmag}; vmag = {vmag}
sdssg = {sdssg}; sdssr = {sdssr}; sdssi = {sdssi}

METADATA
--------
datarelease = {datarelease}; lcversion = {lcversion}
lastupdated = {lastupdated:.3f}; lcserver = {lcserver}
comment = {comment}
lcbestaperture = {lcbestaperture}
lcsortcol = {lcsortcol}
lcfiltersql = {lcfiltersql}
lcnormcols = {lcnormcols}

CAMFILTERS
----------
{filterdefs}

PHOTAPERTURES
-------------
{aperturedefs}

LIGHT CURVE COLUMNS
-------------------
{columndefs}'''

LCC_CSVLC_DESCTEMPLATE = '''\
OBJECT ID: {objectid}

OBJECT AND LIGHT CURVE METADATA
-------------------------------
{metadata_desc}

{metadata}

LIGHT CURVE COLUMNS
-------------------
{columndefs}'''


def describe(lcdict, returndesc=False, offsetwith=None):
    '''This describes the light curve object and columns present.

    Parameters
    ----------

    lcdict : dict
        The input lcdict to parse for column and metadata info.

    returndesc : bool
        If True, returns the description string as an str instead of just
        printing it to stdout.

    offsetwith : str
        This is a character to offset the output description lines by. This
        is useful to add comment characters like '#' to the output
        description lines.

    Returns
    -------

    str or None
        If returndesc is True, returns the description lines as a str,
        otherwise returns nothing.

    '''

    # transparently read LCC CSV format description
    if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
        return describe_lcc_csv(lcdict, returndesc=returndesc)

    # figure out the columndefs part of the header string
    columndefs = []

    for colind, column in enumerate(lcdict['columns']):

        if '_' in column:
            colkey, colap = column.split('_')
            coldesc = COLUMNDEFS[colkey][0] % colap
        else:
            coldesc = COLUMNDEFS[column][0]

        columndefstr = '%03i - %s - %s' % (colind, column, coldesc)
        columndefs.append(columndefstr)

    columndefs = '\n'.join(columndefs)

    # figure out the filterdefs
    filterdefs = []

    for row in lcdict['filters']:

        filterid, filtername, filterdesc = row
        filterdefstr = '%s - %s - %s' % (filterid, filtername, filterdesc)
        filterdefs.append(filterdefstr)

    filterdefs = '\n'.join(filterdefs)

    # figure out the apertures
    aperturedefs = []

    for key in sorted(lcdict['lcapertures'].keys()):
        aperturedefstr = '%s - %.2f px' % (key, lcdict['lcapertures'][key])
        aperturedefs.append(aperturedefstr)

    aperturedefs = '\n'.join(aperturedefs)

    # now fill in the description
    description = DESCTEMPLATE.format(
        objectid=lcdict['objectid'],
        hatid=lcdict['objectinfo']['hatid'],
        twomassid=lcdict['objectinfo']['twomassid'].strip(),
        ra=lcdict['objectinfo']['ra'],
        decl=lcdict['objectinfo']['decl'],
        pmra=lcdict['objectinfo']['pmra'],
        pmra_err=lcdict['objectinfo']['pmra_err'],
        pmdecl=lcdict['objectinfo']['pmdecl'],
        pmdecl_err=lcdict['objectinfo']['pmdecl_err'],
        jmag=lcdict['objectinfo']['jmag'],
        hmag=lcdict['objectinfo']['hmag'],
        kmag=lcdict['objectinfo']['kmag'],
        bmag=lcdict['objectinfo']['bmag'],
        vmag=lcdict['objectinfo']['vmag'],
        sdssg=lcdict['objectinfo']['sdssg'],
        sdssr=lcdict['objectinfo']['sdssr'],
        sdssi=lcdict['objectinfo']['sdssi'],
        ndet=lcdict['objectinfo']['ndet'],
        lcsortcol=lcdict['lcsortcol'],
        lcbestaperture=json.dumps(lcdict['lcbestaperture'],
                                  ensure_ascii=True),
        network=lcdict['objectinfo']['network'],
        stations=lcdict['objectinfo']['stations'],
        lastupdated=lcdict['lastupdated'],
        datarelease=lcdict['datarelease'],
        lcversion=lcdict['lcversion'],
        lcserver=lcdict['lcserver'],
        comment=lcdict['comment'],
        lcfiltersql=(lcdict['lcfiltersql'] if 'lcfiltersql' in lcdict
                     else ''),
        lcnormcols=(lcdict['lcnormcols'] if 'lcnormcols' in lcdict
                    else ''),
        filterdefs=filterdefs,
        columndefs=columndefs,
        aperturedefs=aperturedefs
    )

    if offsetwith is not None:
        description = textwrap.indent(
            description,
            '%s ' % offsetwith,
            lambda line: True
        )
        print(description)
    else:
        print(description)

    if returndesc:
        return description
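
# For example (a sketch), to dump a commented description at the top of a
# pipe-able text stream:
#
#   describe(lcdict, offsetwith='#')
#
# which prefixes every description line with '# ', so downstream parsers can
# skip the block as comments.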

######################################
## READING CSV LIGHTCURVES HATLC V2 ##
######################################

def _smartcast(castee, caster, subval=None):
    '''
    This just tries to apply the caster function to castee.

    Returns None on failure.

    '''

    try:
        return caster(castee)
    except Exception:
        if caster is float or caster is int:
            return nan
        elif caster is str:
            return ''
        else:
            return subval


# these are the keys used in the metadata section of the CSV LC
METAKEYS = {'objectid':str,
            'hatid':str,
            'twomassid':str,
            'ucac4id':str,
            'network':str,
            'stations':str,
            'ndet':int,
            'ra':float,
            'decl':float,
            'pmra':float,
            'pmra_err':float,
            'pmdecl':float,
            'pmdecl_err':float,
            'jmag':float,
            'hmag':float,
            'kmag':float,
            'bmag':float,
            'vmag':float,
            'sdssg':float,
            'sdssr':float,
            'sdssi':float}
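
# For instance (a quick sketch): malformed numeric fields become nan instead
# of raising, so a single bad CSV cell doesn't kill the whole read.
#
#   >>> _smartcast('12.456', float)
#   12.456
#   >>> _smartcast('NULL', float)
#   nan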

def _parse_csv_header(header):
    '''
    This parses the CSV header from the CSV HAT sqlitecurve.

    Returns a dict that can be used to update an existing lcdict with the
    relevant metadata info needed to form a full LC.

    '''

    # first, break into lines
    headerlines = header.split('\n')
    headerlines = [x.lstrip('# ') for x in headerlines]

    # next, find the indices of the metadata sections
    objectstart = headerlines.index('OBJECT')
    metadatastart = headerlines.index('METADATA')
    camfilterstart = headerlines.index('CAMFILTERS')
    photaperturestart = headerlines.index('PHOTAPERTURES')
    columnstart = headerlines.index('COLUMNS')
    lcstart = headerlines.index('LIGHTCURVE')

    # get the lines for the header sections
    objectinfo = headerlines[objectstart+1:metadatastart-1]
    metadatainfo = headerlines[metadatastart+1:camfilterstart-1]
    camfilterinfo = headerlines[camfilterstart+1:photaperturestart-1]
    photapertureinfo = headerlines[photaperturestart+1:columnstart-1]
    columninfo = headerlines[columnstart+1:lcstart-1]

    # parse the header sections and insert the appropriate key-val pairs
    # into the lcdict
    metadict = {'objectinfo':{}}

    # first, the objectinfo section
    objectinfo = [x.split(';') for x in objectinfo]

    for elem in objectinfo:
        for kvelem in elem:
            key, val = kvelem.split(' = ',1)
            metadict['objectinfo'][key.strip()] = (
                _smartcast(val, METAKEYS[key.strip()])
            )

    # the objectid belongs at the top level
    metadict['objectid'] = metadict['objectinfo']['objectid'][:]
    del metadict['objectinfo']['objectid']

    # get the lightcurve metadata
    metadatainfo = [x.split(';') for x in metadatainfo]

    for elem in metadatainfo:
        for kvelem in elem:

            try:
                key, val = kvelem.split(' = ',1)

                # get the lcbestaperture into a dict again
                if key.strip() == 'lcbestaperture':
                    val = json.loads(val)

                # get the lcversion and datarelease as integers
                if key.strip() in ('datarelease', 'lcversion'):
                    val = int(val)

                # get the lastupdated as a float
                if key.strip() == 'lastupdated':
                    val = float(val)

                # put the key-val into the dict
                metadict[key.strip()] = val

            except Exception:

                LOGWARNING('could not understand header element "%s",'
                           ' skipped.' % kvelem)

    # get the camera filters
    metadict['filters'] = []
    for row in camfilterinfo:
        filterid, filtername, filterdesc = row.split(' - ')
        metadict['filters'].append((int(filterid),
                                    filtername,
                                    filterdesc))

    # get the photometric apertures
    metadict['lcapertures'] = {}
    for row in photapertureinfo:
        apnum, appix = row.split(' - ')
        appix = float(appix.rstrip(' px'))
        metadict['lcapertures'][apnum.strip()] = appix

    # get the columns
    metadict['columns'] = []
    for row in columninfo:
        colnum, colname, coldesc = row.split(' - ')
        metadict['columns'].append(colname)

    return metadict


##################################
## READING LC CSV FORMAT LCC V1 ##
##################################

def _parse_csv_header_lcc_csv_v1(headerlines):
    '''
    This parses the header of the LCC CSV V1 LC format.

    '''

    # the first three lines indicate the format name, comment char, separator
    commentchar = headerlines[1]
    separator = headerlines[2]

    headerlines = [x.lstrip('%s ' % commentchar) for x in headerlines[3:]]

    # next, find the indices of the various LC sections
    metadatastart = headerlines.index('OBJECT METADATA')
    columnstart = headerlines.index('COLUMN DEFINITIONS')
    lcstart = headerlines.index('LIGHTCURVE')

    metadata = ' '.join(headerlines[metadatastart+1:columnstart-1])
    columns = ' '.join(headerlines[columnstart+1:lcstart-1])

    metadata = json.loads(metadata)
    columns = json.loads(columns)

    return metadata, columns, separator
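
# Schematically, the header this parser expects looks something like the
# following (a sketch only; the actual keys come from the LCC-Server and the
# JSON bodies are abbreviated here):
#
#   LCC-CSVLC-V1
#   #
#   ,
#   # OBJECT METADATA
#   # {"objectid": {"val": "HAT-123-0001234", "desc": "..."}, ...}
#   # COLUMN DEFINITIONS
#   # {"rjd": {"colnum": 0, "dtype": "f8", "desc": "..."}, ...}
#   # LIGHTCURVE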

def read_lcc_csvlc(lcfile):
    '''This reads a CSV LC produced by an `LCC-Server
    <https://github.com/waqasbhatti/lcc-server>`_ instance.

    Parameters
    ----------

    lcfile : str
        The LC file to read.

    Returns
    -------

    dict
        Returns an lcdict that's readable by most astrobase functions for
        further processing.

    '''

    # read in the file and split by lines
    if '.gz' in os.path.basename(lcfile):
        infd = gzip.open(lcfile,'rb')
    else:
        infd = open(lcfile,'rb')

    lctext = infd.read().decode()
    infd.close()

    lctextlines = lctext.split('\n')
    lcformat = lctextlines[0]
    commentchar = lctextlines[1]

    lcstart = lctextlines.index('%s LIGHTCURVE' % commentchar)
    headerlines = lctextlines[:lcstart+1]
    lclines = lctextlines[lcstart+1:]

    metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)

    # break out the objectid and objectinfo
    objectid = metadata['objectid']['val']
    objectinfo = {key:metadata[key]['val'] for key in metadata}

    # figure out the column dtypes
    colnames = []
    colnum = []
    coldtypes = []

    # generate the args for np.genfromtxt
    for k in columns:

        coldef = columns[k]
        colnames.append(k)
        colnum.append(coldef['colnum'])
        coldtypes.append(coldef['dtype'])

    coldtypes = ','.join(coldtypes)

    # read in the LC
    recarr = np.genfromtxt(
        lclines,
        comments=commentchar,
        delimiter=separator,
        usecols=colnum,
        autostrip=True,
        names=colnames,
        dtype=coldtypes
    )

    lcdict = {x:recarr[x] for x in colnames}
    lcdict['lcformat'] = lcformat
    lcdict['objectid'] = objectid
    lcdict['objectinfo'] = objectinfo
    lcdict['columns'] = colnames
    lcdict['coldefs'] = columns
    lcdict['metadata'] = metadata

    return lcdict


def describe_lcc_csv(lcdict, returndesc=False):
    '''
    This describes the LCC CSV format light curve file.

    Parameters
    ----------

    lcdict : dict
        The input lcdict to parse for column and metadata info.

    returndesc : bool
        If True, returns the description string as an str instead of just
        printing it to stdout.

    Returns
    -------

    str or None
        If returndesc is True, returns the description lines as a str,
        otherwise returns nothing.

    '''

    metadata_lines = []
    coldef_lines = []

    if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():

        metadata = lcdict['metadata']
        metakeys = lcdict['objectinfo'].keys()
        coldefs = lcdict['coldefs']

        for mk in metakeys:
            metadata_lines.append(
                '%20s | %s' % (
                    mk,
                    metadata[mk]['desc']
                )
            )

        for ck in lcdict['columns']:

            coldef_lines.append('column %02d | %8s | numpy dtype: %3s | %s' %
                                (coldefs[ck]['colnum'],
                                 ck,
                                 coldefs[ck]['dtype'],
                                 coldefs[ck]['desc']))

        desc = LCC_CSVLC_DESCTEMPLATE.format(
            objectid=lcdict['objectid'],
            metadata_desc='\n'.join(metadata_lines),
            metadata=pformat(lcdict['objectinfo']),
            columndefs='\n'.join(coldef_lines)
        )

        print(desc)

        if returndesc:
            return desc

    else:

        LOGERROR("this lcdict is not from an LCC CSV, can't figure it out...")
        return None
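
# Usage is straightforward (a sketch; the file name is hypothetical):
#
#   lcdict = read_lcc_csvlc('HAT-123-0001234-csvlc.gz')
#   times, mags = lcdict['rjd'], lcdict['aep_000']
#
# the column access assumes 'rjd' and 'aep_000' are among the columns the
# LCC-Server put into this particular LC.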

#####################################
## MULTIFORMAT CSV READER FUNCTION ##
#####################################

def read_csvlc(lcfile):
    '''This reads a HAT data server or LCC-Server produced CSV light curve
    into an lcdict.

    This will automatically figure out the format of the file
    provided. Currently, it can read:

    - legacy HAT data server CSV LCs (e.g. from
      https://hatsouth.org/planets/lightcurves.html) with an extension of the
      form: `.hatlc.csv.gz`.

    - all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
      https://data.hatsurveys.org) with an extension of the form:
      `-csvlc.gz`.

    Parameters
    ----------

    lcfile : str
        The light curve file to read.

    Returns
    -------

    dict
        Returns an lcdict that can be read and used by many astrobase
        processing functions.

    '''

    # read in the file and split by lines
    if '.gz' in os.path.basename(lcfile):
        LOGINFO('reading gzipped HATLC: %s' % lcfile)
        infd = gzip.open(lcfile,'rb')
    else:
        LOGINFO('reading HATLC: %s' % lcfile)
        infd = open(lcfile,'rb')

    # this transparently reads LCC CSVLCs
    lcformat_check = infd.read(12).decode()
    if 'LCC-CSVLC' in lcformat_check:
        infd.close()
        return read_lcc_csvlc(lcfile)
    else:
        infd.seek(0)

    # below is reading the HATLC v2 CSV LCs
    lctext = infd.read().decode()  # argh Python 3
    infd.close()

    # figure out the header and get the LC columns
    lcstart = lctext.index('# LIGHTCURVE\n')
    lcheader = lctext[:lcstart+12]
    lccolumns = lctext[lcstart+13:].split('\n')
    lccolumns = [x for x in lccolumns if len(x) > 0]

    # initialize the lcdict and parse the CSV header
    lcdict = _parse_csv_header(lcheader)

    # transpose the LC rows into columns
    lccolumns = [x.split(',') for x in lccolumns]
    lccolumns = list(zip(*lccolumns))  # argh more Python 3

    # write the columns to the dict
    for colind, col in enumerate(lcdict['columns']):

        if (col.split('_')[0] in LC_MAG_COLUMNS or
            col.split('_')[0] in LC_ERR_COLUMNS or
            col.split('_')[0] in LC_FLAG_COLUMNS):
            lcdict[col] = np.array(
                [_smartcast(x, COLUMNDEFS[col.split('_')[0]][2])
                 for x in lccolumns[colind]]
            )

        elif col in COLUMNDEFS:
            lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][2])
                                    for x in lccolumns[colind]])

        else:
            LOGWARNING('lcdict col %s has no formatter available' % col)
            continue

    return lcdict


#########################
## NORMALIZING LCDICTS ##
#########################

def find_lc_timegroups(lctimes, mingap=4.0):
    '''This finds the time gaps in the light curve, so we can figure out
    which times are for consecutive observations and which represent gaps
    between seasons.

    Parameters
    ----------

    lctimes : np.array
        This is the input array of times, assumed to be in some form of JD.

    mingap : float
        This defines how much the difference between consecutive measurements
        is allowed to be to consider them as parts of different
        timegroups. By default it is set to 4.0 days.

    Returns
    -------

    tuple
        A tuple of the form below is returned, containing the number of time
        groups found and Python slice objects for each group::

            (ngroups, [slice(start_ind_1, end_ind_1), ...])

    '''

    lc_time_diffs = [(lctimes[x] - lctimes[x-1]) for x in
                     range(1,len(lctimes))]
    lc_time_diffs = np.array(lc_time_diffs)

    group_start_indices = np.where(lc_time_diffs > mingap)[0]

    if len(group_start_indices) > 0:

        group_indices = []

        for i, gindex in enumerate(group_start_indices):

            if i == 0:
                group_indices.append(slice(0,gindex+1))
            else:
                group_indices.append(slice(group_start_indices[i-1]+1,
                                           gindex+1))

        # at the end, add the slice for the last group to the end of the
        # times array
        group_indices.append(slice(group_start_indices[-1]+1,
                                   len(lctimes)))

    # if there's no large gap in the LC, then there's only one group to
    # worry about
    else:
        group_indices = [slice(0,len(lctimes))]

    return len(group_indices), group_indices
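
# A quick worked example (a sketch):
#
#   >>> times = np.array([0.0, 0.5, 1.0, 10.0, 10.5, 30.0])
#   >>> find_lc_timegroups(times, mingap=4.0)
#   (3, [slice(0, 3, None), slice(3, 5, None), slice(5, 6, None)])
#
# i.e. the gaps 1.0 -> 10.0 and 10.5 -> 30.0 exceed mingap, so the LC breaks
# into three groups.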

def normalize_lcdict(lcdict,
                     timecol='rjd',
                     magcols='all',
                     mingap=4.0,
                     normto='sdssr',
                     debugmode=False,
                     quiet=False):
    '''This normalizes magcols in `lcdict` using `timecol` to find
    timegroups.

    Parameters
    ----------

    lcdict : dict
        The input lcdict to process.

    timecol : str
        The key in the lcdict that is to be used to extract the time column.

    magcols : 'all' or list of str
        If this is 'all', all of the columns in the lcdict that are indicated
        to be magnitude measurement columns are normalized. If this is a list
        of str, must contain the keys of the lcdict specifying which
        magnitude columns will be normalized.

    mingap : float
        This defines how much the difference between consecutive measurements
        is allowed to be to consider them as parts of different
        timegroups. By default it is set to 4.0 days.

    normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
        This indicates which column will be the normalization target. If this
        is 'globalmedian', the normalization will be to the global median of
        each LC column. If this is 'zero', will normalize to 0.0 for each LC
        column. Otherwise, will normalize to the value of one of the other
        keys in the lcdict['objectinfo'][magkey], meaning the normalization
        will be to some form of catalog magnitude.

    debugmode : bool
        If True, will indicate progress as time-groups are found and
        processed.

    quiet : bool
        If True, will not emit any messages when processing.

    Returns
    -------

    dict
        Returns the lcdict with the magnitude measurements normalized as
        specified. The normalization happens IN PLACE.

    '''

    # check if this lc has been normalized already. return as-is if so
    if 'lcnormcols' in lcdict and len(lcdict['lcnormcols']) > 0:
        if not quiet:
            LOGWARNING('this lightcurve is already normalized, returning...')
        return lcdict

    # first, get the LC timegroups
    if timecol in lcdict:
        times = lcdict[timecol]
    elif 'rjd' in lcdict:
        times = lcdict['rjd']

    # if there aren't any time columns in this lcdict, then we can't do any
    # normalization, return it as-is
    else:
        LOGERROR("can't figure out the time column to use, lcdict cols = %s"
                 % lcdict['columns'])
        return lcdict

    ngroups, timegroups = find_lc_timegroups(np.array(times),
                                             mingap=mingap)

    # HATLC V2 format
    if 'lcapertures' in lcdict:
        apertures = sorted(lcdict['lcapertures'].keys())

    # LCC-CSV-V1 format HATLC
    elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
        apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())

    aimcols = [('aim_%s' % x) for x in apertures
               if ('aim_%s' % x) in lcdict]
    armcols = [('arm_%s' % x) for x in apertures
               if ('arm_%s' % x) in lcdict]
    aepcols = [('aep_%s' % x) for x in apertures
               if ('aep_%s' % x) in lcdict]
    atfcols = [('atf_%s' % x) for x in apertures
               if ('atf_%s' % x) in lcdict]
    psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
    irmcols = [('irm_%s' % x) for x in apertures
               if ('irm_%s' % x) in lcdict]
    iepcols = [('iep_%s' % x) for x in apertures
               if ('iep_%s' % x) in lcdict]
    itfcols = [('itf_%s' % x) for x in apertures
               if ('itf_%s' % x) in lcdict]

    # next, find all the mag columns to normalize
    if magcols == 'all':
        cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
                             psimcols + irmcols + iepcols + itfcols)
    elif magcols == 'redmags':
        cols_to_normalize = (armcols +
                             (['psrm'] if 'psrm' in lcdict else []) +
                             irmcols)
    elif magcols == 'epdmags':
        cols_to_normalize = (aepcols +
                             (['psep'] if 'psep' in lcdict else []) +
                             iepcols)
    elif magcols == 'tfamags':
        cols_to_normalize = (atfcols +
                             (['pstf'] if 'pstf' in lcdict else []) +
                             itfcols)
    elif magcols == 'epdtfa':
        cols_to_normalize = (aepcols +
                             (['psep'] if 'psep' in lcdict else []) +
                             iepcols +
                             atfcols +
                             (['pstf'] if 'pstf' in lcdict else []) +
                             itfcols)
    else:
        cols_to_normalize = magcols.split(',')
        cols_to_normalize = [x.strip() for x in cols_to_normalize]

    colsnormalized = []

    # now, normalize each column
    for col in cols_to_normalize:

        if col in lcdict:

            mags = lcdict[col]
            mags = [(nan if x is None else x) for x in mags]
            mags = np.array(mags)

            colsnormalized.append(col)

            # find all the non-nan indices
            finite_ind = np.isfinite(mags)

            if any(finite_ind):

                # find the global median
                global_mag_median = np.median(mags[finite_ind])

                # go through the groups and normalize them to the median
                # for each group
                for tgind, tg in enumerate(timegroups):

                    finite_ind = np.isfinite(mags[tg])

                    # find this timegroup's median mag and normalize the
                    # mags in it to this median
                    group_median = np.median((mags[tg])[finite_ind])
                    mags[tg] = mags[tg] - group_median

                    if debugmode:
                        LOGDEBUG('%s group %s: elems %s, '
                                 'finite elems %s, median mag %s' %
                                 (col, tgind,
                                  len(mags[tg]),
                                  len(finite_ind),
                                  group_median))

            else:
                LOGWARNING('column %s is all nan, skipping...' % col)
                continue

            # now that everything is normalized to 0.0, add the global
            # median offset back to all the mags and write the result back
            # to the dict
            if normto == 'globalmedian':
                mags = mags + global_mag_median
            elif normto in ('jmag', 'hmag', 'kmag',
                            'bmag', 'vmag',
                            'sdssg', 'sdssr', 'sdssi'):
                if (normto in lcdict['objectinfo'] and
                    lcdict['objectinfo'][normto] is not None):
                    mags = mags + lcdict['objectinfo'][normto]
                else:
                    if not quiet:
                        LOGWARNING('no %s available in lcdict, '
                                   'normalizing to global mag median' %
                                   normto)
                    normto = 'globalmedian'
                    mags = mags + global_mag_median

            lcdict[col] = mags

        else:
            if not quiet:
                LOGWARNING('column %s is not present, skipping...' % col)
            continue

    # add the lcnormcols key to the lcdict
    lcnormcols = ('cols normalized: %s - '
                  'min day gap: %s - '
                  'normalized to: %s') % (
                      repr(colsnormalized),
                      mingap,
                      normto
                  )

    lcdict['lcnormcols'] = lcnormcols

    return lcdict
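
# For example (a sketch; the column names assume aperture 000 exists in this
# LC):
#
#   lcdict = normalize_lcdict(lcdict, magcols='aep_000,atf_000',
#                             normto='sdssr')
#
# each named column is split into timegroups, zeroed on each group's median,
# then shifted to the catalog SDSS r magnitude.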

def normalize_lcdict_byinst(
        lcdict,
        magcols='all',
        normto='sdssr',
        normkeylist=('stf','ccd','flt','fld','prj','exp'),
        debugmode=False,
        quiet=False
):
    '''This is a function to normalize light curves across all instrument
    combinations present.

    Use this to normalize a light curve containing a variety of:

    - HAT station IDs ('stf')
    - camera IDs ('ccd')
    - filters ('flt')
    - observed field names ('fld')
    - HAT project IDs ('prj')
    - exposure times ('exp')

    Parameters
    ----------

    lcdict : dict
        The input lcdict to process.

    magcols : 'all' or list of str
        If this is 'all', all of the columns in the lcdict that are indicated
        to be magnitude measurement columns are normalized. If this is a list
        of str, must contain the keys of the lcdict specifying which
        magnitude columns will be normalized.

    normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
        This indicates which column will be the normalization target. If this
        is 'zero', will normalize to 0.0 for each LC column. Otherwise, will
        normalize to the value of one of the other keys in the
        lcdict['objectinfo'][magkey], meaning the normalization will be to
        some form of catalog magnitude.

    normkeylist : list of str
        These are the column keys to use to form the normalization index.
        Measurements in the specified `magcols` with identical normalization
        index values will be considered as part of a single measurement
        'era', and will be normalized to zero. Once all eras have been
        normalized this way, the final light curve will be re-normalized as
        specified in `normto`.

    debugmode : bool
        If True, will indicate progress as time-groups are found and
        processed.

    quiet : bool
        If True, will not emit any messages when processing.

    Returns
    -------

    dict
        Returns the lcdict with the magnitude measurements normalized as
        specified. The normalization happens IN PLACE.

    '''
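    # To illustrate the era idea (hypothetical values): with
    # normkeylist=('stf','flt'), rows with stf=(5,5,7) and flt=(2,2,2) get
    # the keys '5,2', '5,2', '7,2' below, so the first two measurements form
    # one era and the third forms another.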
    # check if this lc has been normalized already. return as-is if so
    if 'lcinstnormcols' in lcdict and len(lcdict['lcinstnormcols']) > 0:
        if not quiet:
            LOGWARNING('this lightcurve is already '
                       'normalized by instrument keys, '
                       'returning...')
        return lcdict

    # generate the normalization key
    normkeycols = []
    availablenormkeys = []

    for key in normkeylist:
        if key in lcdict and lcdict[key] is not None:
            normkeycols.append(lcdict[key])
            availablenormkeys.append(key)

    # transpose to turn these into rows
    normkeycols = list(zip(*normkeycols))

    # convert to a string rep for each key and post-process for simplicity
    allkeys = [repr(x) for x in normkeycols]
    allkeys = [
        a.replace('(','').replace(')','').replace("'",'').replace(' ','')
        for a in allkeys
    ]

    # turn these into a numpy array and get the unique values
    allkeys = np.array(allkeys)
    normkeys = np.unique(allkeys)

    # figure out the apertures
    # HATLC V2 format
    if 'lcapertures' in lcdict:
        apertures = sorted(lcdict['lcapertures'].keys())

    # LCC-CSV-V1 format HATLC
    elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
        apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())

    # put together the column names
    aimcols = [('aim_%s' % x) for x in apertures
               if ('aim_%s' % x) in lcdict]
    armcols = [('arm_%s' % x) for x in apertures
               if ('arm_%s' % x) in lcdict]
    aepcols = [('aep_%s' % x) for x in apertures
               if ('aep_%s' % x) in lcdict]
    atfcols = [('atf_%s' % x) for x in apertures
               if ('atf_%s' % x) in lcdict]
    psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
    irmcols = [('irm_%s' % x) for x in apertures
               if ('irm_%s' % x) in lcdict]
    iepcols = [('iep_%s' % x) for x in apertures
               if ('iep_%s' % x) in lcdict]
    itfcols = [('itf_%s' % x) for x in apertures
               if ('itf_%s' % x) in lcdict]

    # next, find all the mag columns to normalize
    if magcols == 'all':
        cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
                             psimcols + irmcols + iepcols + itfcols)
    elif magcols == 'redmags':
        cols_to_normalize = (armcols +
                             (['psrm'] if 'psrm' in lcdict else []) +
                             irmcols)
    elif magcols == 'epdmags':
        cols_to_normalize = (aepcols +
                             (['psep'] if 'psep' in lcdict else []) +
                             iepcols)
    elif magcols == 'tfamags':
        cols_to_normalize = (atfcols +
                             (['pstf'] if 'pstf' in lcdict else []) +
                             itfcols)
    elif magcols == 'epdtfa':
        cols_to_normalize = (aepcols +
                             (['psep'] if 'psep' in lcdict else []) +
                             iepcols +
                             atfcols +
                             (['pstf'] if 'pstf' in lcdict else []) +
                             itfcols)
    else:
        cols_to_normalize = magcols.split(',')
        cols_to_normalize = [x.strip() for x in cols_to_normalize]

    colsnormalized = []

    # go through each column and normalize them
    for col in cols_to_normalize:

        if col in lcdict:

            # note: this requires the columns in ndarray format
            # unlike normalize_lcdict
            thismags = lcdict[col]

            # go through each key in normkeys
            for nkey in normkeys:

                thisind = allkeys == nkey

                # make sure we have at least 3 elements in the matched set
                # of magnitudes corresponding to this key. also make sure
                # that the magnitudes corresponding to this key aren't all
                # nan.
                thismagsize = thismags[thisind].size
                thismagfinite = np.where(
                    np.isfinite(thismags[thisind])
                )[0].size

                if thismagsize > 2 and thismagfinite > 2:

                    # do the normalization and update the thismags in the
                    # lcdict
                    medmag = np.nanmedian(thismags[thisind])
                    lcdict[col][thisind] = lcdict[col][thisind] - medmag

                    if debugmode:
                        LOGDEBUG('magcol: %s, currkey: "%s", nelem: %s, '
                                 'medmag: %s' %
                                 (col, nkey,
                                  len(thismags[thisind]),
                                  medmag))

                # we remove mags that correspond to keys with less than 3
                # (finite) elements because we can't get the median mag
                # correctly and renormalizing them to zero would just set
                # them to zero
                else:
                    lcdict[col][thisind] = np.nan

            # everything should now be normalized to zero
            # add back the requested normto
            if normto in ('jmag', 'hmag', 'kmag',
                          'bmag', 'vmag',
                          'sdssg', 'sdssr', 'sdssi'):

                if (normto in lcdict['objectinfo'] and
                    lcdict['objectinfo'][normto] is not None):
                    lcdict[col] = lcdict[col] + lcdict['objectinfo'][normto]

                else:

                    if not quiet:
                        LOGWARNING('no %s available in lcdict, '
                                   'normalizing to 0.0' % normto)
                    normto = 'zero'

            # update the colsnormalized list
            colsnormalized.append(col)

        else:
            if not quiet:
                LOGWARNING('column %s is not present, skipping...' % col)
            continue

    # add the lcnormcols key to the lcdict
    lcinstnormcols = ('cols normalized: %s - '
                      'normalized to: %s - '
                      'norm keys used: %s') % (repr(colsnormalized),
                                               normto,
                                               repr(availablenormkeys))

    lcdict['lcinstnormcols'] = lcinstnormcols

    return lcdict


def main():
    '''
    This is called when we're executed from the commandline.

    The current usage from the command-line is described below::

        usage: hatlc [-h] [--describe] hatlcfile

        read a HAT LC of any format and output to stdout

        positional arguments:
          hatlcfile   path to the light curve you want to read and pipe to
                      stdout

        optional arguments:
          -h, --help  show this help message and exit
          --describe  don't dump the columns, show only object info and LC
                      metadata

    '''

    # handle SIGPIPE sent by less, head, et al.
    import signal
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    import argparse

    aparser = argparse.ArgumentParser(
        description='read a HAT LC of any format and output to stdout'
    )

    aparser.add_argument(
        'hatlcfile',
        action='store',
        type=str,
        help=("path to the light curve you want to read and pipe to stdout")
    )

    aparser.add_argument(
        '--describe',
        action='store_true',
        default=False,
        help=("don't dump the columns, show only object info and LC metadata")
    )

    args = aparser.parse_args()

    filetoread = args.hatlcfile
    if not os.path.exists(filetoread):
        LOGERROR("file provided: %s doesn't seem to exist" % filetoread)
        sys.exit(1)

    # figure out the type of LC this is
    filename = os.path.basename(filetoread)

    # switch based on filetype
    if filename.endswith('-hatlc.csv.gz') or filename.endswith('-csvlc.gz'):

        if args.describe:

            describe(read_csvlc(filetoread))
            sys.exit(0)

        else:

            with gzip.open(filetoread,'rb') as infd:
                for line in infd:
                    print(line.decode(),end='')

    elif filename.endswith('-hatlc.sqlite.gz'):

        lcdict, msg = read_and_filter_sqlitecurve(filetoread)

        # dump the description
        describe(lcdict, offsetwith='#')

        # stop here if describe is True
        if args.describe:
            sys.exit(0)

        # otherwise, continue to parse the cols, etc.
        # get the aperture names
        apertures = sorted(lcdict['lcapertures'].keys())

        # update column defs per aperture
        for aper in apertures:
            COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x]
                               for x in LC_MAG_COLUMNS})
            COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x]
                               for x in LC_ERR_COLUMNS})
            COLUMNDEFS.update({'%s_%s' % (x, aper): COLUMNDEFS[x]
                               for x in LC_FLAG_COLUMNS})

        formstr = ','.join([COLUMNDEFS[x][1] for x in lcdict['columns']])
        ndet = lcdict['objectinfo']['ndet']

        for ind in range(ndet):
            line = [lcdict[x][ind] for x in lcdict['columns']]
            formline = formstr % tuple(line)
            print(formline)

    else:

        LOGERROR('unrecognized HATLC file: %s' % filetoread)
        sys.exit(1)


# we use this to enable command-line cat-like dumping of a HAT light curve
if __name__ == '__main__':
    main()