36 changes: 26 additions & 10 deletions trunk/bin/LCOGTingest.py
@@ -84,7 +84,7 @@ def download_frame(frame, force=False):
filename = frame['filename']
with open(filepath + filename, 'wb') as f:
f.write(requests.get(frame['url']).content)

if filename[-3:] == '.fz' and (not os.path.isfile(filepath + filename[:-3]) or force):
logger.info('unpacking {}'.format(filename))
if os.path.exists(filepath + filename[:-3]):
@@ -153,6 +153,20 @@ def download_frame(frame, force=False):
'fwhm': 'AGFWHM',
'tracknumber': 'TRACKNUM'}

spec_to_hdrkey = {'objname': 'OBJECT',
'dayobs': 'DAY-OBS',
'dateobs': 'DATE-OBS',
'ut': 'UTSTART',
'mjd': 'MJD-OBS',
'exptime': 'EXPTIME',
'filter': 'FILTER',
'telescope': 'TELESCOP',
'instrument': 'INSTRUME',
'airmass': 'AIRMASS',
'slit': 'APERWID',
'ra0': 'RA',
'dec0': 'DEC'}
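
For context, a minimal sketch (not part of the patch) of how a mapping like spec_to_hdrkey is typically applied: each database column is filled from the corresponding FITS header card. The helper name and the use of hdr.get for absent cards are assumptions for illustration only.

from astropy.io import fits

def header_to_row(filepath, filename, mapping):
    # Build a {db_column: header_value} dict from a FITS header using a
    # mapping like spec_to_hdrkey above; absent cards become None.
    hdr = fits.getheader(filepath + filename)
    row = {'filename': filename, 'filepath': filepath}
    for column, key in mapping.items():
        row[column] = hdr.get(key)
    return row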

def get_groupidcode(hdr):
if 'tracknum' in hdr and hdr['tracknum'] != 'UNSPECIFIED':
result = lsc.mysqldef.query(['''select obsrequests.groupidcode, obsrequests.targetid
@@ -169,23 +183,27 @@ def get_groupidcode(hdr):
groupidcode = result[0]['groupidcode']
return groupidcode, targetid

def db_ingest(filepath, filename, force=False):
def db_ingest(filepath, filename, table, force=False): # now accepts the table name as an argument
'''Read an image header and add a row to the database'''
global telescopeids, instrumentids
if '-en' in filename:
table = 'speclcoraw'

if table == 'spec':
db_to_hdrkey = spec_to_hdrkey

if table == 'speclcoraw':
db_to_hdrkey = speclcoraw_to_hdrkey
else:
table = 'photlcoraw'

if table == 'photlcoraw':
db_to_hdrkey = photlcoraw_to_hdrkey

fileindb = lsc.mysqldef.getfromdataraw(conn, table, 'filename', filename, column2='filepath')
if fileindb:
filepath = fileindb[0]['filepath'] # could be marked as bad
if not fileindb or force:
if filename[-3:] == '.fz':
hdr = fits.getheader(filepath + filename, 1)
hdr = fits.getheader(filepath + filename, 1) # from banzai file format
else:
hdr = fits.getheader(filepath + filename)
hdr = fits.getheader(filepath + filename) # from banzai file format
groupidcode, targetid = get_groupidcode(hdr)
dbdict = {'filename': filename,
'filepath': filepath,
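
The three if blocks above select the header-key mapping for the requested table. As a sketch only (not the patch's code, and reusing the module's mapping dicts), the same selection can be written as a single lookup that also fails loudly for an unexpected table name:

HDRKEY_BY_TABLE = {'spec': spec_to_hdrkey,
                   'speclcoraw': speclcoraw_to_hdrkey,
                   'photlcoraw': photlcoraw_to_hdrkey}

def pick_hdrkey_mapping(table):
    # Equivalent to the chained ifs in db_ingest; raises for unknown tables
    # instead of leaving db_to_hdrkey undefined.
    try:
        return HDRKEY_BY_TABLE[table]
    except KeyError:
        raise ValueError('unrecognized table: {}'.format(table))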
@@ -278,8 +296,6 @@ def record_floyds_tar_link(authtoken, frame, force=False):

if args.username and args.password:
authtoken = authenticate(args.username, args.password)
elif os.getenv('LCO_API_KEY'):
authtoken = {'Authorization': 'Token ' + os.environ['LCO_API_KEY']}
else:
authtoken = {}

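The new elif above lets the script pick up an API token from the LCO_API_KEY environment variable when no username/password is supplied. A rough usage sketch of how such a token header is passed with requests; the endpoint URL and query parameters here are illustrative, not taken from the patch.

import os
import requests

authtoken = {}
if os.getenv('LCO_API_KEY'):
    authtoken = {'Authorization': 'Token ' + os.environ['LCO_API_KEY']}

# The token header accompanies every archive query, e.g.:
r = requests.get('https://archive-api.lco.global/frames/',
                 headers=authtoken, params={'limit': 1})
r.raise_for_status()
print(r.json().get('count'))
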
4 changes: 3 additions & 1 deletion trunk/bin/calibratemag.py
@@ -130,7 +130,9 @@ def combine_nights(combined_catalog, filterlist, refcat):
if args.stage in ['abscat', 'local'] and args.catalog is not None:
try:
refcat = Table.read(args.catalog, format='ascii', fill_values=[('9999.000', '0')])
if 'SOURCE_ID' in refcat.colnames: # Gaia catalog
if 'source_id' in refcat.colnames: # Gaia catalog
refcat.rename_column('source_id', 'id')
elif 'SOURCE_ID' in refcat.colnames: # Gaia catalog
refcat.rename_column('SOURCE_ID', 'id')
else:
colnames = [row.split()[0] for row in refcat.meta['comments'] if len(row.split()) == 6]
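The added branch handles Gaia catalogs whose identifier column is written in lower case ('source_id') as well as the upper-case 'SOURCE_ID' form. A hedged alternative (a sketch, not the patch) normalizes the name case-insensitively, covering both spellings with one loop:

def rename_gaia_id(refcat):
    # refcat is assumed to be an astropy Table, as read in calibratemag.py.
    for name in refcat.colnames:
        if name.lower() == 'source_id':
            refcat.rename_column(name, 'id')
            return True
    return False
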
23 changes: 18 additions & 5 deletions trunk/bin/ingestall.py
@@ -41,9 +41,14 @@
for telid in ['2m0a', '1m0a', '0m4a', '0m4b', '0m4c']:
frames += get_metadata(authtoken, start=start, end=end, PROPID='Photometric standards', OBSTYPE='STANDARD', TELID=telid, RLEVEL=91) # all photometric standards (except SQA)
frames += get_metadata(authtoken, start=start, end=end, PROPID='Photometric standards', OBSTYPE='EXPOSE', TELID=telid, RLEVEL=91) # all photometric standards (except SQA)
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en06', RLEVEL=0, public=False) # all FTN spectra SNEx is a co-I
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en05', RLEVEL=0, public=False) # all FTS spectra SNEx is a co-I
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en12', RLEVEL=0, public=False) # all FTS spectra SNEx is a co-I
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en06', basename='e91-1d', public=False) # all FTN spectra SNEx is a co-I, checks basename for Banzai-floyds 1ds
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en05', basename='e91-1d', public=False) # all FTS spectra SNEx is a co-I, checks basename for Banzai-floyds 1ds
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en12', basename='e91-1d', public=False) # all FTS spectra SNEx is a co-I, checks basename for Banzai-floyds 1ds

frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en06', RLEVEL=0, public=False) # all FTN spectra SNEx is a co-I, keeping e00 for floyds inbox (IRAF) reductions
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en05', RLEVEL=0, public=False) # all FTS spectra SNEx is a co-I, keeping e00 for floyds inbox (IRAF) reductions
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en12', RLEVEL=0, public=False) # all FTS spectra SNEx is a co-I, keeping e00 for floyds inbox (IRAF) reductions

frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en06', RLEVEL=0, PROPID='FLOYDS standards') # FTN standard star spectra
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en05', RLEVEL=0, PROPID='FLOYDS standards') # FTS standard star spectra
frames += get_metadata(authtoken, start=start, end=end, INSTRUME='en12', RLEVEL=0, PROPID='FLOYDS standards') # FTS standard star spectra
@@ -62,12 +67,20 @@
traceback.print_exc()
continue
try:
dbdict = db_ingest(filepath, filename)
if '-en' in filename: # ingest the 1d spectra into both speclcoraw and spec
table_raw = 'speclcoraw' # this table is what the scheduler checks; it shouldn't be updated with versions
dbdict = db_ingest(filepath, filename, table_raw)
if 'e91-1d' in filename: # only ingest the banzai-floyds 1d products into spec
table_reduced = 'spec' # this table will track version control of spectra that get re-reduced
dbdict = db_ingest(filepath, filename, table_reduced)
if '-en' not in filename and 'e91-1d' not in filename: # all others are photlcoraw
table = 'photlcoraw'
dbdict = db_ingest(filepath, filename, table)
except:
logger.error('!!! FAILED TO INGEST {}'.format(filename))
traceback.print_exc()
continue
if '-en' in filename and '-e00.fits' in filename:
if '-en' in filename and '-e00' in filename: # only make PNGs from raw e00 FLOYDS frames; banzai products are ignored
try:
fits2png(filepath + filename)
except:
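The ingest loop above routes each filename to one or two tables: FLOYDS frames ('-en') go into speclcoraw, BANZAI-FLOYDS 1D extractions ('e91-1d') additionally into spec, and everything else into photlcoraw. A sketch of the same decision as a small helper, under the assumption that those filename patterns hold; the helper name is hypothetical.

def tables_for(filename):
    # Mirrors the three ifs in the ingest loop above.
    tables = []
    if '-en' in filename:
        tables.append('speclcoraw')  # what the scheduler checks; no re-reduction versions
    if 'e91-1d' in filename:
        tables.append('spec')        # version-controlled reduced spectra
    if '-en' not in filename and 'e91-1d' not in filename:
        tables.append('photlcoraw')
    return tables

# e.g. for table in tables_for(filename): db_ingest(filepath, filename, table)
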
13 changes: 9 additions & 4 deletions trunk/src/lsc/lscabsphotdef.py
@@ -1242,7 +1242,12 @@ def gaia2file(ra, dec, size=26., mag_limit=18., output='gaia.cat'):
response['ra'].format ='%16.12f'
response['dec'].format = '%16.12f'
response['phot_g_mean_mag'].format = '%.2f'

gaia_cat = response['ra', 'dec', 'SOURCE_ID', 'phot_g_mean_mag']
gaia_cat.write(output, format='ascii.commented_header',
delimiter=' ', overwrite=True)

try:
gaia_cat = response['ra', 'dec', 'source_id', 'phot_g_mean_mag']
gaia_cat.write(output, format='ascii.commented_header',
delimiter=' ', overwrite=True)
except (KeyError, ValueError): # a missing column in an astropy Table raises KeyError
gaia_cat = response['ra', 'dec', 'SOURCE_ID', 'phot_g_mean_mag']
gaia_cat.write(output, format='ascii.commented_header',
delimiter=' ', overwrite=True)
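
The try/except above falls back from 'source_id' to 'SOURCE_ID' depending on how the Gaia response labels the identifier column. A sketch of an alternative, assuming response is the astropy Table returned by the query above and output is the catalog path; the helper is hypothetical and resolves the column name once before writing:

def first_present(table, candidates):
    # Return the first candidate column name that exists in the table.
    for name in candidates:
        if name in table.colnames:
            return name
    raise KeyError('none of {} found'.format(candidates))

id_col = first_present(response, ('source_id', 'SOURCE_ID'))
gaia_cat = response['ra', 'dec', id_col, 'phot_g_mean_mag']
gaia_cat.write(output, format='ascii.commented_header', delimiter=' ', overwrite=True)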