From 0e70f2b7c3e2e9e2f5bdb2175b608fab0ccbd1bd Mon Sep 17 00:00:00 2001
From: moralejo
Date: Mon, 4 Sep 2023 17:38:31 +0000
Subject: [PATCH 1/8] Script to add (event-wise) to a DL2 file the nominal
 position of a given source in CameraFrame coordinates

---
 .../scripts/lstchain_dl2_add_sourcepos.py     | 70 +++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100755 lstchain/scripts/lstchain_dl2_add_sourcepos.py

diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
new file mode 100755
index 0000000000..8ef5f29d94
--- /dev/null
+++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+import pandas as pd
+import numpy as np
+import astropy.units as u
+import argparse
+from astropy.coordinates import SkyCoord, AltAz
+from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
+from astropy.time import Time
+from lstchain.reco.utils import location, add_delta_t_key, get_effective_time
+from ctapipe.coordinates import CameraFrame, TelescopeFrame
+from ctapipe_io_lst import LSTEventSource
+
+import sys
+
+parser = argparse.ArgumentParser(description="Source position adder. Calculates x,y coordinates of a source in CameraFrame, event-wise.")
+parser.add_argument('-f', '--dl2-file', dest='file', required=True,
+                    type=str, help='DL2 file to be processed')
+parser.add_argument('-s', '--source-name', dest='source_name',
+                    type=str, default='Crab pulsar',
+                    help='Source name (known to astropy; default: %(default)s)')
+
+# NOTE: the position "Crab pulsar" is better defined than "Crab" or
+# "Crab Nebula", for which astropy.coordinates.SkyCoord.from_name returns
+# different values depending on the catalog it uses...
+
+
+def main():
+    args = parser.parse_args()
+
+    source_pos = SkyCoord.from_name(args.source_name)
+    tablename = "/dl2/event/telescope/parameters/LST_LSTCam"
+    new_table_name = 'source_position'
+
+    # Effective focal length, i.e. including the effect of aberration
+    # (which affects all image parameters and hence the reconstructed source
+    # position in CameraFrame):
+    subarray_info = LSTEventSource.create_subarray(tel_id=1)
+    focal = subarray_info.tel[1].optics.effective_focal_length
+    # By using this effective focal length, the calculated nominal source
+    # position will be consistent with the reconstructed event directions in CameraFrame
+
+    print('Selected source name:', args.source_name)
+    print('focal length for calculation:', focal)
+    print('Processing', args.file, '...')
+
+    table = pd.read_hdf(args.file, tablename)
+
+    pointing_alt = np.array(table['alt_tel']) * u.rad
+    pointing_az = np.array(table['az_tel']) * u.rad
+
+    time_utc = Time(table["dragon_time"], format="unix", scale="utc")
+    telescope_pointing = SkyCoord(alt=pointing_alt, az=pointing_az,
+                                  frame=AltAz(obstime=time_utc,
+                                              location=location))
+
+    # CameraFrame is terribly slow without the erfa interpolator below...
+    with erfa_astrom.set(ErfaAstromInterpolator(5 * u.min)):
+        camera_frame = CameraFrame(focal_length=focal,
+                                   telescope_pointing=telescope_pointing,
+                                   location=location, obstime=time_utc)
+        source_pos_camera = source_pos.transform_to(camera_frame)
+
+    # Units: m (like reco_src_x, y)
+    table['src_x'] = source_pos_camera.data.x.to_value(u.m)
+    table['src_y'] = source_pos_camera.data.y.to_value(u.m)
+
+    table[['src_x', 'src_y']].to_hdf(args.file, new_table_name, mode='r+',
+                                     format='table', data_columns=True)
+    print('... done!')
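For anyone trying the new script: the table it appends can be read back directly with pandas, since it is written with to_hdf in 'table' format. A minimal sketch (the run number in the file name is made up):

    import pandas as pd

    # Read back the per-event nominal source position appended by
    # lstchain_dl2_add_sourcepos.py (file name is hypothetical):
    df = pd.read_hdf("dl2_LST-1.Run02977.h5", "source_position")
    print(df[["src_x", "src_y"]].head())  # CameraFrame coordinates, in metres
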
From 98f7af9c6e2ab86122f742ab9b7b829d75d6b31f Mon Sep 17 00:00:00 2001
From: moralejo
Date: Mon, 4 Sep 2023 17:48:39 +0000
Subject: [PATCH 2/8] Removed unused imports

---
 lstchain/scripts/lstchain_dl2_add_sourcepos.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
index 8ef5f29d94..60fc0d60ac 100755
--- a/lstchain/scripts/lstchain_dl2_add_sourcepos.py
+++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
@@ -7,8 +7,8 @@
 from astropy.coordinates import SkyCoord, AltAz
 from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
 from astropy.time import Time
-from lstchain.reco.utils import location, add_delta_t_key, get_effective_time
-from ctapipe.coordinates import CameraFrame, TelescopeFrame
+from lstchain.reco.utils import location
+from ctapipe.coordinates import CameraFrame
 from ctapipe_io_lst import LSTEventSource
 
 import sys

From b3cba480ae72507e22c1a1feff7f145b966de57c Mon Sep 17 00:00:00 2001
From: moralejo
Date: Fri, 8 Sep 2023 15:45:49 +0000
Subject: [PATCH 3/8] Notebook for sensitivity calculation, same procedure as
 used in the LST1 performance paper (this notebook was used for the
 cross-check of the published sensitivities)

---
 .../calculate_sensitivity_from_Crab.ipynb     | 2032 +++++++++++++++++
 1 file changed, 2032 insertions(+)
 create mode 100644 notebooks/calculate_sensitivity_from_Crab.ipynb

diff --git a/notebooks/calculate_sensitivity_from_Crab.ipynb b/notebooks/calculate_sensitivity_from_Crab.ipynb
new file mode 100644
index 0000000000..05db02096f
--- /dev/null
+++ b/notebooks/calculate_sensitivity_from_Crab.ipynb
@@ -0,0 +1,2032 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "italian-enforcement",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib.colors as colors\n",
+    "import matplotlib as mpl\n",
+    "import numpy as np\n",
+    "import glob\n",
+    "import astropy.units as u\n",
+    "import tables\n",
+    "import gc\n",
+    "import time\n",
+    "\n",
+    "from astropy.coordinates import SkyCoord, AltAz\n",
+    "from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom\n",
+    "from astropy.time import Time\n",
+    "from lstchain.reco.utils import location, add_delta_t_key, get_effective_time\n",
+    "from os import path\n",
+    "from scipy.stats import binned_statistic\n",
+    "from pyirf.statistics import li_ma_significance\n",
+    "from lstchain.spectra.crab import crab_magic\n",
+    "from ctapipe.containers import EventType\n",
+    "from pyirf.spectral import CRAB_MAGIC_JHEAP2015\n",
+    "from ctapipe.coordinates import CameraFrame, TelescopeFrame\n",
+    "from gammapy.stats import WStatCountsStatistic\n",
+    "from ctapipe_io_lst import LSTEventSource\n",
+    "from ctapipe.io import read_table\n",
+    "\n",
+    "%matplotlib inline\n",
+    "\n",
+    "#%load_ext filprofiler"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "turkish-psychology",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# This is a notebook to calculate the flux sensitivity using real Crab observations.\n",
+    "#\n",
+    "# NOTE: the inputs of this notebook are DL2 files of Crab observations, \n",
+    "# both for source-independent and source-dependent analyses.\n",
+    "# \n",
+    "# The source-independent files must contain the event-wise nominal position \n",
+    "# of Crab (src_x, src_y), in a table called \"source_position\". This can be \n",
+    "# achieved by processing standard DL2 files with $LSTCHAIN/scripts/lstchain_dl2_add_sourcepos.py\n",
+    "# (this is done like this because it is too costly to compute the position every time we need it)\n",
+    "#\n",
+    "#\n",
+    "# The samples below are those used for the ApJ LST-1 performance (or \"Crab\") paper:\n",
+    "# NOTE!! The full sample (34.2 h of effective time) takes about 3 hours to process in the IT cluster!\n",
+    "# The time is mostly to read the data and fill the \"super-histograms\" from which the event statistics\n",
+    "# are derived. One can fill the histograms once, and then run only the (much faster) second part of the \n",
+    "# notebook with different settings for the sensitivity calculation.\n",
+    "#\n",
+    "# source-independent dataset\n",
+    "tag1 = \"source-independent\"\n",
+    "#dataset1 = glob.glob(\"/fefs/aswg/workspace/abelardo.moralejo/Crab_performance_paper/data_v0.9.9/DL2/process_with*/dl2_LST-1.Run*.h5\")\n",
+    "dataset1 = glob.glob(\"/fefs/aswg/workspace/abelardo.moralejo/Crab_performance_paper/data_v0.9.9/DL2_sourcepos_with_v010/dl2_LST-1.Run*.h5\")\n",
+    "\n",
+    "# Source-dependent dataset\n",
+    "# Created with RFs trained using gammas out to 1 deg, AND additional src-indep params:\n",
+    "tag2 = \"source-dependent+, ring-wobble-0-1-trained\"\n",
+    "dataset2 = glob.glob(\"/fefs/aswg/workspace/seiya.nozaki/Crab_performance_paper/20221027_v0.9.9_crab_tuned/combined_off_axis_1deg/DL2/data/dl2_LST-1.Run*.h5\")\n",
+    "\n",
+    "dataset1.sort()\n",
+    "dataset2.sort()"
+   ]
+  },
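Since the source-independent inputs must already contain the source_position table, a quick sanity check along these lines (file name hypothetical) can save a failed multi-hour run:

    import tables

    # Verify that a DL2 file already carries the "source_position" table
    # required by the notebook (the run number is made up):
    with tables.open_file("dl2_LST-1.Run02977.h5") as f:
        print("source_position" in f.root)  # True if lstchain_dl2_add_sourcepos.py was run
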
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "180ecb50",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Settings for sensitivity calculations:\n",
+    "# (NOTE: one can change these afterwards, to obtain results with different conditions just\n",
+    "# running the second part of the notebook, with no need to re-read all data!!!)\n",
+    "#\n",
+    "alpha = 0.2 # Li & Ma's \"alpha\", ratio of the ON to the OFF exposure\n",
+    "# This is what we *assume* for the calculation, because it is the standard value.\n",
+    "# Note however that for a single LST, and to avoid large systematics, we use only one off\n",
+    "# region to estimate the background (in source-dependent analysis; in source-independent \n",
+    "# analysis it is one region up to 250 GeV and 3 regions for >250 GeV). We then \"extrapolate\"\n",
+    "# that background to what we would have in case we really had alpha=0.2...\n",
+    "\n",
+    "\n",
+    "# Assumed (effective) observation time for the calculation (note: it has nothing to do with \n",
+    "# the actual observation time of the sample used for the calculation!):\n",
+    "obs_time = 50 * u.h \n",
+    "# obs_time = 1/60. * u.h # 1 minute\n",
+    "# obs_time = 0.083333333 * u.h # 5 minutes\n",
+    "# obs_time = 0.5 * u.h\n",
+    "# obs_time = 0.7 * u.h\n",
+    "# obs_time = 5 * u.h\n",
+    "\n",
+    "# The 5-sigma condition is so standard that we do not make it configurable! It is hardcoded. \n",
+    "# Additional sensitivity conditions:\n",
+    "min_n_gammas = 10 # Minimum number of excess events \n",
+    "min_backg_percent = 5 # Excess must be at least this percentage of the background\n"
+   ]
+  },
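To make the role of the assumed alpha concrete: the background measured with one off region is scaled as if alpha=0.2 had been used. A toy illustration with pyirf's li_ma_significance (all counts hypothetical):

    import numpy as np
    from pyirf.statistics import li_ma_significance

    excess, n_off_1region = 120., 1000.  # made-up measured excess and 1-off-region background
    # "Extrapolate" to alpha = 0.2, i.e. as if the off exposure were 5x the on exposure:
    n_on = excess + n_off_1region
    n_off = n_off_1region / 0.2
    print(li_ma_significance(n_on, n_off, alpha=0.2))
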
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "320581e3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Binnings of gammaness, Alpha, theta2 and energy for all calculations:\n",
+    "\n",
+    "# Very fine binnings to allow cut optimization:\n",
+    "gammaness_bins = np.linspace(0, 1, 2001)\n",
+    "alphabins = np.linspace(0, 60, 3001)\n",
+    "theta2bins = np.linspace(0, 0.6, 6001)\n",
+    "\n",
+    "logenergy_bins = np.linspace(-2.0, 2.2, 22) # 5 per decade\n",
+    "\n",
+    "evt_id_bins = [-0.5, 0.5, 1.5] \n",
+    "# for event_id%2 (to separate odd & even event numbers). \n",
+    "# Even event-id's end up in the first bin; odd event-id's in the second one.\n",
+    "# We separate them so that we can optimize the cuts in one and apply them to \n",
+    "# the other (and vice versa)\n",
+    "\n",
+    "# For source-independent analysis we will use one off region up to 250 GeV. \n",
+    "# Three off regions for higher energies:\n",
+    "first_ebin_with_3offs = 7 # i.e. from ~250 GeV onwards\n",
+    "\n",
+    "# Intensity cuts:\n",
+    "min_intensity = 50\n",
+    "max_intensity = 1e10"
+   ]
+  },
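The evt_id_bins above implement the even/odd split via event_id % 2; a toy check (event ids made up):

    import numpy as np

    event_id = np.array([10, 11, 12, 13, 15])  # hypothetical event ids
    odd_or_even = (event_id % 2).astype('float32')
    counts, _ = np.histogram(odd_or_even, bins=[-0.5, 0.5, 1.5])
    print(counts)  # -> [2 3]: even ids in the first bin, odd ids in the second
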
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ae51deb9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# CUTS TO ENSURE THE QUALITY OF THE REAL DATA EXCESS FROM WHICH THE SENSITIVITY WILL BE COMPUTED!\n",
+    "#\n",
+    "# We set here the conditions to ensure that all used histogram bins (defined by Ebin, gammaness cut and \n",
+    "# angular cut) have, in the data sample, a reliable excess which we can use for estimating sensitivity:\n",
+    "#\n",
+    "min_signi = 3 # below this value (significance of the test source, Crab, for the *actual* observation \n",
+    "              # time of the sample and obtained with 1 off region) we ignore the corresponding cut \n",
+    "              # combination\n",
+    "\n",
+    "min_exc = 0.002 # in fraction of off. Below this we ignore the corresponding cut combination. \n",
+    "min_off_events = 10 # minimum number of off events in the actual observation used. Below this we \n",
+    "                    # ignore the corresponding cut combination.\n",
+    "\n",
+    "# Note that min_exc is set to a pretty low value! In the observations published in the LST1 performance \n",
+    "# paper the background at low E is stable to 0.5% (i.e. 0.005)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "72a7253b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# For the calculation of the uncertainty of sensitivity:\n",
+    "backg_syst = 0.01 # relative background systematic uncertainty\n",
+    "\n",
+    "backg_normalization = False # generally normalization is not needed, background is uniform enough.\n",
+    "norm_range = np.array([[0.1, 0.16], [20., 59.8]]) # deg2 for theta2, deg for Alpha"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9574f30b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sa = LSTEventSource.create_subarray(tel_id=1)\n",
+    "focal = sa.tel[1].optics.effective_focal_length"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "54cf3f44",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The source position src_x, src_y (in m), stored in \"source_position\", is calculated by \n",
+    "# lstchain_dl2_add_sourcepos.py using the effective focal length (29.30565 m), which means \n",
+    "# that it is \"consistent\" with the reco values reco_src_x, reco_src_y (which are affected \n",
+    "# by the telescope's optical aberration)\n",
+    "#\n",
+    "print(focal)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31d1bb8e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "on_events = [None, None]\n",
+    "off_events = [None, None]\n",
+    "\n",
+    "# In on_events and off_events (which are basically 5-axis histograms):\n",
+    "# axis 0 has two bins. Indicates analysis type. index=0: source-independent; index=1: source-dependent \n",
+    "#\n",
+    "# axis 1 has two bins. index=0: even event-id events; index=1: odd-event-id events\n",
+    "# axis 2 is the energy axis, see logenergy_bins above\n",
+    "# axis 3 is the gammaness axis, see gammaness_bins above\n",
+    "# axis 4 is the angular axis (theta or Alpha), see theta2bins and alphabins above\n"
+   ]
+  },
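A toy version of the multi-axis counting described above (list index = analysis type, then the four np.histogramdd axes); binnings and values are made up and tiny:

    import numpy as np

    odd_or_even = np.array([0., 1., 1.])
    log_e = np.array([-1.0, 0.0, 1.0])
    gammaness = np.array([0.9, 0.5, 0.7])
    theta2 = np.array([0.01, 0.2, 0.05])

    h, _ = np.histogramdd(np.array([odd_or_even, log_e, gammaness, theta2]).T,
                          bins=[[-0.5, 0.5, 1.5], np.linspace(-2, 2, 5),
                                np.linspace(0, 1, 3), np.linspace(0, 0.6, 4)])
    print(h.shape)  # (2, 4, 2, 3): even/odd x energy x gammaness x theta2
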
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f930fbb0",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "#%%filprofile\n",
+    "\n",
+    "tablename = \"/dl2/event/telescope/parameters/LST_LSTCam\"\n",
+    "livetimes = []\n",
+    "\n",
+    "livetime = 0\n",
+    "\n",
+    "\n",
+    "for ifile, file in enumerate(dataset1):\n",
+    "\n",
+    "    print(ifile+1, '/', len(dataset1), ': ', file, time.asctime(time.gmtime()))\n",
+    "    \n",
+    "    tb = read_table(file, tablename)\n",
+    "\n",
+    "    # See above the note on the source_position table!\n",
+    "    tb_extra = read_table(file, \"source_position/table\")\n",
+    "    \n",
+    "    lt, _ = get_effective_time(tb)\n",
+    "    livetime += lt\n",
+    "\n",
+    "    \n",
+    "    dx = np.rad2deg((tb['reco_src_x']-tb_extra['src_x'])/focal.to_value(u.m))\n",
+    "    dy = np.rad2deg((tb['reco_src_y']-tb_extra['src_y'])/focal.to_value(u.m))\n",
+    "    tb['theta2_on'] = (dx**2 + dy**2).astype('float32')\n",
+    "    \n",
+    "    dx = np.rad2deg((tb['reco_src_x']+tb_extra['src_x'])/focal.to_value(u.m))\n",
+    "    dy = np.rad2deg((tb['reco_src_y']+tb_extra['src_y'])/focal.to_value(u.m))\n",
+    "    tb['theta2_off'] = (dx**2 + dy**2).astype('float32')\n",
+    "\n",
+    "    dx = np.rad2deg((tb['reco_src_x']+tb_extra['src_y'])/focal.to_value(u.m))\n",
+    "    dy = np.rad2deg((tb['reco_src_y']-tb_extra['src_x'])/focal.to_value(u.m))\n",
+    "    tb['theta2_off_90'] = (dx**2 + dy**2).astype('float32')\n",
+    "    \n",
+    "    dx = np.rad2deg((tb['reco_src_x']-tb_extra['src_y'])/focal.to_value(u.m))\n",
+    "    dy = np.rad2deg((tb['reco_src_y']+tb_extra['src_x'])/focal.to_value(u.m))\n",
+    "    tb['theta2_off_270'] = (dx**2 + dy**2).astype('float32')\n",
+    "    \n",
+    "    tb['odd_or_even'] = (tb['event_id']%2).astype('float32')\n",
+    "\n",
+    "\n",
+    "    # Filter to select cosmics (=shower events)\n",
+    "    noped = (tb['event_type'] != EventType.SKY_PEDESTAL.value)\n",
+    "    nocal = (tb['event_type'] != EventType.FLATFIELD.value)\n",
+    "    cosmics = noped & nocal\n",
+    "    \n",
+    "    mask = ((tb['intensity']>min_intensity) & \n",
+    "            (tb['intensity']<max_intensity) &\n",
+    "            cosmics)\n",
+    "\n",
+    "    tb['log_reco_energy'] = np.log10(tb['reco_energy'])\n",
+    "\n",
+    "    on = np.histogramdd(np.array([tb['odd_or_even'][mask],\n",
+    "                                  tb['log_reco_energy'][mask].astype('float32'),\n",
+    "                                  tb['gammaness'][mask].astype('float32'), \n",
+    "                                  tb['theta2_on'][mask]]).T, \n",
+    "                        bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                              theta2bins])[0].astype('float32')\n",
+    "\n",
+    "    if on_events[0] is None:\n",
+    "        on_events[0] = on\n",
+    "    else:\n",
+    "        on_events[0] += on\n",
+    "\n",
+    "    off = np.histogramdd(np.array([tb['odd_or_even'][mask],\n",
+    "                                   tb['log_reco_energy'][mask].astype('float32'),\n",
+    "                                   tb['gammaness'][mask].astype('float32'), \n",
+    "                                   tb['theta2_off'][mask]]).T, \n",
+    "                         bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                               theta2bins])[0].astype('float32')\n",
+    "\n",
+    "    # For energy bins >= first_ebin_with_3offs we fill the off with the average of 3 off regions\n",
+    "    off[:,first_ebin_with_3offs:,:,:] *= 1/3.\n",
+    "    high_e_mask = mask & (tb['log_reco_energy'] >= logenergy_bins[first_ebin_with_3offs])\n",
+    "\n",
+    "    off += np.histogramdd(np.array([tb['odd_or_even'][high_e_mask],\n",
+    "                                    tb['log_reco_energy'][high_e_mask].astype('float32'),\n",
+    "                                    tb['gammaness'][high_e_mask].astype('float32'), \n",
+    "                                    tb['theta2_off_90'][high_e_mask]]).T, \n",
+    "                          bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                                theta2bins])[0].astype('float32') / 3.\n",
+    "\n",
+    "    off += np.histogramdd(np.array([tb['odd_or_even'][high_e_mask],\n",
+    "                                    tb['log_reco_energy'][high_e_mask].astype('float32'),\n",
+    "                                    tb['gammaness'][high_e_mask].astype('float32'), \n",
+    "                                    tb['theta2_off_270'][high_e_mask]]).T, \n",
+    "                          bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                                theta2bins])[0].astype('float32') / 3.\n",
+    "\n",
+    "    if off_events[0] is None:\n",
+    "        off_events[0] = off\n",
+    "    else:\n",
+    "        off_events[0] += off\n",
+    "    \n",
+    "    on = None\n",
+    "    off = None\n",
+    "    tb = None\n",
+    "    tb_extra = None\n",
+    "    gc.collect() # memory clean-up\n",
+    "    \n",
+    "\n",
+    "print(\"Live time:\", livetime.to(u.h))\n",
+    "livetimes.append(livetime)\n"
+   ]
+  },
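The three off positions built in the loop above are the nominal source position rotated around the camera centre; a small check (source position made up):

    import numpy as np

    src_x, src_y = 0.3, 0.1  # hypothetical CameraFrame position, in metres

    def rotate(x, y, deg):
        a = np.deg2rad(deg)
        return x*np.cos(a) - y*np.sin(a), x*np.sin(a) + y*np.cos(a)

    print(rotate(src_x, src_y, 180))  # (-src_x, -src_y): the "theta2_off" position
    print(rotate(src_x, src_y, 90))   # (-src_y,  src_x): the "theta2_off_90" position
    print(rotate(src_x, src_y, 270))  # ( src_y, -src_x): the "theta2_off_270" position
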
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "abe15f62",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "#%%filprofile\n",
+    "# Columns to be kept in the case of source-dependent analysis (again, to save memory):\n",
+    "\n",
+    "columns_srcdep = [\"('on', 'expected_src_x')\",\n",
+    "                  \"('on', 'expected_src_y')\",\n",
+    "                  \"('on', 'alpha')\",\n",
+    "                  \"('off_180', 'alpha')\",\n",
+    "                  \"('on', 'gammaness')\",\n",
+    "                  \"('off_180', 'gammaness')\",\n",
+    "                  \"('on', 'reco_energy')\",\n",
+    "                  \"('off_180', 'reco_energy')\"]\n",
+    "columns = ['obs_id', \n",
+    "           'event_id',\n",
+    "           'intensity', \n",
+    "           'event_type',\n",
+    "           'x', 'y', 'psi']\n",
+    "\n",
+    "\n",
+    "tablename = \"/dl2/event/telescope/parameters/LST_LSTCam\"\n",
+    "tablename_srcdep = \"/dl2/event/telescope/parameters_src_dependent/LST_LSTCam\"\n",
+    "\n",
+    "livetime = 0\n",
+    "\n",
+    "on_events[1] = None\n",
+    "off_events[1] = None\n",
+    "\n",
+    "for ifile, file in enumerate(dataset2):\n",
+    "\n",
+    "    print(ifile+1, '/', len(dataset2), ': ', file, time.asctime(time.gmtime()))\n",
+    "\n",
+    "    tb = read_table(file, tablename)\n",
+    "    tb['odd_or_even'] = (tb['event_id']%2).astype('float32')\n",
+    "\n",
+    "    \n",
+    "    lt, _ = get_effective_time(tb)\n",
+    "    livetime += lt\n",
+    "\n",
+    "    tb_srcdep = read_table(file, tablename_srcdep)\n",
+    "\n",
+    "    tb_srcdep.rename_columns([\"('on', 'expected_src_x')\", \"('on', 'expected_src_y')\",\n",
+    "                              \"('on', 'alpha')\", \"('off_180', 'alpha')\",\n",
+    "                              \"('on', 'gammaness')\", \"('off_180', 'gammaness')\",\n",
+    "                              \"('on', 'reco_energy')\", \"('off_180', 'reco_energy')\"],\n",
+    "                             ['src_x', 'src_y', 'alpha_on', 'alpha_off', \n",
+    "                              'gammaness_on', 'gammaness_off', 'reco_energy_on', 'reco_energy_off'])\n",
+    "\n",
+    "    noped = (tb['event_type'] != EventType.SKY_PEDESTAL.value)\n",
+    "    nocal = (tb['event_type'] != EventType.FLATFIELD.value)\n",
+    "    cosmics = noped & nocal\n",
+    "    \n",
+    "    mask = ((tb['intensity']>min_intensity) & \n",
+    "            (tb['intensity']<max_intensity) &\n",
+    "            cosmics)\n",
+    "\n",
+    "    on = np.histogramdd(np.array([tb['odd_or_even'][mask],\n",
+    "                                  np.log10(tb_srcdep['reco_energy_on'][mask]).astype('float32'),\n",
+    "                                  tb_srcdep['gammaness_on'][mask].astype('float32'), \n",
+    "                                  tb_srcdep['alpha_on'][mask]]).T, \n",
+    "                        bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                              alphabins])[0].astype('float32')\n",
+    "\n",
+    "    if on_events[1] is None:\n",
+    "        on_events[1] = on\n",
+    "    else:\n",
+    "        on_events[1] += on\n",
+    "\n",
+    "    off = np.histogramdd(np.array([tb['odd_or_even'][mask],\n",
+    "                                   np.log10(tb_srcdep['reco_energy_off'][mask]).astype('float32'),\n",
+    "                                   tb_srcdep['gammaness_off'][mask].astype('float32'), \n",
+    "                                   tb_srcdep['alpha_off'][mask]]).T, \n",
+    "                         bins=[evt_id_bins, logenergy_bins, gammaness_bins, \n",
+    "                               alphabins])[0].astype('float32')\n",
+    "\n",
+    "    if off_events[1] is None:\n",
+    "        off_events[1] = off\n",
+    "    else:\n",
+    "        off_events[1] += off\n",
+    "\n",
+    "    on = None\n",
+    "    off = None\n",
+    "    tb = None\n",
+    "    tb_srcdep = None\n",
+    "    gc.collect() # memory clean-up\n",
+    "\n",
+    "\n",
+    "print(\"Live time:\", livetime.to(u.h))\n",
+    "livetimes.append(livetime)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b2c3d4e5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Build the cumulative counts used in the rest of the notebook, for every possible\n",
+    "# cut combination: events above each gammaness cut (reverse cumulative sum along\n",
+    "# axis 2) and below each angular cut (cumulative sum along axis 3):\n",
+    "#\n",
+    "cum_excess_events = [None, None]\n",
+    "cum_off_events = [None, None]\n",
+    "\n",
+    "for k in range(2):\n",
+    "    cum_on = np.cumsum(on_events[k][:,:,::-1,:], axis=2)[:,:,::-1,:].cumsum(axis=3)\n",
+    "    cum_off_events[k] = np.cumsum(off_events[k][:,:,::-1,:], axis=2)[:,:,::-1,:].cumsum(axis=3)\n",
+    "    cum_excess_events[k] = cum_on - cum_off_events[k]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2f9d61a0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#################################################################################################\n",
+    "#                                                                                               #\n",
+    "#   UNCOMMENT BELOW THE SETTINGS YOU WANT TO MODIFY (RELATIVE TO WHAT WAS SET AT THE BEGINNING) #\n",
+    "#                                                                                               #\n",
+    "#################################################################################################\n",
+    "\n",
+    "# alpha = 0.2 # Li & Ma's \"alpha\", ratio of the ON to the OFF exposure\n",
+    "# This is what we *assume* for the calculation, because it is the standard value.\n",
+    "# Note however that for a single LST, and to avoid large systematics, we use only one off\n",
+    "# region to estimate the background (in source-dependent analysis; in source-independent \n",
+    "# analysis it is one region up to 250 GeV and 3 regions for >250 GeV). We then \"extrapolate\"\n",
+    "# that background to what we would have in case we really had alpha=0.2...\n",
+    "\n",
+    "\n",
+    "# Assumed (effective) observation time for the calculation (note: it has nothing to do with \n",
+    "# the actual observation time of the sample used for the calculation!):\n",
+    "# obs_time = 50 * u.h \n",
+    "# obs_time = 1/60. * u.h # 1 minute\n",
+    "# obs_time = 0.083333333 * u.h # 5 minutes\n",
+    "# obs_time = 0.5 * u.h\n",
+    "# obs_time = 0.7 * u.h\n",
+    "# obs_time = 5 * u.h\n",
+    "\n",
+    "\n",
+    "# The 5-sigma condition is so standard that we do not make it configurable! It is hardcoded. \n",
+    "# Additional sensitivity conditions:\n",
+    "# min_n_gammas = 10 # Minimum number of excess events \n",
+    "# min_backg_percent = 5 # Excess must be at least this percentage of the background\n",
+    "#"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "08f077ff",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#################################################################################################\n",
+    "#                                                                                               #\n",
+    "#   UNCOMMENT BELOW THE SETTINGS YOU WANT TO MODIFY (RELATIVE TO WHAT WAS SET AT THE BEGINNING) #\n",
+    "#                                                                                               #\n",
+    "#################################################################################################\n",
+    "\n",
+    "#\n",
+    "# CUTS TO ENSURE THE QUALITY OF THE REAL DATA EXCESS FROM WHICH THE SENSITIVITY WILL BE COMPUTED!\n",
+    "#\n",
+    "# We set here the conditions to ensure that all used histogram bins (defined by Ebin, gammaness cut and \n",
+    "# angular cut) have, in the data sample, a reliable excess which we can use for estimating sensitivity:\n",
+    "#\n",
+    "# min_signi = 3 # below this value (significance of the test source, Crab, for the *actual* observation \n",
+    "              # time of the sample and obtained with 1 off region) we ignore the corresponding cut \n",
+    "              # combination\n",
+    "\n",
+    "# min_exc = 0.002 # in fraction of off. Below this we ignore the corresponding cut combination. \n",
+    "# min_off_events = 10 # minimum number of off events in the actual observation used. Below this we \n",
+    "              # ignore the corresponding cut combination.\n",
+    "\n",
+    "# Note that min_exc is set to a pretty low value! In the observations published in the LST1 performance \n",
+    "# paper the background at low E is stable to 0.5% (i.e. 0.005)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "43260673",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# For the calculation of the uncertainty of sensitivity:\n",
+    "backg_syst = 0.01 # relative background systematic uncertainty\n",
+    "\n",
+    "backg_normalization = False # generally normalization is not needed, background is uniform enough.\n",
+    "norm_range = np.array([[0.1, 0.16], [20., 59.8]]) # deg2 for theta2, deg for Alpha"
+   ]
+  },
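To make the cumulative bookkeeping concrete — counting, for every candidate cut, the events above a gammaness threshold — a toy 1-D sketch (counts made up):

    import numpy as np

    counts_per_gammaness_bin = np.array([5., 3., 2., 1.])
    # Events surviving a cut at each bin's lower edge = reversed cumulative sum:
    above_cut = np.cumsum(counts_per_gammaness_bin[::-1])[::-1]
    print(above_cut)  # -> [11. 6. 3. 1.]
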
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "52ff388d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Function to compute the fraction of the Crab flux that results in a given significance, for the \n",
+    "# observation time and Li&Ma's alpha set above.\n",
+    "# This is based on the observed excess and background (cumul_excess and cumul_off). This is done for all \n",
+    "# bins of cumul_excess and cumul_off, that is, for all possible gammaness and theta2/Alpha cuts\n",
+    "#\n",
+    "# In order to get more reliable results:\n",
+    "# We can require the *actually observed* excess to be a minimum fraction of the background (min_exc)\n",
+    "# We can require the *actually observed* excess to have a significance of at least min_signi (computed\n",
+    "# assuming just 1 off region)\n",
+    "# By \"actually observed\" we mean that it is not the excess (or significance) extrapolated for a \n",
+    "# 50h-observation or whatever, but the actually obtained result in the input data sample.\n",
+    "#\n",
+    "def calc_flux_for_N_sigma(N_sigma, cumul_excess, cumul_off, \n",
+    "                          min_signi, min_exc, min_off_events, alpha,\n",
+    "                          target_obs_time, actual_obs_time):\n",
+    "    \n",
+    "    time_factor = target_obs_time.to_value(u.h) / actual_obs_time.to_value(u.h)\n",
+    "\n",
+    "    start_flux = 1\n",
+    "    flux_factor = start_flux * np.ones_like(cumul_excess)\n",
+    "\n",
+    "    good_bin_mask = ((cumul_excess > min_exc*cumul_off) &\n",
+    "                     (cumul_off > min_off_events))\n",
+    "\n",
+    "    flux_factor = np.where(good_bin_mask, flux_factor, np.nan)\n",
+    "    \n",
+    "    # First calculate significance (with 1 off) of the excesses in the provided sample, with no scaling.\n",
+    "    # We will only use the cut combinations where we have at least min_signi sigmas to begin with...\n",
+    "    # NOTE!!! float64 precision is essential for the arguments of li_ma_significance!\n",
+    "\n",
+    "    lima_signi = li_ma_significance((flux_factor*cumul_excess + cumul_off).astype('float64'), \n",
+    "                                    cumul_off.astype('float64'), \n",
+    "                                    alpha=1)\n",
+    "    \n",
+    "    # Set nan in bins where we do not reach min_signi:\n",
+    "    flux_factor = np.where(lima_signi > min_signi, flux_factor, np.nan)\n",
+    "\n",
+    "    \n",
+    "    # Now calculate the significance for the target observation time:\n",
+    "    lima_signi = li_ma_significance((time_factor*(flux_factor*cumul_excess +\n",
+    "                                                  cumul_off)).astype('float64'), \n",
+    "                                    (time_factor*cumul_off/alpha).astype('float64'), \n",
+    "                                    alpha=alpha)\n",
+    "\n",
+    "    \n",
+    "    # iterate to obtain the flux which gives exactly N_sigma:\n",
+    "    for iter in range(4):\n",
+    "        # print(iter)\n",
+    "        tolerance_mask = np.abs(lima_signi-N_sigma)>0.001 # recalculate only what is needed\n",
+    "        flux_factor[tolerance_mask] *= (N_sigma / lima_signi[tolerance_mask])\n",
+    "        # NOTE!!! float64 precision is essential here!!!!\n",
+    "        lima_signi[tolerance_mask] = li_ma_significance((time_factor*(flux_factor[tolerance_mask]*\n",
+    "                                                                      cumul_excess[tolerance_mask]+\n",
+    "                                                                      cumul_off[tolerance_mask])).astype('float64'), \n",
+    "                                                        (time_factor*cumul_off[tolerance_mask]/alpha).astype('float64'), \n",
+    "                                                        alpha=alpha)\n",
+    "    return flux_factor, lima_signi"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56ccff4b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#%%filprofile\n",
+    "\n",
+    "lima_signi = [[], []]\n",
+    "flux_for_5_sigma = [[], []]\n",
+    "\n",
+    "# Compute the flux (in Crab fraction), for all cuts, which results in 5 sigma for the sensitivity conditions\n",
+    "# defined above:\n",
+    "\n",
+    "for k in range(2):\n",
+    "    flux_for_5_sigma[k], lima_signi[k] = calc_flux_for_N_sigma(5, cum_excess_events[k], cum_off_events[k], \n",
+    "                                                               min_signi, min_exc, min_off_events, alpha,\n",
+    "                                                               obs_time, livetimes[k]*0.5)\n",
+    "# NOTE: livetime is divided by 2 because calculation is done separately for the odd- and even-event_id samples!"
+   ]
+  },
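A stripped-down version of the iteration in calc_flux_for_N_sigma, for a single cut combination (all counts hypothetical): the trial flux is rescaled by 5/significance until the Li & Ma significance converges to 5.

    import numpy as np
    from pyirf.statistics import li_ma_significance

    excess, off, alpha = 100., 2000., 0.2  # made-up target-time-equivalent counts
    flux = 1.0                             # start from the full Crab flux
    for _ in range(4):
        signi = li_ma_significance(flux*excess + off, off/alpha, alpha=alpha)
        flux *= 5. / signi
    print(flux, li_ma_significance(flux*excess + off, off/alpha, alpha=alpha))
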
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f5d143b2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Now make sure we only consider bins with valid flux in *both* samples (odd- and even-events).\n",
+    "# This is because we will use the optimal cuts obtained with odd- events to apply them to even- events,\n",
+    "# and vice-versa. If the flux is nan for one of the two, we set it to nan for both.\n",
+    "\n",
+    "for k in range(2):\n",
+    "    mask = np.isnan(flux_for_5_sigma[k][0]) | np.isnan(flux_for_5_sigma[k][1])\n",
+    "    flux_for_5_sigma[k] = np.where(mask, np.nan, flux_for_5_sigma[k])\n",
+    "    lima_signi[k] = np.where(mask, np.nan, lima_signi[k])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "00f673ca",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check that significances are indeed 5 (or very close - they come from an iterative procedure)\n",
+    "# Each entry in the histograms is a cut combination.\n",
+    "\n",
+    "plt.hist(lima_signi[0].flatten(), bins=500, range=(0, 10), log=True)\n",
+    "plt.xlabel('Li & Ma significance')\n",
+    "plt.show()\n",
+    "\n",
+    "plt.hist(lima_signi[1].flatten(), bins=500, range=(0, 10), log=True)\n",
+    "plt.xlabel('Li & Ma significance')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c0153c33",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Function to calculate the flux needed to obtain a given excess, for all cut combinations:\n",
+    "#\n",
+    "def calc_flux_for_N_excess(N_excess, cumul_excess, target_obs_time, actual_obs_time):\n",
+    "    time_factor = target_obs_time.to_value(u.h) / actual_obs_time.to_value(u.h)\n",
+    "    return (N_excess/(cumul_excess*time_factor))\n",
+    "\n",
+    "#\n",
+    "# Do the computation (JUST FOR TEST!!):\n",
+    "#\n",
+    "# flux_for_10_excess = [[], []]\n",
+    "# for k in range(2):\n",
+    "#     flux_for_10_excess[k] = calc_flux_for_N_excess(min_n_gammas, cum_excess_events[k], obs_time, livetimes[k]*0.5)\n",
+    "# # livetime divided by 2 because calculation is done separately for the odd- and even-event_id samples!\n",
+    "    \n",
+    "# flux_for_10_excess = [np.where(np.isnan(flux_for_5_sigma[0]), np.nan, flux_for_10_excess[0]), \n",
+    "#                       np.where(np.isnan(flux_for_5_sigma[1]), np.nan, flux_for_10_excess[1])]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1e37ef03",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Function to calculate the flux needed to obtain a given percent of the background, \n",
+    "# for all cut combinations:\n",
+    "#\n",
+    "def calc_flux_for_x_percent_backg(percent, cumul_excess, cumul_off):\n",
+    "    # In fraction of the flux of the test source (Crab):\n",
+    "    return percent/100*cumul_off/cumul_excess\n",
+    "\n",
+    "#\n",
+    "# Do the computation (JUST FOR TEST!!):\n",
+    "#\n",
+    "# flux_for_5percent_backg = [[], []]\n",
+    "# for k in range(2):\n",
+    "#     flux_for_5percent_backg[k] = calc_flux_for_x_percent_backg(min_backg_percent, cum_excess_events[k], \n",
+    "#                                                                cum_off_events[k])\n",
+    "\n",
+    "# flux_for_5percent_backg = [np.where(np.isnan(flux_for_5_sigma[0]), np.nan, flux_for_5percent_backg[0]), \n",
+    "#                            np.where(np.isnan(flux_for_5_sigma[1]), np.nan, flux_for_5percent_backg[1])]\n"
+   ]
+  },
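Worked numbers for the two auxiliary conditions (all counts hypothetical): with 200 excess events in 40 h of data, a 50 h observation of a 1-C.U. source would yield 200*50/40 = 250 events, so 10 excess events need 10/250 = 0.04 C.U.; with an off of 10000 events, the flux whose excess equals 5% of the background is 0.05*10000/200 = 2.5 C.U.

    # Toy numbers for calc_flux_for_N_excess and calc_flux_for_x_percent_backg:
    excess_1cu, off, t_data, t_target = 200., 10000., 40., 50.  # hypothetical
    print(10. / (excess_1cu * t_target / t_data))  # flux for 10 excess events: 0.04 C.U.
    print(5. / 100. * off / excess_1cu)            # flux for excess = 5% of backg: 2.5 C.U.
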
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7697be5a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Function to calculate the flux (for all cut combinations) which fulfills\n",
+    "# all 3 conditions of sensitivity (standard: at least 5-sigma significance, \n",
+    "# at least 10 excess events, and the excess being at least 5% of the background):\n",
+    "#\n",
+    "def calc_flux_3conditions(cumul_excess, cumul_off, \n",
+    "                          min_signi, min_exc, min_off_events, alpha,\n",
+    "                          target_obs_time, actual_obs_time,\n",
+    "                          min_n_gammas, min_backg_percent):\n",
+    "    \n",
+    "    f1, _ = calc_flux_for_N_sigma(5, cumul_excess, cumul_off, \n",
+    "                                  min_signi, min_exc, min_off_events, alpha,\n",
+    "                                  target_obs_time, actual_obs_time)\n",
+    "    f2 = 0\n",
+    "    f3 = 0\n",
+    "    \n",
+    "    if min_n_gammas > 0:\n",
+    "        f2 = calc_flux_for_N_excess(min_n_gammas, cumul_excess, target_obs_time, actual_obs_time)\n",
+    "    if min_backg_percent > 0:\n",
+    "        f3 = calc_flux_for_x_percent_backg(min_backg_percent, cumul_excess, cumul_off)\n",
+    "    \n",
+    "    return np.maximum(np.maximum(f1, f2), f3) "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "309e3136",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Definitions of min_signi, min_exc and min_off_events (to consider a cut combination valid) are above\n",
+    "#\n",
+    "# Do the computation of the detectable flux (for all cut combinations) according to the sensitivity conditions:\n",
+    "#\n",
+    "detectable_flux = [[],[]]\n",
+    "\n",
+    "#Minimum flux fulfilling all three conditions:\n",
+    "for k in range(2):\n",
+    "    detectable_flux[k] = calc_flux_3conditions(cum_excess_events[k], cum_off_events[k],\n",
+    "                                               min_signi, min_exc, min_off_events, alpha,\n",
+    "                                               obs_time, livetimes[k]*0.5,\n",
+    "                                               min_n_gammas, min_backg_percent)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e531e63b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Just in case, make sure we only consider bins with valid flux in *both* samples (odd- and even-events):\n",
+    "for k in range(2):\n",
+    "    mask = np.isnan(detectable_flux[k][0]) | np.isnan(detectable_flux[k][1])\n",
+    "    detectable_flux[k] = np.where(mask, np.nan, detectable_flux[k])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ab8551c6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sensitivity = [[[], []], [[], []]] # [analysis_type, odd_or_even, energy]\n",
+    "reco_energy = [[[], []], [[], []]]\n",
+    "signi = [[[], []], [[], []]]\n",
+    "\n",
+    "cut_indices = [[[], []], [[], []]] # [analysis_type, odd_or_even, energy]\n",
+    "\n",
+    "# Tweak: sometimes the minimization at low Ereco ends up with very tight alpha cut \n",
+    "# just because of a fluke... We try to avoid it here, by setting minimum \"reasonable\" \n",
+    "# cut values for the low-E bins:\n",
+    "#\n",
+    "min_angle_cut = [np.zeros(len(logenergy_bins)-1), np.zeros(len(logenergy_bins)-1)]\n",
+    "min_angle_cut[0][:5] = 0.02\n",
+    "min_angle_cut[1][:5] = 5\n",
+    "\n",
+    "# Now, in each subsample (odd/even event_id's) we will find which cuts provide the best sensitivity\n",
+    "# (= minimum detectable flux), and will apply those cuts to the *other* subsample. 
\n", + "\n", + "for analysis_type in range(2): # source-independent and source-dependent analyses\n", + " for even_or_odd in range(2):\n", + " # We optimize the cuts with the sample indicated by even_or_odd,\n", + " # and apply them on the complementary sample, \"other_half\":\n", + " if even_or_odd == 0:\n", + " other_half = 1\n", + " else:\n", + " other_half = 0\n", + "\n", + " for iebin in range(len(logenergy_bins)-1):\n", + " reco_energy[analysis_type][other_half].append(10**(0.5*(logenergy_bins[iebin]+logenergy_bins[iebin+1])))\n", + "\n", + " # Now find the cuts which provide the minimum detectable flux using the events with odd event_id.\n", + "\n", + " # Except if we have only nan values... :\n", + " if np.sum(~np.isnan(detectable_flux[analysis_type][even_or_odd][iebin])) == 0:\n", + " sensitivity[analysis_type][other_half].append(np.nan)\n", + " signi[analysis_type][other_half].append(np.nan)\n", + " cut_indices[analysis_type][other_half].append([0, 0])\n", + " continue\n", + "\n", + " if analysis_type == 0:\n", + " start_bin = np.where(theta2bins>min_angle_cut[0][iebin])[0][0] - 1\n", + " else:\n", + " start_bin = np.where(alphabins>min_angle_cut[1][iebin])[0][0] - 1\n", + " \n", + " index = np.nanargmin(detectable_flux[analysis_type][even_or_odd][iebin,:,start_bin:]) \n", + "\n", + " indices = list(np.unravel_index(index, \n", + " detectable_flux[analysis_type][even_or_odd][iebin, :, start_bin:].shape))\n", + " indices[1] += start_bin\n", + " \n", + " # Now get & store the minimum detectable flux with these cuts but using the other half of the events\n", + " # Keep also the best-cut indices for later use\n", + "\n", + " sensitivity[analysis_type][other_half].append(detectable_flux[analysis_type][other_half][iebin, indices[0], indices[1]])\n", + " signi[analysis_type][other_half].append(lima_signi[analysis_type][other_half][iebin, indices[0], indices[1]])\n", + " cut_indices[analysis_type][other_half].append(indices)\n", + "\n", + " sensitivity[analysis_type][other_half] = np.array(sensitivity[analysis_type][other_half])\n", + " reco_energy[analysis_type][other_half] = np.array(reco_energy[analysis_type][other_half])\n", + " signi[analysis_type][other_half] = np.array(signi[analysis_type][other_half])\n", + " cut_indices[analysis_type][other_half] = np.array(cut_indices[analysis_type][other_half])\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc4643a9", + "metadata": {}, + "outputs": [], + "source": [ + "#\n", + "# MAGIC sensitivity in Crab fraction:\n", + "#\n", + "def plot_MAGIC_sensitivity_fraction():\n", + " s = np.loadtxt('magic_sensitivity.txt', skiprows = 1)\n", + " energy = (s[:,0] * u.GeV).to(u.TeV)\n", + " percentage = s[:,5]\n", + " plt.plot(energy, percentage/100., '-.', label='MAGIC (stereo) [Aleksić et al. 
2016]', color='tab:green')\n", + " return" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb4586cd", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10,4))\n", + "plt.scatter(reco_energy[0][1], sensitivity[0][1], marker='o', facecolors='tab:blue', edgecolors='tab:blue',\n", + " label=tag1+\", odd id\")\n", + "plt.scatter(reco_energy[0][0], sensitivity[0][0], marker='o', facecolors='none', edgecolors='tab:blue',\n", + " label=tag1+\", even id\")\n", + "\n", + "plt.scatter(reco_energy[1][1], sensitivity[1][1], marker='o', facecolors='tab:orange', edgecolors='tab:orange',\n", + " label=tag2+\", odd id\")\n", + "plt.scatter(reco_energy[1][0], sensitivity[1][0], marker='o', facecolors='none', edgecolors='tab:orange',\n", + " label=tag2+\", even id\")\n", + "\n", + "plt.yscale('log')\n", + "plt.xscale('log')\n", + "\n", + "plt.ylim(0.005, 10)\n", + "plt.xlim(0.01, 200)\n", + "plt.ylabel(\"Fraction of Crab flux\")\n", + "plt.xlabel(\"Reconstructed energy / TeV\")\n", + "\n", + "plot_MAGIC_sensitivity_fraction()\n", + "\n", + "plt.legend()\n", + "plt.grid()\n", + "plt.show() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a8e5be4", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10,4))\n", + "plt.scatter(reco_energy[0][1], 0.5*(sensitivity[0][1]+sensitivity[0][0]), \n", + " marker='o', facecolors='tab:blue', edgecolors='tab:blue',\n", + " label=tag1)\n", + "\n", + "plt.scatter(reco_energy[1][1], 0.5*(sensitivity[1][1]+sensitivity[1][0]), marker='o', facecolors='tab:orange', edgecolors='tab:orange',\n", + " label=tag2)\n", + "\n", + "plt.yscale('log')\n", + "plt.xscale('log')\n", + "\n", + "plt.ylim(0.005, 5)\n", + "plt.xlim(0.01, 200)\n", + "plt.ylabel(\"Fraction of Crab flux\")\n", + "plt.xlabel(\"Reconstructed energy / TeV\")\n", + "\n", + "plot_MAGIC_sensitivity_fraction()\n", + "\n", + "plt.legend(loc='lower right')\n", + "plt.grid()\n", + "plt.show() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4ae541e", + "metadata": {}, + "outputs": [], + "source": [ + "# Check the significance for the optimal cuts, for all the energy bins:\n", + "plt.figure(figsize=(15,6))\n", + "plt.scatter(reco_energy[0][1], signi[0][1], label=tag1+\", odd id\")\n", + "plt.scatter(reco_energy[1][1], signi[1][1], label=tag2+\", odd id\")\n", + "plt.scatter(reco_energy[0][1], signi[0][0], label=tag1+\", even id\")\n", + "plt.scatter(reco_energy[1][1], signi[1][0], label=tag2+\", even id\")\n", + "\n", + "plt.xscale('log')\n", + "\n", + "plt.ylim(2, 8)\n", + "plt.xlim(0.01, 100)\n", + "plt.legend()\n", + "plt.xlabel('Reconstructed energy / TeV')\n", + "plt.grid()\n", + "plt.show() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c453d86a", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# Just a test plot to see how the minimum flux is found on even-numbered events, and applied to odd-numbered events\n", + "energyid = 2 # for energy bin\n", + "analysis_type = 0 # 0 source-independent, 1 source-dependent\n", + "\n", + "print(10**(0.5*(logenergy_bins[energyid]+logenergy_bins[energyid+1])), \"TeV\")\n", + "\n", + "if analysis_type == 0:\n", + " start_bin = np.where(theta2bins>min_angle_cut[0][energyid])[0][0] - 1\n", + "else:\n", + " start_bin = np.where(alphabins>min_angle_cut[1][energyid])[0][0] - 1\n", + "\n", + "index = np.nanargmin(detectable_flux[analysis_type][0][energyid, :, start_bin:])\n", + "indices = list(np.unravel_index(index, 
detectable_flux[analysis_type][0][energyid, :, start_bin:].shape))\n", + "indices[1] += start_bin\n", + "# convert to indices in gammaness axis & angle axis\n", + "\n", + "print(\"Minimum:\", np.nanmin(detectable_flux[analysis_type][0][energyid, :, start_bin:]))\n", + "print(detectable_flux[analysis_type][0][energyid, indices[0], indices[1]])\n", + "print(detectable_flux[analysis_type][0][energyid, \n", + " indices[0]-3:indices[0]+4, \n", + " indices[1]-3:indices[1]+4])\n", + "\n", + "gammaness_cut = gammaness_bins[indices[0]]\n", + "angular_cut = theta2bins[indices[1]+1] # +1 because we want the bin's upper edge\n", + "if analysis_type == 1:\n", + " angular_cut = alphabins[indices[1]+1]\n", + "\n", + "print()\n", + "print(\"With the sample of even-numbered events:\")\n", + "print('Minimum flux, gammaness & angular cut bins:', indices)\n", + "print('Cut values and minimum detectable flux (in fraction of Crab):',\n", + " gammaness_cut, angular_cut, detectable_flux[analysis_type][0][energyid, indices[0], indices[1]])\n", + "print('Excess, Off events (for input sample), Li & Ma significance (in target t_obs for detectable flux):')\n", + "print(' ', cum_excess_events[analysis_type][0][energyid, indices[0], indices[1]],\n", + " cum_off_events[analysis_type][0][energyid, indices[0], indices[1]],\n", + " f'{lima_signi[analysis_type][0][energyid, indices[0], indices[1]]:.3f}')\n", + "\n", + "plt.figure(figsize=(16,4))\n", + "if analysis_type == 0:\n", + " plt.pcolormesh(theta2bins, gammaness_bins, detectable_flux[analysis_type][0][energyid], norm=colors.LogNorm())\n", + "else:\n", + " plt.pcolormesh(alphabins, gammaness_bins, detectable_flux[analysis_type][0][energyid], norm=colors.LogNorm())\n", + "\n", + "plt.colorbar()\n", + "plt.scatter([angular_cut], [gammaness_cut], marker='o', facecolors='none', edgecolors='red')\n", + "\n", + "plt.ylabel('gammaness cut')\n", + "plt.xlabel('angular cut')\n", + "\n", + "# plt.xlim(0, 0.5)\n", + "# plt.ylim(0.65, 0.75)\n", + "plt.show()\n", + "\n", + "plt.figure(figsize=(16,4))\n", + "if analysis_type == 0:\n", + " plt.pcolormesh(theta2bins, gammaness_bins, detectable_flux[analysis_type][1][energyid], norm=colors.LogNorm())\n", + "else:\n", + " plt.pcolormesh(alphabins, gammaness_bins, detectable_flux[analysis_type][1][energyid], norm=colors.LogNorm())\n", + "plt.colorbar()\n", + "plt.scatter([angular_cut], [gammaness_cut], marker='o', facecolors='none', edgecolors='red')\n", + "\n", + "plt.ylabel('gammaness cut')\n", + "plt.xlabel('angular cut')\n", + "# plt.xlim(0, 0.5)\n", + "# plt.ylim(0.65, 0.75)\n", + "plt.show()\n", + "\n", + "\n", + "print('With the sample of odd-numbered events:')\n", + "print('Applied cuts (from the other sample) and minimum detectable flux (in fraction of Crab):',\n", + " gammaness_cut, angular_cut, detectable_flux[analysis_type][1][energyid, indices[0], indices[1]])\n", + "print('Excess, Off events (for input sample), Li & Ma significance (in target t_obs for detectable flux):')\n", + "print(' ', cum_excess_events[analysis_type][1][energyid, indices[0], indices[1]],\n", + " cum_off_events[analysis_type][1][energyid, indices[0], indices[1]],\n", + " f'{lima_signi[analysis_type][1][energyid, indices[0], indices[1]]:.3f}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75fd73b2", + "metadata": {}, + "outputs": [], + "source": [ + "#\n", + "# Function to find the cut indices (i.e. 
bin indices of the histos) which correspond to certain cuts\n",
+    "#\n",
+    "def find_bin_indices(gcut, angcut, analysis_type):\n",
+    "    # Find bin edge which is closest to the cut value:\n",
+    "    if analysis_type == 0:\n",
+    "        angcut_index = np.nanargmin(np.abs(theta2bins-angcut)) - 1 \n",
+    "    else:\n",
+    "        angcut_index = np.nanargmin(np.abs(alphabins-angcut)) - 1 \n",
+    "\n",
+    "    gcut_index = np.nanargmin(np.abs(gammaness_bins-gcut))\n",
+    "\n",
+    "    return gcut_index, angcut_index "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "05d25fe7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Plot a rebinned histogram (for better visualization)\n",
+    "#\n",
+    "def plot_rebinned(x, y, yerr, rebin, label):\n",
+    "    xx = np.array([0.5*(x[i]+x[i+rebin]) for i in range(0, len(x)-1, rebin)])\n",
+    "    yy = np.array([np.sum(y[i:i+rebin]) for i in range(0, len(y)-1, rebin)])\n",
+    "    yyerr = np.array([(np.sum(yerr[i:i+rebin]**2))**0.5 for i in range(0, len(yerr)-1, rebin)])\n",
+    "    plt.errorbar(xx, yy, yerr=yyerr, fmt='o', markersize=3, label=label)\n",
+    "    return xx, yy"
+   ]
+  },
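A toy call of the plot_rebinned helper just defined, merging the 6000 fine theta2 bins in groups of 100 (flat dummy counts with Poisson error bars); it assumes the cell above has been run:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 0.6, 6001)  # the theta2bins edges defined earlier
    y = np.full(6000, 4.)          # dummy counts per fine bin
    plot_rebinned(x, y, y**0.5, 100, 'dummy data')
    plt.xlabel('Theta2 (deg2)'); plt.legend(); plt.show()
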
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "14b1ec00",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "#\n",
+    "# Now we recalculate the sensitivities, and also check individual theta2 / Alpha plots:\n",
+    "#\n",
+    "\n",
+    "target_obs_time = obs_time # = the same for which the cut optimization was done\n",
+    "\n",
+    "# target_obs_time = 0.5 * u.h # CHANGE ONLY IN CASE YOU WANT TO CALCULATE SENSITIVITY \n",
+    "                              # FOR DIFFERENT T_OBS, BUT *WITHOUT* RE-OPTIMIZING CUTS!!\n",
+    "\n",
+    "\n",
+    "\n",
+    "norm_bins = np.array([[np.where(theta2bins>norm_range[0][0])[0][0],\n",
+    "                       np.where(theta2bins>norm_range[0][1])[0][0]],\n",
+    "                      [np.where(alphabins>norm_range[1][0])[0][0],\n",
+    "                       np.where(alphabins>norm_range[1][1])[0][0]]\n",
+    "                     ])\n",
+    "\n",
+    "\n",
+    "\n",
+    "rebin_factor = np.array((len(logenergy_bins)-1)*[20]) # join bins in groups of rebin_factor, to make plots less noisy.\n",
+    "#rebin_factor = np.array((len(logenergy_bins)-1)*[60]) # join bins in groups of rebin_factor, to make plots less noisy.\n",
+    "\n",
+    "rebin_factor[:3] = 120\n",
+    "rebin_factor[3:5] = 60\n",
+    "rebin_factor[14:] = 60\n",
+    "\n",
+    "sensitivity = np.empty((2, len(logenergy_bins)-1))\n",
+    "sensitivity[:] = np.nan\n",
+    "\n",
+    "delta_sensitivity = np.empty((2, 2, len(logenergy_bins)-1)) # separate upper and lower error bars\n",
+    "delta_sensitivity[:] = np.nan\n",
+    "\n",
+    "# For 5-sigma condition only: \n",
+    "# (NOTE! This is with the cuts optimized using all 3 conditions! In order to obtain the best sensitivity\n",
+    "# for just the 5 sigma condition, one has to set min_n_gammas=0 and min_backg_percent=0 BEFORE the cut \n",
+    "# optimization!)\n",
+    "sensitivity_5s = np.zeros_like(sensitivity)\n",
+    "delta_sensitivity_5s = np.zeros_like(delta_sensitivity)\n",
+    "sensitivity_5s[:] = np.nan\n",
+    "delta_sensitivity_5s[:] = np.nan\n",
+    "\n",
+    "reco_energy = np.zeros_like(sensitivity)\n",
+    "num_excess_events = np.zeros_like(sensitivity)\n",
+    "num_off_events = np.zeros_like(sensitivity)\n",
+    "reco_energy[:] = np.nan\n",
+    "num_excess_events[:] = np.nan\n",
+    "num_off_events[:] = np.nan\n",
+    "\n",
+    "angular_cut = np.empty((2, 2, len(logenergy_bins)-1))\n",
+    "gammaness_cut = np.empty((2, 2, len(logenergy_bins)-1)) # analysis_type, odd_or_even, energy\n",
+    "angular_cut[:] = np.nan\n",
+    "gammaness_cut[:] = np.nan\n",
+    "\n",
+    "\n",
+    "for iebin in range(len(logenergy_bins)-1):\n",
+    "\n",
+    "    recoE = 10**(0.5*(logenergy_bins[iebin]+logenergy_bins[iebin+1]))\n",
+    "    reco_energy[0][iebin] = recoE\n",
+    "    reco_energy[1][iebin] = recoE\n",
+    "\n",
+    "    \n",
+    "    if (recoE < 0.016) | (recoE > 16):\n",
+    "        continue\n",
+    "    \n",
+    "    print(f'Energy: {recoE:.4f} TeV') \n",
+    "    \n",
+    "    fig = plt.figure(figsize=(16, 5))\n",
+    "    \n",
+    "    for analysis_type in range(2):\n",
+    "        \n",
+    "        indices0 = cut_indices[analysis_type][0][iebin]\n",
+    "        indices1 = cut_indices[analysis_type][1][iebin]\n",
+    "        # indices0 and indices1 have 2 elements each: [0] is the gammaness cut, [1] is the angular cut\n",
+    "\n",
+    "        if (indices0>0).all() and (indices1>0).all():\n",
+    "            # Valid gammaness & angular cuts (cut indices ==0 means no valid cuts could be determined!)\n",
+    "            nevts_on = (np.sum(on_events[analysis_type][0, iebin, indices0[0]:], axis=0) + \n",
+    "                        np.sum(on_events[analysis_type][1, iebin, indices1[0]:], axis=0))\n",
+    "\n",
+    "            nevts_off = (np.sum(off_events[analysis_type][0, iebin, indices0[0]:], axis=0) + \n",
+    "                         np.sum(off_events[analysis_type][1, iebin, indices1[0]:], axis=0))\n",
+    "        else:\n",
+    "            nevts_on = None\n",
+    "            nevts_off = None\n",
+    "        \n",
+    "        if analysis_type == 0:\n",
+    "            fig.add_subplot(1, 2, 1+analysis_type)\n",
+    "\n",
+    "            if nevts_on is None:\n",
+    "                print('No valid cuts found for source-independent analysis!')\n",
+    "                continue\n",
+    "            else:\n",
+    "                print(f'Gammaness cuts: {gammaness_bins[indices0[0]]:.4f}, {gammaness_bins[indices1[0]]:.4f}')\n",
+    "                print(f'Theta2 cuts: {theta2bins[indices0[1]+1]:.4f}, {theta2bins[indices1[1]+1]:.4f}')\n",
+    "            \n",
+    "            xx, yy = plot_rebinned(theta2bins, nevts_on, nevts_on**0.5, rebin_factor[iebin], '')\n",
+    "            xxoff, yyoff = plot_rebinned(theta2bins, nevts_off, nevts_off**0.5, rebin_factor[iebin], '')\n",
+    "\n",
+    "            plt.plot([theta2bins[indices0[1]+1], theta2bins[indices0[1]+1]], \n",
+    "                     [0, yy[int((indices0[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')\n",
+    "            plt.plot([theta2bins[indices1[1]+1], theta2bins[indices1[1]+1]], \n",
+    "                     [0, yy[int((indices1[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')\n",
+    "\n",
+    "            plt.xlim(0, 0.2)\n",
+    "            plt.ylim(yyoff.min()*0.9, yy.max()*1.1)\n",
+    "            plt.xlabel('Theta2 (deg2)')\n",
+    "            plt.ylabel('Events')\n",
+    "\n",
+    "            angular_cut[analysis_type][0][iebin] = theta2bins[indices0[1]+1]\n",
+    "            angular_cut[analysis_type][1][iebin] = theta2bins[indices1[1]+1]\n",
+    "\n",
+    "        else:\n",
+    "            fig.add_subplot(1, 2, 1+analysis_type)\n",
+    "            \n",
+    "            if nevts_on is None:\n",
+    "                print('No valid cuts found for source-dependent analysis!')\n",
+    "                continue\n",
+    "            else:\n",
+    
" print(f'Alpha cuts: {alphabins[indices0[1]+1]:.2f}, {alphabins[indices1[1]+1]:.2f}')\n", + " \n", + " xx, yy = plot_rebinned(alphabins, nevts_on, nevts_on**0.5, rebin_factor[iebin], '')\n", + " xxoff, yyoff = plot_rebinned(alphabins, nevts_off, nevts_off**0.5, rebin_factor[iebin], '')\n", + "\n", + " plt.plot([alphabins[indices0[1]+1], alphabins[indices0[1]+1]], \n", + " [0, yy[int((indices0[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')\n", + " plt.plot([alphabins[indices1[1]+1], alphabins[indices1[1]+1]], \n", + " [0, yy[int((indices1[1]+1)/rebin_factor[iebin])]], '--', color='tab:green')\n", + " plt.xlim(0, 60)\n", + " plt.ylim(yyoff.min()*0.9, yy.max()*1.1)\n", + " plt.xlabel('Alpha (deg)')\n", + " plt.ylabel('Events')\n", + "\n", + " angular_cut[analysis_type][0][iebin] = alphabins[indices0[1]+1]\n", + " angular_cut[analysis_type][1][iebin] = alphabins[indices1[1]+1]\n", + "\n", + " # Add up the backg numbers (odd and even events) in the normalization region, and the excess:\n", + " off_in_norm_region = (cum_off_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][1]] +\n", + " cum_off_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][1]] -\n", + " cum_off_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][0]] -\n", + " cum_off_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][0]]\n", + " )\n", + " excess_in_norm_region = (cum_excess_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][1]] +\n", + " cum_excess_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][1]] -\n", + " cum_excess_events[analysis_type][0, iebin, indices0[0], norm_bins[analysis_type][0]] -\n", + " cum_excess_events[analysis_type][1, iebin, indices1[0], norm_bins[analysis_type][0]]\n", + " )\n", + " off_norm_factor = 1\n", + " if backg_normalization:\n", + " off_norm_factor = (off_in_norm_region + excess_in_norm_region) / off_in_norm_region\n", + " print('Off normalization for analysis type', analysis_type, ':', off_norm_factor)\n", + "\n", + " norm_min = (((off_in_norm_region + excess_in_norm_region) - \n", + " (off_in_norm_region + excess_in_norm_region)**0.5) / \n", + " (off_in_norm_region + off_in_norm_region**0.5))\n", + " norm_max = (((off_in_norm_region + excess_in_norm_region) + \n", + " (off_in_norm_region + excess_in_norm_region)**0.5) / \n", + " (off_in_norm_region - off_in_norm_region**0.5))\n", + " print(f' {norm_min:.4f} to {norm_max:.4f}')\n", + "\n", + "\n", + " gammaness_cut[analysis_type][0][iebin] = gammaness_bins[indices0[0]]\n", + " gammaness_cut[analysis_type][1][iebin] = gammaness_bins[indices1[0]]\n", + "\n", + " # Add up the excess (and the off) for odd and even event_id's\n", + " nexcess = (cum_excess_events[analysis_type][0, iebin, indices0[0], indices0[1]] +\n", + " cum_excess_events[analysis_type][1, iebin, indices1[0], indices1[1]])\n", + " noff = (cum_off_events[analysis_type][0, iebin, indices0[0], indices0[1]] +\n", + " cum_off_events[analysis_type][1, iebin, indices1[0], indices1[1]])\n", + "\n", + "\n", + " nexcess = nexcess + noff * (1 - off_norm_factor)\n", + " noff = noff * off_norm_factor\n", + "\n", + "\n", + " flux = calc_flux_3conditions(np.array([nexcess]), np.array([noff]), \n", + " min_signi, min_exc, min_off_events, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " min_n_gammas, min_backg_percent)\n", + "\n", + " if analysis_type == 0:\n", + " print(f'Results (source-indep): Nexc={nexcess}, Noff={noff}, Flux/CU={flux[0]:.4f}')\n", + " 
else:\n", + " print(f'Results (source-dep): Nexc={nexcess}, Noff={noff}, Flux/CU={flux[0]:.4f}')\n", + "\n", + " sensitivity[analysis_type][iebin] = flux[0]\n", + "\n", + " # Assume background systematics and statistical fluctuation of excess go in same direction:\n", + " max_excess = nexcess + backg_syst * noff + (nexcess + 2*noff)**0.5\n", + " min_excess = nexcess - backg_syst * noff - (nexcess + 2*noff)**0.5\n", + "\n", + " flux_minus = calc_flux_3conditions(np.array([max_excess]), \n", + " np.array([noff]), \n", + " 0, 0, 0, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " min_n_gammas, min_backg_percent)\n", + " flux_plus = calc_flux_3conditions(np.array([min_excess]), \n", + " np.array([noff]), \n", + " 0, 0, 0, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " min_n_gammas, min_backg_percent)\n", + "\n", + " delta_sensitivity[analysis_type][1][iebin] = flux_plus[0] - flux[0]\n", + " delta_sensitivity[analysis_type][0][iebin] = flux[0] - flux_minus[0]\n", + "\n", + "\n", + " # Now only with the 5-sigma condition (remove req for 10 excess events & 5% of backg):\n", + "\n", + " flux_5s = calc_flux_3conditions(np.array([nexcess]), np.array([noff]), \n", + " min_signi, min_exc, min_off_events, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " 0, 0)\n", + " sensitivity_5s[analysis_type][iebin] = flux_5s[0]\n", + " flux_5s_minus = calc_flux_3conditions(np.array([max_excess]), \n", + " np.array([noff]), \n", + " 0, 0, 0, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " 0, 0)\n", + " flux_5s_plus = calc_flux_3conditions(np.array([min_excess]), \n", + " np.array([noff]), \n", + " 0, 0, 0, alpha,\n", + " target_obs_time, livetimes[analysis_type],\n", + " 0, 0)\n", + "\n", + " delta_sensitivity_5s[analysis_type][1][iebin] = flux_5s_plus[0] - flux_5s[0]\n", + " delta_sensitivity_5s[analysis_type][0][iebin] = flux_5s[0] - flux_5s_minus[0]\n", + "\n", + "\n", + " num_excess_events[analysis_type][iebin] = nexcess\n", + " num_off_events[analysis_type][iebin] = noff\n", + "\n", + " # plt.ylim(0, 250)\n", + "\n", + " plt.grid()\n", + " # plt.legend()\n", + "\n", + " plt.show()\n", + " print()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b1805e1", + "metadata": {}, + "outputs": [], + "source": [ + "#\n", + "# FINAL SENSITIVITY PLOTS\n", + "#\n", + "plt.figure(figsize=(8,6))\n", + "\n", + "\n", + "plt.fill_between(reco_energy[0][:-1], \n", + " (sensitivity[0]-delta_sensitivity[0][0])[:-1], \n", + " (sensitivity[0]+delta_sensitivity[0][1])[:-1], alpha=0.4, color='tab:blue')\n", + "\n", + "\n", + "plt.fill_between(reco_energy[1][:-1], \n", + " (sensitivity[1]-delta_sensitivity[1][0])[:-1], \n", + " (sensitivity[1]+delta_sensitivity[1][1])[:-1], alpha=0.4, color='tab:orange')\n", + "\n", + "\n", + "plt.errorbar(reco_energy[0], sensitivity[0],\n", + " yerr=delta_sensitivity[0], marker='o', color='tab:blue', ls='none', markersize=4,\n", + " label='LST-1 (source-independent)')\n", + "\n", + "plt.errorbar(reco_energy[1], sensitivity[1],\n", + " yerr=delta_sensitivity[1], marker='o', color='tab:orange', ls='none', markersize=4,\n", + " label='LST-1 (source-dependent)')\n", + "\n", + "\n", + "plot_MAGIC_sensitivity_fraction()\n", + "\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.xlabel('E$_{reco}$ / TeV')\n", + "plt.ylabel('Fraction of Crab nebula flux')\n", + "plt.ylim(0.01, 50)\n", + "plt.xlim(0.02, 12)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "id": "4443a891", + "metadata": {}, + "outputs": [], + "source": [ + "# Write out the results:\n", + "Output_filename = \"Sensitivity_output.csv\"\n", + "np.savetxt(Output_filename, [reco_energy[0], \n", + " sensitivity[0], \n", + " delta_sensitivity[0][0],\n", + " delta_sensitivity[0][1],\n", + " sensitivity[1], \n", + " delta_sensitivity[1][0],\n", + " delta_sensitivity[1][1],\n", + " sensitivity_5s[0], \n", + " delta_sensitivity_5s[0][0],\n", + " delta_sensitivity_5s[0][1],\n", + " sensitivity_5s[1], \n", + " delta_sensitivity_5s[1][0],\n", + " delta_sensitivity_5s[1][1]],\n", + " fmt='%.5e', delimiter=',',\n", + " header = 'E(TeV) SI_sensitivity, SI_delta_sensitivity_low, SI_delta_sensitivity_high, '+\n", + " 'SD_sensitivity, SD_delta_sensitivity_low, SD_delta_sensitivity_high, '+\n", + " 'SI_sensitivity_5s, SI_delta_sensitivity_5s_low, SI_delta_sensitivity_5s_high, '+\n", + " 'SD_sensitivity_5s, SD_delta_sensitivity_5s_low, SD_delta_sensitivity_5s_high')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3a86ac5", + "metadata": {}, + "outputs": [], + "source": [ + "# REDO PLOT FROM CSV FILE:\n", + "\n", + "plt.figure(figsize=(8,6))\n", + "\n", + "data = np.loadtxt(Output_filename, delimiter=',')\n", + "plt.fill_between(data[0][:-1], \n", + " (data[1]-data[2])[:-1], \n", + " (data[1]+data[3])[:-1], alpha=0.4, color='tab:blue')\n", + "plt.fill_between(data[0][:-1], \n", + " (data[4]-data[5])[:-1], \n", + " (data[4]+data[6])[:-1], alpha=0.4, color='tab:orange')\n", + "\n", + "plt.errorbar(data[0], data[1],\n", + " yerr=[data[2], data[3]], marker='o', color='tab:blue', ls='none', markersize=4,\n", + " label='LST-1 (source-independent)')\n", + "plt.errorbar(data[0], data[4],\n", + " yerr=[data[5], data[6]], marker='o', color='tab:orange', ls='none', markersize=4,\n", + " label='LST-1 (source-dependent)')\n", + "\n", + "plot_MAGIC_sensitivity_fraction()\n", + "\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.xlabel('E$_{reco}$ / TeV')\n", + "plt.ylabel('Fraction of Crab nebula flux')\n", + "plt.xlim(0.02, 12)\n", + "plt.ylim(0.01, 50)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d2360f8", + "metadata": {}, + "outputs": [], + "source": [ + "np.nancumsum(num_excess_events[0][::-1])[::-1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8438ba2a", + "metadata": {}, + "outputs": [], + "source": [ + "# Integral numbers of events\n", + "fig = plt.figure(figsize=(16, 6))\n", + "\n", + "fig.add_subplot(1, 2, 1)\n", + "plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_excess_events[0][::-1])[::-1])\n", + "plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_excess_events[1][::-1])[::-1])\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.xlabel('Ereco / TeV')\n", + "plt.ylabel('Number of excess events, integrated above Ereco')\n", + "plt.xlim(0.01, 30)\n", + "\n", + "fig.add_subplot(1, 2, 2)\n", + "plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_off_events[0][::-1])[::-1])\n", + "plt.plot(10**logenergy_bins[:-1], np.nancumsum(num_off_events[1][::-1])[::-1])\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.xlabel('Ereco / TeV')\n", + "plt.ylabel('Number of OFF events, integrated above Ereco')\n", + "plt.xlim(0.01, 30)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1959b272", + "metadata": {}, + "outputs": [], + "source": [ + "#\n", + "# Calculate integral sensitivity, using the same optimized cuts in 
+    "# NOTE: at low E differential sensitivities may be similar, yet the integral ones differ, because of\n",
+    "# the different rates of excess and background (resulting in different conditions determining the\n",
+    "# flux sensitivity value). Cuts are not re-optimized!\n",
+    "# NOTE anyway that only the best integral sensitivity (in terms of Crab fraction) is relevant!\n",
+    "#\n",
+    "\n",
+    "integral_sensitivity = np.zeros_like(sensitivity)\n",
+    "integral_sensitivity[:] = np.nan\n",
+    "\n",
+    "for analysis_type in range(2):\n",
+    "    \n",
+    "    for iebin in range(len(logenergy_bins)-1):\n",
+    "        total_excess = np.nansum(num_excess_events[analysis_type][iebin:])\n",
+    "        total_off = np.nansum(num_off_events[analysis_type][iebin:])\n",
+    "        \n",
+    "        flux = calc_flux_3conditions(np.array([total_excess]), np.array([total_off]), \n",
+    "                                     min_signi, min_exc, min_off_events, alpha,\n",
+    "                                     target_obs_time, livetimes[analysis_type],\n",
+    "                                     min_n_gammas, min_backg_percent)\n",
+    "\n",
+    "        integral_sensitivity[analysis_type][iebin] = flux[0] "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31f8e507",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(12,6))\n",
+    "\n",
+    "plt.plot(10**logenergy_bins[:-1], integral_sensitivity[0], label='Source-independent')\n",
+    "plt.plot(10**logenergy_bins[:-1], integral_sensitivity[1], label='Source-dependent')\n",
+    "\n",
+    "plt.xscale('log')\n",
+    "plt.yscale('log')\n",
+    "#plt.ylim(0.01, 0.5)\n",
+    "plt.xlabel('E$_{reco}$ / TeV')\n",
+    "plt.ylabel('Integral sensitivity (fraction of Crab nebula flux)')\n",
+    "plt.grid()\n",
+    "plt.legend()\n",
+    "plt.show()\n",
+    "\n",
+    "print(f'Best integral sensitivity, source-indep: {np.nanmin(integral_sensitivity[0]):.4f} C.U.')\n",
+    "print(f'Best integral sensitivity, source-dep: {np.nanmin(integral_sensitivity[1]):.4f} C.U.')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "346eb54a",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "np.set_printoptions(precision=4)\n",
+    "print(\"Energies (TeV):\", np.array(reco_energy[0]))\n",
+    "\n",
+    "print()\n",
+    "for analysis_type in range(2):\n",
+    "    if analysis_type == 0:\n",
+    "        print(\"Values for source-independent analysis:\")\n",
+    "    else:\n",
+    "        print(\"Values for source-dependent analysis:\")\n",
+    "    print(\"Sensitivity:\", sensitivity[analysis_type])\n",
+    "    print()\n",
+    "    print(\"Nexcess:\", num_excess_events[analysis_type])\n",
+    "    print()\n",
+    "    print(\"Noff:\", num_off_events[analysis_type])\n",
+    "    \n",
+    "    print()\n",
+    "    print(\"Gammaness cut (even):\", np.where(np.isnan(sensitivity[analysis_type]), np.nan, \n",
+    "                                            gammaness_cut[analysis_type][0]))\n",
+    "    print()\n",
+    "    print(\"Gammaness cut (odd):\", np.where(np.isnan(sensitivity[analysis_type]), np.nan,\n",
+    "                                           gammaness_cut[analysis_type][1]))\n",
+    "    print()\n",
+    "    print(\"Angular cut (even):\", np.where(np.isnan(sensitivity[analysis_type]), np.nan,\n",
+    "                                          angular_cut[analysis_type][0]))\n",
+    "    print()\n",
+    "    print(\"Angular cut (odd):\", np.where(np.isnan(sensitivity[analysis_type]), np.nan,\n",
+    "                                         angular_cut[analysis_type][1]))\n",
+    "    \n",
+    "    print('\\n'*3)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f463d8a5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "[angular_cut[0][1], angular_cut[1][1]]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0b365e57",
+   "metadata": {},
+   "outputs": [],
+   "source": [
"[gammaness_cut[0][1], gammaness_cut[1][1]]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a3d7144", + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(reco_energy[0], angular_cut[0][0], label='even')\n", + "plt.plot(reco_energy[1], angular_cut[0][1], label='odd')\n", + "plt.xscale('log')\n", + "plt.grid()\n", + "plt.ylabel('theta2 cut / deg2')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "plt.plot(reco_energy[0], angular_cut[1][0], label='even')\n", + "plt.plot(reco_energy[1], angular_cut[1][1], label='odd')\n", + "plt.xscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Alpha cut / deg')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61a826f2", + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(reco_energy[0], num_excess_events[0], label='source-independent')\n", + "plt.plot(reco_energy[1], num_excess_events[1], label='source-dependent')\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Excess events')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "plt.plot(reco_energy[0], num_off_events[0], label='source-independent')\n", + "plt.plot(reco_energy[1], num_off_events[1], label='source-dependent')\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Off events')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46ed913f", + "metadata": {}, + "outputs": [], + "source": [ + "num_off_events[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43231a1e", + "metadata": {}, + "outputs": [], + "source": [ + "num_excess_events[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb5f530b", + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(reco_energy[0], gammaness_cut[0][0], label='even')\n", + "plt.plot(reco_energy[0], gammaness_cut[0][1], label='odd')\n", + "plt.xscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Gammaness')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "plt.plot(reco_energy[1], gammaness_cut[1][0], label='even')\n", + "plt.plot(reco_energy[1], gammaness_cut[1][1], label='odd')\n", + "plt.xscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Gammaness')\n", + "plt.xlabel('E / TeV')\n", + "#plt.ylim(0.9, 1.05)\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2cac0fdc", + "metadata": {}, + "outputs": [], + "source": [ + "import scipy.integrate as integrate\n", + "def dfde(x):\n", + " return CRAB_MAGIC_JHEAP2015(x*u.TeV).to_value(1/(u.TeV*u.s*u.cm**2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da0372bb", + "metadata": {}, + "outputs": [], + "source": [ + "dfde(1.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a1a24248", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "etev = reco_energy[0] * u.TeV\n", + "plt.plot(etev, CRAB_MAGIC_JHEAP2015(etev))\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.show()\n", + "\n", + "logenergy_bins[iebin]+logenergy_bins[iebin+1]\n", + "\n", + "rate_per_s_m2 = 1e4*np.array([integrate.quad(dfde, 10**a, 10**b)[0] \n", + " for a, b in zip(logenergy_bins[:-1], logenergy_bins[1:])])\n", + "rate_per_s_m2" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "c051805b", + "metadata": {}, + "outputs": [], + "source": [ + "plt.plot(reco_energy[0], num_excess_events[0]/livetimes[0]/rate_per_s_m2, label='source-independent')\n", + "plt.plot(reco_energy[1], num_excess_events[1]/livetimes[1]/rate_per_s_m2, label='source-dependent')\n", + "plt.xscale('log')\n", + "plt.yscale('log')\n", + "plt.grid()\n", + "plt.ylabel('Aeff(m2)')\n", + "plt.xlabel('E / TeV')\n", + "plt.legend()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3228e1be", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From f64407ab4a006a13a42a20cbbf4a76f971d5f843 Mon Sep 17 00:00:00 2001 From: moralejo Date: Fri, 8 Sep 2023 15:56:38 +0000 Subject: [PATCH 4/8] Removed unused import --- lstchain/scripts/lstchain_dl2_add_sourcepos.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py index 60fc0d60ac..281dc033bf 100755 --- a/lstchain/scripts/lstchain_dl2_add_sourcepos.py +++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py @@ -11,8 +11,6 @@ from ctapipe.coordinates import CameraFrame from ctapipe_io_lst import LSTEventSource -import sys - parser = argparse.ArgumentParser(description="Source position adder. Calculates x,y coordinates of a source in CameraFrame, event-wise.") parser.add_argument('-f', '--dl2-file', dest='file', required=True, type=str, help='DL2 file to be processed') From 02fdf200f1fc76c578345073933cd78231ad172b Mon Sep 17 00:00:00 2001 From: moralejo Date: Tue, 12 Sep 2023 07:34:49 +0000 Subject: [PATCH 5/8] Added file needed by the sensitivity calculation notebook --- notebooks/magic_sensitivity.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 notebooks/magic_sensitivity.txt diff --git a/notebooks/magic_sensitivity.txt b/notebooks/magic_sensitivity.txt new file mode 100644 index 0000000000..6a26092788 --- /dev/null +++ b/notebooks/magic_sensitivity.txt @@ -0,0 +1,12 @@ +E xx xx dNdE dNdE_e Percentage +79.4328 39.9926 16.6357 730.e-12 30.e-12 6.8 +125.893 10.5215 1.30966 137.e-12 6.e-12 3.67 +199.526 3.65188 0.22562 30.5e-12 1.5e-12 2.25 +316.228 2.70062 0.19314 9.3e-12 0.9e-12 2.03 +501.187 2.19932 0.24697 2.3e-12 0.4e-12 1.51 +794.328 1.66672 0.22231 0.72e-12 0.08e-12 1.53 +1258.93 1.48853 0.19754 0.23e-12 0.025e-12 1.66 +1995.26 2.3236 0.35084 0.090e-12 0.015e-12 2.36 +3162.28 2.28266 0.50886 0.041e-12 0.011e-12 3.8 +5011.87 2.53844 0.69935 0.017e-12 0.005e-12 6.1 +7943.28 5.31584 1.59031 0.013e-12 0.005e-12 15 From 03fb27ca69cc9e50151849cc488a10282891e881 Mon Sep 17 00:00:00 2001 From: Abelardo Moralejo Date: Fri, 10 Nov 2023 09:39:03 +0100 Subject: [PATCH 6/8] Added forgotten lines to make this executable --- lstchain/scripts/lstchain_dl2_add_sourcepos.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py index 281dc033bf..74fa2a17ab 100755 --- a/lstchain/scripts/lstchain_dl2_add_sourcepos.py +++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py @@ 
From f64407ab4a006a13a42a20cbbf4a76f971d5f843 Mon Sep 17 00:00:00 2001
From: moralejo
Date: Fri, 8 Sep 2023 15:56:38 +0000
Subject: [PATCH 4/8] Removed unused import

---
 lstchain/scripts/lstchain_dl2_add_sourcepos.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
index 60fc0d60ac..281dc033bf 100755
--- a/lstchain/scripts/lstchain_dl2_add_sourcepos.py
+++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
@@ -11,8 +11,6 @@
 from ctapipe.coordinates import CameraFrame
 from ctapipe_io_lst import LSTEventSource
 
-import sys
-
 parser = argparse.ArgumentParser(description="Source position adder. Calculates x,y coordinates of a source in CameraFrame, event-wise.")
 parser.add_argument('-f', '--dl2-file', dest='file', required=True,
                     type=str, help='DL2 file to be processed')

From 02fdf200f1fc76c578345073933cd78231ad172b Mon Sep 17 00:00:00 2001
From: moralejo
Date: Tue, 12 Sep 2023 07:34:49 +0000
Subject: [PATCH 5/8] Added file needed by the sensitivity calculation notebook

---
 notebooks/magic_sensitivity.txt | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 notebooks/magic_sensitivity.txt

diff --git a/notebooks/magic_sensitivity.txt b/notebooks/magic_sensitivity.txt
new file mode 100644
index 0000000000..6a26092788
--- /dev/null
+++ b/notebooks/magic_sensitivity.txt
@@ -0,0 +1,12 @@
+E xx xx dNdE dNdE_e Percentage
+79.4328 39.9926 16.6357 730.e-12 30.e-12 6.8
+125.893 10.5215 1.30966 137.e-12 6.e-12 3.67
+199.526 3.65188 0.22562 30.5e-12 1.5e-12 2.25
+316.228 2.70062 0.19314 9.3e-12 0.9e-12 2.03
+501.187 2.19932 0.24697 2.3e-12 0.4e-12 1.51
+794.328 1.66672 0.22231 0.72e-12 0.08e-12 1.53
+1258.93 1.48853 0.19754 0.23e-12 0.025e-12 1.66
+1995.26 2.3236 0.35084 0.090e-12 0.015e-12 2.36
+3162.28 2.28266 0.50886 0.041e-12 0.011e-12 3.8
+5011.87 2.53844 0.69935 0.017e-12 0.005e-12 6.1
+7943.28 5.31584 1.59031 0.013e-12 0.005e-12 15

From 03fb27ca69cc9e50151849cc488a10282891e881 Mon Sep 17 00:00:00 2001
From: Abelardo Moralejo
Date: Fri, 10 Nov 2023 09:39:03 +0100
Subject: [PATCH 6/8] Added forgotten lines to make this executable

---
 lstchain/scripts/lstchain_dl2_add_sourcepos.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lstchain/scripts/lstchain_dl2_add_sourcepos.py b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
index 281dc033bf..74fa2a17ab 100755
--- a/lstchain/scripts/lstchain_dl2_add_sourcepos.py
+++ b/lstchain/scripts/lstchain_dl2_add_sourcepos.py
@@ -66,3 +66,6 @@ def main():
     table[['src_x', 'src_y']].to_hdf(args.file, new_table_name, mode='r+',
                                      format='table', data_columns=True)
     print('... done!')
+
+if __name__ == '__main__':
+    main()

From 0fe60b2488a178586f30336db5b154a6d770fa27 Mon Sep 17 00:00:00 2001
From: Abelardo Moralejo
Date: Sat, 11 Nov 2023 20:21:13 +0100
Subject: [PATCH 7/8] Solved theta2 / gammaness confusion in info message

---
 notebooks/calculate_sensitivity_from_Crab.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/notebooks/calculate_sensitivity_from_Crab.ipynb b/notebooks/calculate_sensitivity_from_Crab.ipynb
index 05db02096f..82fa717131 100644
--- a/notebooks/calculate_sensitivity_from_Crab.ipynb
+++ b/notebooks/calculate_sensitivity_from_Crab.ipynb
@@ -1421,7 +1421,7 @@
     "        print('No valid cuts found for source-independent analysis!')\n",
     "        continue\n",
     "    else:\n",
-    "        print(f'Gammaness cuts: {theta2bins[indices0[0]+1]:.4f}, {theta2bins[indices1[0]+1]:.4f}')\n",
+    "        print(f'Gammaness cuts: {gammaness_bins[indices0[0]+1]:.4f}, {gammaness_bins[indices1[0]+1]:.4f}')\n",
     "        print(f'Theta2 cuts: {theta2bins[indices0[1]+1]:.4f}, {theta2bins[indices1[1]+1]:.4f}')\n",
     "    \n",
     "        xx, yy = plot_rebinned(theta2bins, nevts_on, nevts_on**0.5, rebin_factor[iebin], '')\n",

From 9484b0f3c75ab6b9556548d44b9d2aa11e41a0b7 Mon Sep 17 00:00:00 2001
From: moralejo
Date: Fri, 24 Nov 2023 11:16:40 +0000
Subject: [PATCH 8/8] Addressed reviewer's comments, several small changes for clarity

---
 .../calculate_sensitivity_from_Crab.ipynb | 44 +++++++------------
 1 file changed, 17 insertions(+), 27 deletions(-)

diff --git a/notebooks/calculate_sensitivity_from_Crab.ipynb b/notebooks/calculate_sensitivity_from_Crab.ipynb
index 82fa717131..24c9818755 100644
--- a/notebooks/calculate_sensitivity_from_Crab.ipynb
+++ b/notebooks/calculate_sensitivity_from_Crab.ipynb
@@ -32,9 +32,7 @@
     "from ctapipe_io_lst import LSTEventSource\n",
     "from ctapipe.io import read_table\n",
     "\n",
-    "%matplotlib inline\n",
-    "\n",
-    "#%load_ext filprofiler"
+    "%matplotlib inline"
    ]
   },
   {
@@ -99,8 +97,8 @@
     "# Assumed (effective) observation time for the calculation (note: it has nothing to do with \n",
     "# the actual observation time of the sample used for the calculation!):\n",
     "obs_time = 50 * u.h \n",
-    "# obs_time = 1/60. * u.h # 1 minute\n",
-    "# obs_time = 0.083333333 * u.h # 5 minutes\n",
+    "# obs_time = 1 * u.min\n",
+    "# obs_time = 5 * u.min\n",
     "# obs_time = 0.5 * u.h\n",
     "# obs_time = 0.7 * u.h\n",
     "# obs_time = 5 * u.h\n",
@@ -133,9 +131,16 @@
     "# We separate them so that we can optimize the cuts in one and apply them to \n",
     "# the other (and vice versa)\n",
     "\n",
-    "# For source-independent analysis we will use one off region up to 250 GeV. \n",
+    "# Background estimation:\n",
+    "# For source-independent analysis, we will use one off region up to 250 GeV. \n",
     "# Three off regions for higher energies:\n",
     "first_ebin_with_3offs = 7 # i.e. from ~250 GeV onwards\n",
+    "# NOTE! This is just what is used to *estimate* the average background in the on-region. \n",
+    "# For the actual computation of sensitivity, in order to follow the standard definition, \n",
+    "# we use (Li & Ma's) alpha = 0.2 (defined in the cell above), i.e. we assume we can compute \n",
+    "# the background in a region 5 times larger than the on-region. In practice it is somewhat \n",
+    "# optimistic for a standalone telescope like LST-1, with its limited angular resolution.\n",
+    "#\n",
     "\n",
     "# Intensity cuts:\n",
     "min_intensity = 50\n",
@@ -182,17 +187,6 @@
     "norm_range = np.array([[0.1, 0.16], [20., 59.8]]) # deg2 for theta2, deg for Alpha"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9574f30b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sa = LSTEventSource.create_subarray(tel_id=1)\n",
-    "focal = sa.tel[1].optics.effective_focal_length"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -200,6 +194,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "sa = LSTEventSource.create_subarray(tel_id=1)\n",
+    "focal = sa.tel[1].optics.effective_focal_length\n",
+    "\n",
     "# The source position src_x, src_y (in m), stored in \"source_position\", is calculated by \n",
     "# lstchain_dl2_add_sourcepos.py using the effective focal length (29.30565 m), which means \n",
     "# that it is \"consistent\" with the reco values reco_src_x, reco_src_y (which are affected \n",
@@ -236,8 +233,6 @@
    },
    "outputs": [],
    "source": [
-    "#%%filprofile\n",
-    "\n",
     "tablename = \"/dl2/event/telescope/parameters/LST_LSTCam\"\n",
     "livetimes = []\n",
     "\n",
@@ -350,7 +345,6 @@
    },
    "outputs": [],
    "source": [
-    "#%%filprofile\n",
     "# Columns to be kept in the case of source-dependent analysis (again, to save memory):\n",
     "\n",
     "columns_srcdep = [\"('on', 'expected_src_x')\",\n",
@@ -502,8 +496,6 @@
    },
    "outputs": [],
    "source": [
-    "#%%filprofile\n",
-    "\n",
     "# Obtain the cumulative histograms: integrate in gammaness and in theta (or alpha).\n",
     "\n",
     "for evtid in range(cum_on_events[0].shape[0]):\n",
@@ -685,7 +677,7 @@
    "source": [
     "#################################################################################################\n",
     "#                                                                                              #\n",
-    "# SECOND PART OF THE NOTEBOOK!!! RUN FROM HERE OF YOU JUST WANT TO RE-RUN THE CUT OPTIMIZATION #\n",
+    "# SECOND PART OF THE NOTEBOOK!!! RUN FROM HERE IF YOU JUST WANT TO RE-RUN THE CUT OPTIMIZATION #\n",
     "# USING THE SAME DATA SAMPLES!                                                                 #\n",
     "#                                                                                              #\n",
     "##################################################################################################"
@@ -715,8 +707,8 @@
     "# Assumed (effective) observation time for the calculation (note: it has nothing to do with \n",
     "# the actual observation time of the sample used for the calculation!):\n",
     "# obs_time = 50 * u.h \n",
-    "# obs_time = 1/60. * u.h # 1 minute\n",
-    "# obs_time = 0.083333333 * u.h # 5 minutes\n",
+    "# obs_time = 1 * u.min\n",
+    "# obs_time = 5 * u.min\n",
     "# obs_time = 0.5 * u.h\n",
     "# obs_time = 0.7 * u.h\n",
     "# obs_time = 5 * u.h\n",
@@ -849,8 +841,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#%%filprofile\n",
-    "\n",
     "lima_signi = [[], []]\n",
     "flux_for_5_sigma = [[], []]\n",
    "\n",
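
The sensitivities written to Sensitivity_output.csv are fractions of the Crab Nebula flux. Converting them to absolute differential-flux points only requires the reference spectrum already imported in the notebook; a short sketch, assuming `data` as read back in the notebook's re-plot cell:

    import astropy.units as u
    from pyirf.spectral import CRAB_MAGIC_JHEAP2015

    etev = data[0] * u.TeV                  # bin center energies
    crab_dfde = CRAB_MAGIC_JHEAP2015(etev)  # reference Crab dN/dE at those energies
    # Source-independent sensitivity (data[1], in Crab units) expressed as E^2 dN/dE:
    e2dnde = (data[1] * crab_dfde * etev**2).to(u.erg / (u.cm**2 * u.s))
    print(e2dnde)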