-rwxr-xr-x  fig2.py                      |   2
-rw-r--r--  plot_command                 |   2
-rwxr-xr-x  plot_sens.py                 |   2
-rwxr-xr-x  plot_sens_sourcescan.py      | 411
-rw-r--r--  submitter/out                |   0
-rw-r--r--  submitter/sens_dag_source.py | 183
-rw-r--r--  utils/enums.py               |   6
-rw-r--r--  utils/plot.py                | 188

8 files changed, 787 insertions, 7 deletions
diff --git a/fig2.py b/fig2.py
--- a/fig2.py
+++ b/fig2.py
@@ -108,7 +108,7 @@ def main():
         map(fr_utils.angles_to_fr, flavour_angles)
     )
-    nbins = 15
+    nbins = 25
     fig = plt.figure(figsize=(8, 8))
     ax = fig.add_subplot(111)
diff --git a/plot_command b/plot_command
index 54ec50e..cf733f5 100644
--- a/plot_command
+++ b/plot_command
@@ -1 +1,3 @@
 python plot_sens.py --data real --dimensions 3 4 5 6 7 8 --fix-source-ratio True --source-ratios 1 2 0 1 0 0 0 1 0 --split-jobs True --stat-method bayesian --run-method fixed_angle --infile /data/user/smandalia/flavour_ratio/data --likelihood golemfit --sens-bins 20
+
+python contour.py --data realisation --debug True --injected-ratio 1 1 1 --likelihood golemfit --mn-live-points 500 --mn-tolerance 0.3 --outfile /data/user/smandalia/flavour_ratio/data/contour/golemfit/realisation/ --plot-chains True --plot-triangle True --run-scan False
diff --git a/plot_sens.py b/plot_sens.py
index ba3e923..f91b5f2 100755
--- a/plot_sens.py
+++ b/plot_sens.py
@@ -163,7 +163,7 @@ def parse_args(args=None):
         help='Set the new physics dimensions to consider'
     )
     parser.add_argument(
-        '--source-ratios', type=int, nargs='*', default=[2, 1, 0],
+        '--source-ratios', type=int, nargs='*', default=[1, 2, 0],
         help='Set the source flavour ratios for the case when you want to fix it'
     )
     if args is None: return parser.parse_args()
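
The plot_command lines above pass --source-ratios as a flat integer list (1 2 0 1 0 0 0 1 0) that the analysis scripts regroup into per-source flavour triples and normalise, as in the commented-out block of process_args in the new script below. A minimal sketch of that regrouping, with plain division standing in for the repo's fr_utils.normalise_fr:

def group_source_ratios(flat):
    # Regroup a flat CLI list such as [1, 2, 0, 1, 0, 0, 0, 1, 0]
    # into triples, one triple per source composition.
    assert len(flat) % 3 == 0
    triples = [flat[3*i:3*i + 3] for i in range(len(flat) // 3)]
    # Normalise each triple so the flavour fractions sum to one.
    return [[v / float(sum(t)) for v in t] for t in triples]

print(group_source_ratios([1, 2, 0, 1, 0, 0]))
# [[0.33.., 0.66.., 0.0], [1.0, 0.0, 0.0]]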
diff --git a/plot_sens_sourcescan.py b/plot_sens_sourcescan.py
new file mode 100755
index 0000000..130817d
--- /dev/null
+++ b/plot_sens_sourcescan.py
@@ -0,0 +1,411 @@
+#! /usr/bin/env python
+# author : S. Mandalia
+#          s.p.mandalia@qmul.ac.uk
+#
+# date   : April 28, 2018
+
+"""
+HESE BSM flavour ratio analysis plotting script
+"""
+
+from __future__ import absolute_import, division
+
+import os
+import argparse
+from functools import partial
+from copy import deepcopy
+
+import numpy as np
+import numpy.ma as ma
+
+from utils import fr as fr_utils
+from utils import gf as gf_utils
+from utils import llh as llh_utils
+from utils import misc as misc_utils
+from utils import plot as plot_utils
+from utils.enums import EnergyDependance, Likelihood, MixingScenario, ParamTag
+from utils.enums import PriorsCateg, SensitivityCateg, StatCateg
+from utils.param import Param, ParamSet, get_paramsets
+
+from utils import mn as mn_utils
+
+
+def define_nuisance():
+    """Define the nuisance parameters."""
+    tag = ParamTag.SM_ANGLES
+    g_prior = PriorsCateg.GAUSSIAN
+    lg_prior = PriorsCateg.LIMITEDGAUSS
+    e = 1e-9
+    nuisance = [
+        Param(name='s_12_2', value=0.307, seed=[0.26, 0.35], ranges=[0., 1.], std=0.013, tex=r's_{12}^2', prior=lg_prior, tag=tag),
+        Param(name='c_13_4', value=(1-(0.02206))**2, seed=[0.950, 0.961], ranges=[0., 1.], std=0.00147, tex=r'c_{13}^4', prior=lg_prior, tag=tag),
+        Param(name='s_23_2', value=0.538, seed=[0.31, 0.75], ranges=[0., 1.], std=0.069, tex=r's_{23}^2', prior=lg_prior, tag=tag),
+        Param(name='dcp', value=4.08404, seed=[0+e, 2*np.pi-e], ranges=[0., 2*np.pi], std=2.0, tex=r'\delta_{CP}', tag=tag),
+        Param(
+            name='m21_2', value=7.40E-23, seed=[7.2E-23, 7.6E-23], ranges=[6.80E-23, 8.02E-23],
+            std=2.1E-24, tex=r'\Delta m_{21}^2{\rm GeV}^{-2}', prior=g_prior, tag=tag
+        ),
+        Param(
+            name='m3x_2', value=2.494E-21, seed=[2.46E-21, 2.53E-21], ranges=[2.399E-21, 2.593E-21],
+            std=3.3E-23, tex=r'\Delta m_{3x}^2{\rm GeV}^{-2}', prior=g_prior, tag=tag
+        )
+    ]
+    tag = ParamTag.NUISANCE
+    nuisance.extend([
+        Param(name='convNorm', value=1., seed=[0.5, 2. ], ranges=[0.1, 10.], std=0.4, prior=lg_prior, tag=tag),
+        Param(name='promptNorm', value=0., seed=[0. , 6. ], ranges=[0. , 20.], std=2.4, prior=lg_prior, tag=tag),
+        Param(name='muonNorm', value=1., seed=[0.1, 2. ], ranges=[0. , 10.], std=0.1, tag=tag),
+        Param(name='astroNorm', value=6.9, seed=[0., 5. ], ranges=[0. , 20.], std=1.5, tag=tag),
+        Param(name='astroDeltaGamma', value=2.5, seed=[2.4, 3. ], ranges=[-5., 5. ], std=0.1, tag=tag)
+    ])
+    return ParamSet(nuisance)
+
+
+def nuisance_argparse(parser):
+    nuisance = define_nuisance()
+    for parm in nuisance:
+        parser.add_argument(
+            '--'+parm.name, type=float, default=parm.value,
+            help=parm.name+' to inject'
+        )
+
+
+def process_args(args):
+    """Process the input args."""
+    if args.fix_mixing is not MixingScenario.NONE and args.fix_scale:
+        raise NotImplementedError('Fixed mixing and scale not implemented')
+    if args.fix_mixing is not MixingScenario.NONE and args.fix_mixing_almost:
+        raise NotImplementedError(
+            '--fix-mixing and --fix-mixing-almost cannot be used together'
+        )
+    if args.fix_scale:
+        raise NotImplementedError(
+            '--fix-scale not implemented'
+        )
+
+    args.measured_ratio = fr_utils.normalise_fr(args.measured_ratio)
+    # if args.fix_source_ratio:
+    #     assert len(args.source_ratios) % 3 == 0
+    #     srs = [args.source_ratios[3*x:3*x+3]
+    #            for x in range(int(len(args.source_ratios)/3))]
+    #     args.source_ratios = map(fr_utils.normalise_fr, srs)
+
+    if args.energy_dependance is EnergyDependance.SPECTRAL:
+        args.binning = np.logspace(
+            np.log10(args.binning[0]), np.log10(args.binning[1]), args.binning[2]+1
+        )
+
+    if args.split_jobs and args.run_method is SensitivityCateg.FULL:
+        raise NotImplementedError(
+            'split_jobs and run_method not implemented'
+        )
+
+    args.dimensions = np.sort(args.dimensions)
+
+    args_copy = deepcopy(args)
+    scale_regions = []
+    for dim in args.dimensions:
+        args_copy.dimension = dim
+        _, scale_region = fr_utils.estimate_scale(args_copy)
+        scale_regions.append(scale_region)
+    args.scale_region = [np.min(scale_regions), np.max(scale_regions)]
+    args.scale = np.power(10., np.average(np.log10(args.scale_region)))
+
+
+def parse_args(args=None):
+    """Parse command line arguments"""
+    parser = argparse.ArgumentParser(
+        description="HESE BSM flavour ratio analysis plotting script",
+        formatter_class=misc_utils.SortingHelpFormatter,
+    )
+    parser.add_argument(
+        '--infile', type=str, default='./untitled',
+        help='Path to input dir'
+    )
+    parser.add_argument(
+        '--run-method', default='full',
+        type=partial(misc_utils.enum_parse, c=SensitivityCateg),
+        choices=SensitivityCateg,
+        help='Choose which type of sensivity plot to make'
+    )
+    parser.add_argument(
+        '--stat-method', default='bayesian',
+        type=partial(misc_utils.enum_parse, c=StatCateg), choices=StatCateg,
+        help='Statistical method to employ'
+    )
+    parser.add_argument(
+        '--sens-bins', type=int, default=10,
+        help='Number of bins for the Bayes factor plot'
+    )
+    parser.add_argument(
+        '--split-jobs', type=misc_utils.parse_bool, default='False',
+        help='Did the jobs get split'
+    )
+    parser.add_argument(
+        '--plot', type=misc_utils.parse_bool, default='True',
+        help='Make sensitivity plots'
+    )
+    parser.add_argument(
+        '--plot-statistic', type=misc_utils.parse_bool, default='False',
+        help='Plot MultiNest evidence or LLH value'
+    )
+    fr_utils.fr_argparse(parser)
+    gf_utils.gf_argparse(parser)
+    llh_utils.likelihood_argparse(parser)
+    mn_utils.mn_argparse(parser)
+    nuisance_argparse(parser)
+    misc_utils.remove_option(parser, 'dimension')
+    misc_utils.remove_option(parser, 'source_ratio')
+    misc_utils.remove_option(parser, 'scale')
+    misc_utils.remove_option(parser, 'scale_region')
+    parser.add_argument(
+        '--dimensions', type=int, nargs='*', default=[3, 6],
+        help='Set the new physics dimensions to consider'
+    )
+    parser.add_argument(
+        '--source-bins', type=int, default=5,
+        help='Binning in source flavour space'
+    )
+    if args is None: return parser.parse_args()
+    else: return parser.parse_args(args.split())
+
+
+def main():
+    args = parse_args()
+    process_args(args)
+    args.scale = 0
+    misc_utils.print_args(args)
+
+    asimov_paramset, llh_paramset = get_paramsets(args, define_nuisance())
+
+    scale = llh_paramset.from_tag(ParamTag.SCALE)[0]
+    mmangles = llh_paramset.from_tag(ParamTag.MMANGLES)
+    if args.run_method is SensitivityCateg.FULL:
+        st_paramset_arr = [llh_paramset.from_tag(ParamTag.SCALE, invert=True)]
+    elif args.run_method in [SensitivityCateg.FIXED_ANGLE, SensitivityCateg.CORR_ANGLE]:
+        nscale_pmset = llh_paramset.from_tag([ParamTag.SCALE, ParamTag.MMANGLES], invert=True)
+        st_paramset_arr = [nscale_pmset] * 3
+    elif args.run_method in [SensitivityCateg.FIXED_ONE_ANGLE, SensitivityCateg.CORR_ONE_ANGLE]:
+        nscale_pmset = llh_paramset.from_tag(ParamTag.SCALE, invert=True)
+        st_paramset_arr = []
+        for x in xrange(3):
+            st_paramset_arr.append(
+                ParamSet([prms for prms in nscale_pmset
+                          if mmangles[x].name != prms.name])
+            )
+
+    corr_angles_categ = [SensitivityCateg.CORR_ANGLE, SensitivityCateg.CORR_ONE_ANGLE]
+    fixed_angle_categ = [SensitivityCateg.FIXED_ANGLE, SensitivityCateg.FIXED_ONE_ANGLE]
+
+    if args.run_method in corr_angles_categ:
+        scan_angles = np.linspace(0+1e-9, 1-1e-9, args.sens_bins)
+    else: scan_angles = np.array([0])
+    print 'scan_angles', scan_angles
+
+    dims = len(args.dimensions)
+    binning = np.linspace(0, 1, args.source_bins)
+    grid = np.dstack(np.meshgrid(binning, binning)).reshape(
+        args.source_bins*args.source_bins, 2
+    )
+    source_ratios = []
+    for x in grid:
+        if x[0]+x[1] > 1:
+            continue
+        source_ratios.append([x[0], x[1], 1-x[0]-x[1]])
+    args.source_ratios = source_ratios
+    n_source_ratios = map(fr_utils.normalise_fr, source_ratios)
+
+    srcs = len(n_source_ratios)
+    if args.run_method is SensitivityCateg.FULL:
+        statistic_arr = np.full((dims, srcs, args.sens_bins, 2), np.nan)
+    elif args.run_method in fixed_angle_categ:
+        statistic_arr = np.full((dims, srcs, len(st_paramset_arr), args.sens_bins, 2), np.nan)
+    elif args.run_method in corr_angles_categ:
+        statistic_arr = np.full(
+            (dims, srcs, len(st_paramset_arr), args.sens_bins, args.sens_bins, 3), np.nan
+        )
+
+    print 'Loading data'
+    for idim, dim in enumerate(args.dimensions):
+        argsc = deepcopy(args)
+        argsc.dimension = dim
+        _, scale_region = fr_utils.estimate_scale(argsc)
+        argsc.scale_region = scale_region
+        scan_scales = np.linspace(
+            np.log10(scale_region[0]), np.log10(scale_region[1]), args.sens_bins
+        )
+        scan_scales = np.concatenate([[-100.], scan_scales])
+
+        for isrc, src in enumerate(n_source_ratios):
+            argsc.source_ratio = src
+            infile = args.infile
+            if args.likelihood is Likelihood.GOLEMFIT:
+                infile += '/golemfit/'
+            elif args.likelihood is Likelihood.GAUSSIAN:
+                infile += '/gaussian/'
+            if args.likelihood is Likelihood.GAUSSIAN:
+                infile += '{0}/'.format(str(args.sigma_ratio).replace('.', '_'))
+            # infile += '/DIM{0}/fix_ifr/{1}/{2}/{3}/fr_stat'.format(
+            infile += '/DIM{0}/fix_ifr/sourcescan/{1}/{2}/{3}/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/prior/{1}/{2}/{3}/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/{1}/{2}/{3}/old/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/seed2/{1}/{2}/{3}/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/100TeV/{1}/{2}/{3}/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/strictprior/{1}/{2}/{3}/fr_stat'.format(
+            # infile += '/DIM{0}/fix_ifr/noprior/{1}/{2}/{3}/fr_stat'.format(
+                dim, *map(misc_utils.parse_enum, [args.stat_method, args.run_method, args.data])
+            ) + misc_utils.gen_identifier(argsc)
+            print '== {0:<25} = {1}'.format('infile', infile)
+
+            if args.split_jobs:
+                for idx_an, an in enumerate(scan_angles):
+                    for idx_sc, sc in enumerate(scan_scales):
+                        filename = infile + '_scale_{0:.0E}'.format(np.power(10, sc))
+                        try:
+                            if args.run_method in fixed_angle_categ:
+                                print 'Loading from {0}'.format(filename+'.npy')
+                                statistic_arr[idim][isrc][:,idx_sc] = np.load(filename+'.npy')[:,0]
+                            if args.run_method in corr_angles_categ:
+                                filename += '_angle_{0:<04.2}'.format(an)
+                                print 'Loading from {0}'.format(filename+'.npy')
+                                statistic_arr[idim][isrc][:,idx_an,idx_sc] = np.load(filename+'.npy')[:,0,0]
+                        except:
+                            print 'Unable to load file {0}'.format(filename+'.npy')
+                            continue
+            else:
+                print 'Loading from {0}'.format(infile+'.npy')
+                try:
+                    statistic_arr[idim][isrc] = np.load(infile+'.npy')
+                except:
+                    print 'Unable to load file {0}'.format(infile+'.npy')
+                    continue
+
+    data = ma.masked_invalid(statistic_arr)
+
+    print 'data', data
+    print 'data.shape', data.shape
+    if args.plot_statistic:
+        assert 0
+        print 'Plotting statistic'
+
+        argsc = deepcopy(args)
+        for idim, dim in enumerate(args.dimensions):
+            argsc.dimension = dim
+            _, scale_region = fr_utils.estimate_scale(argsc)
+            argsc.scale_region = scale_region
+            base_infile = args.infile
+            if args.likelihood is Likelihood.GOLEMFIT:
+                base_infile += '/golemfit/'
+            elif args.likelihood is Likelihood.GAUSSIAN:
+                base_infile += '/gaussian/'
+            if args.likelihood is Likelihood.GAUSSIAN:
+                base_infile += '{0}/'.format(str(args.sigma_ratio).replace('.', '_'))
+            # base_infile += '/DIM{0}/fix_ifr'.format(dim)
+            base_infile += '/DIM{0}/fix_ifr/sourcescan'.format(dim)
+            # base_infile += '/DIM{0}/fix_ifr/prior'.format(dim)
+            # base_infile += '/DIM{0}/fix_ifr/seed2'.format(dim)
+            # base_infile += '/DIM{0}/fix_ifr/100TeV'.format(dim)
+            # base_infile += '/DIM{0}/fix_ifr/strictprior'.format(dim)
+            # base_infile += '/DIM{0}/fix_ifr/noprior'.format(dim)
+
+            for isrc, src in enumerate(n_source_ratios):
+                argsc.source_ratio = src
+                infile = base_infile +'/{0}/{1}/{2}/fr_stat'.format(
+                # infile = base_infile +'/{0}/{1}/{2}/old/fr_stat'.format(
+                    *map(misc_utils.parse_enum, [args.stat_method, args.run_method, args.data])
+                ) + misc_utils.gen_identifier(argsc)
+                basename = os.path.dirname(infile)
+                baseoutfile = basename[:5]+basename[5:].replace('data', 'plots')
+                baseoutfile += '/' + os.path.basename(infile)
+                if args.run_method is SensitivityCateg.FULL:
+                    outfile = baseoutfile
+                    plot_utils.plot_statistic(
+                        data = data[idim][isrc],
+                        outfile = outfile,
+                        outformat = ['png'],
+                        args = argsc,
+                        scale_param = scale,
+                    )
+                if args.run_method in fixed_angle_categ:
+                    for idx_scen in xrange(len(st_paramset_arr)):
+                        print '|||| SCENARIO = {0}'.format(idx_scen)
+                        outfile = baseoutfile + '_SCEN{0}'.format(idx_scen)
+                        if idx_scen == 0: label = r'$\mathcal{O}_{12}=\pi/4$'
+                        elif idx_scen == 1: label = r'$\mathcal{O}_{13}=\pi/4$'
+                        elif idx_scen == 2: label = r'$\mathcal{O}_{23}=\pi/4$'
+                        plot_utils.plot_statistic(
+                            data = data[idim][isrc][idx_scen],
+                            outfile = outfile,
+                            outformat = ['png'],
+                            args = argsc,
+                            scale_param = scale,
+                            label = label
+                        )
+                elif args.run_method in corr_angles_categ:
+                    for idx_scen in xrange(len(st_paramset_arr)):
+                        print '|||| SCENARIO = {0}'.format(idx_scen)
+                        basescenoutfile = baseoutfile + '_SCEN{0}'.format(idx_scen)
+                        if idx_scen == 0: label = r'$\mathcal{O}_{12}='
+                        elif idx_scen == 1: label = r'$\mathcal{O}_{13}='
+                        elif idx_scen == 2: label = r'$\mathcal{O}_{23}='
+                        for idx_an, an in enumerate(scan_angles):
+                            print '|||| ANGLE = {0:<04.2}'.format(float(an))
+                            outfile = basescenoutfile + '_ANGLE{0}'.format(idx_an)
+                            _label = label + r'{0:<04.2}$'.format(an)
+                            plot_utils.plot_statistic(
+                                data = data[idim][isrc][idx_scen][idx_an][:,1:],
+                                outfile = outfile,
+                                outformat = ['png'],
+                                args = argsc,
+                                scale_param = scale,
+                                label = _label
+                            )
+
+    if args.plot:
+        print 'Plotting sensitivities'
+
+        basename = args.infile[:5]+args.infile[5:].replace('data', 'plots')
+        baseoutfile = basename + '/{0}/{1}/{2}/'.format(
+            *map(misc_utils.parse_enum, [args.likelihood, args.stat_method, args.data])
+        )
+
+        if args.run_method is SensitivityCateg.FULL:
+            plot_utils.plot_sens_full(
+                data = data,
+                outfile = baseoutfile + '/FULL',
+                outformat = ['png', 'pdf'],
+                args = args,
+            )
+        elif args.run_method in fixed_angle_categ:
+            # plot_utils.plot_sens_fixed_angle_pretty(
+            #     data = data,
+            #     outfile = baseoutfile + '/fixed_angle_pretty_substantial',
+            #     outformat = ['png', 'pdf'],
+            #     args = args,
+            # )
+            # plot_utils.plot_sens_fixed_angle(
+            #     data = data,
+            #     outfile = baseoutfile + '/FIXED_ANGLE',
+            #     outformat = ['png'],
+            #     args = args,
+            # )
+            plot_utils.plot_source_ternary_1D(
+                data = data,
+                outfile = baseoutfile + '/source_ternary',
+                outformat = ['png'],
+                args = args,
+            )
+        elif args.run_method in corr_angles_categ:
+            plot_utils.plot_sens_corr_angle(
+                data = data,
+                outfile = baseoutfile + '/CORR_ANGLE',
+                outformat = ['png', 'pdf'],
+                args = args,
+            )
+
+
+main.__doc__ = __doc__
+
+
+if __name__ == '__main__':
+    main()
diff --git a/submitter/out b/submitter/out
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/submitter/out
diff --git a/submitter/sens_dag_source.py b/submitter/sens_dag_source.py
new file mode 100644
index 0000000..bdb5924
--- /dev/null
+++ b/submitter/sens_dag_source.py
@@ -0,0 +1,183 @@
+#! /usr/bin/env python
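
Both plot_sens_sourcescan.py above and the submitter script below build the same triangular scan grid over source flavour compositions: a regular mesh on the unit square is cut down to the simplex x + y <= 1, and the third fraction is fixed as 1 - x - y. A self-contained sketch of that construction:

import numpy as np

def source_grid(bins):
    # Regular grid on [0, 1]^2; keep only points inside the flavour
    # simplex, where the third fraction 1 - x - y is non-negative.
    binning = np.linspace(0, 1, bins)
    grid = np.dstack(np.meshgrid(binning, binning)).reshape(bins*bins, 2)
    return [[x, y, 1 - x - y] for x, y in grid if x + y <= 1]

print(len(source_grid(5)))  # 15 compositions from a 5x5 mesh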
+
+import os
+import numpy as np
+
+full_scan_mfr = [
+    # (1, 1, 1), (1, 2, 0)
+]
+
+bins = 5
+binning = np.linspace(0, 1, bins)
+grid = np.dstack(np.meshgrid(binning, binning)).reshape(bins*bins, 2)
+sources = []
+for x in grid:
+    if x[0]+x[1] > 1:
+        continue
+    sources.append([x[0], x[1], 1-x[0]-x[1]])
+
+# fix_sfr_mfr = [
+#     (1, 1, 1, 1, 2, 0),
+#     (1, 1, 1, 1, 0, 0),
+#     (1, 1, 1, 0, 1, 0),
+#     # (1, 1, 1, 0, 0, 1),
+#     # (1, 1, 0, 1, 2, 0),
+#     # (1, 1, 0, 1, 0, 0),
+#     # (1, 1, 0, 0, 1, 0),
+#     # (1, 0, 0, 1, 0, 0),
+#     # (0, 1, 0, 0, 1, 0),
+#     # (1, 2, 0, 1, 2, 0),
+#     # (1, 2, 0, 0, 1, 0),
+# ]
+fix_sfr_mfr = []
+for s in sources:
+    fix_sfr_mfr.append((1, 1, 1, s[0], s[1], s[2]))
+print 'fix_sfr_mfr', fix_sfr_mfr
+print 'len(fix_sfr_mfr)', len(fix_sfr_mfr)
+
+GLOBAL_PARAMS = {}
+
+# Bayes Factor
+sens_eval_bin = 'true' # set to 'all' to run normally
+GLOBAL_PARAMS.update(dict(
+    sens_run = 'True',
+    run_method = 'fixed_angle', # full, fixed_angle, corr_angle
+    stat_method = 'bayesian',
+    sens_bins = 10,
+    seed = None
+))
+
+# MultiNest
+GLOBAL_PARAMS.update(dict(
+    # mn_live_points = 1000,
+    # mn_live_points = 600,
+    mn_live_points = 100,
+    # mn_tolerance = 0.1,
+    mn_tolerance = 0.3,
+    mn_output = './mnrun'
+))
+
+# FR
+dimension = [6]
+# dimension = [3, 6]
+# dimension = [3, 4, 5, 6, 7, 8]
+GLOBAL_PARAMS.update(dict(
+    threads = 1,
+    binning = '6e4 1e7 20',
+    no_bsm = 'False',
+    scale_region = "1E10",
+    energy_dependance = 'spectral',
+    spectral_index = -2,
+    fix_mixing = 'None',
+    fix_mixing_almost = 'False',
+    fold_index = 'True',
+    save_measured_fr = 'False',
+    output_measured_fr = './frs/'
+))
+
+# Likelihood
+GLOBAL_PARAMS.update(dict(
+    likelihood = 'golemfit',
+    sigma_ratio = '0.01'
+))
+
+# GolemFit
+GLOBAL_PARAMS.update(dict(
+    ast = 'p2_0',
+    data = 'real'
+))
+
+# Plot
+GLOBAL_PARAMS.update(dict(
+    plot_statistic = 'True'
+))
+
+outfile = 'dagman_FR_SENS_{0}_{1}_{2}_{3}'.format(
+    GLOBAL_PARAMS['stat_method'], GLOBAL_PARAMS['run_method'],
+    GLOBAL_PARAMS['likelihood'], GLOBAL_PARAMS['data']
+)
+# outfile += '_seed2'
+# outfile += '_tol03'
+# outfile += '_NULL'
+# outfile += '_prior'
+# outfile += '_strictprior'
+# outfile += '_noprior'
+outfile += '_sourcescan'
+outfile += '.submit'
+golemfitsourcepath = os.environ['GOLEMSOURCEPATH'] + '/GolemFit'
+condor_script = golemfitsourcepath + '/scripts/flavour_ratio/submitter/sens_submit.sub'
+
+if sens_eval_bin.lower() != 'all':
+    if GLOBAL_PARAMS['run_method'].lower() == 'corr_angle':
+        raise NotImplementedError
+        sens_runs = GLOBAL_PARAMS['sens_bins']**2
+    else:
+        sens_runs = GLOBAL_PARAMS['sens_bins'] + 1
+else: sens_runs = 1
+
+with open(outfile, 'w') as f:
+    job_number = 1
+    for dim in dimension:
+        print 'dimension', dim
+        outchain_head = '/data/user/smandalia/flavour_ratio/data/{0}/DIM{1}'.format(
+            GLOBAL_PARAMS['likelihood'], dim
+        )
+        for frs in fix_sfr_mfr:
+            print 'frs', frs
+            output = outchain_head + '/fix_ifr/'
+            if GLOBAL_PARAMS['likelihood'].lower() == 'gaussian':
+                output += '{0}/'.format(str(GLOBAL_PARAMS['sigma_ratio']).replace('.', '_'))
+            # output += 'seed2/'
+            # output += 'mn_noverlap/'
+            # output += 'tol_03/'
+            # output += 'prior/'
+            # output += 'strictprior/'
+            # output += 'noprior/'
+            output += 'sourcescan/'
+            for r in xrange(sens_runs):
+                print 'run', r
+                f.write('JOB\tjob{0}\t{1}\n'.format(job_number, condor_script))
+                f.write('VARS\tjob{0}\tdimension="{1}"\n'.format(job_number, dim))
+                f.write('VARS\tjob{0}\tmr0="{1}"\n'.format(job_number, frs[0]))
+                f.write('VARS\tjob{0}\tmr1="{1}"\n'.format(job_number, frs[1]))
+                f.write('VARS\tjob{0}\tmr2="{1}"\n'.format(job_number, frs[2]))
+                f.write('VARS\tjob{0}\tfix_source_ratio="{1}"\n'.format(job_number, True))
+                f.write('VARS\tjob{0}\tsr0="{1}"\n'.format(job_number, frs[3]))
+                f.write('VARS\tjob{0}\tsr1="{1}"\n'.format(job_number, frs[4]))
+                f.write('VARS\tjob{0}\tsr2="{1}"\n'.format(job_number, frs[5]))
+                if sens_eval_bin.lower() != 'all':
+                    f.write('VARS\tjob{0}\tsens_eval_bin="{1}"\n'.format(job_number, r))
+                else:
+                    f.write('VARS\tjob{0}\tsens_eval_bin="{1}"\n'.format(job_number, 'all'))
+                for key in GLOBAL_PARAMS.iterkeys():
+                    f.write('VARS\tjob{0}\t{1}="{2}"\n'.format(job_number, key, GLOBAL_PARAMS[key]))
+                f.write('VARS\tjob{0}\toutfile="{1}"\n'.format(job_number, output))
+                job_number += 1
+            # break
+
+    # for frs in full_scan_mfr:
+    #     print 'frs', frs
+    #     output = outchain_head + '/full/'
+    #     if GLOBAL_PARAMS['likelihood'].lower() == 'gaussian':
+    #         output += '{0}/'.format(str(GLOBAL_PARAMS['sigma_ratio']).replace('.', '_'))
+    #     for r in xrange(sens_runs):
+    #         print 'run', r
+    #         f.write('JOB\tjob{0}\t{1}\n'.format(job_number, condor_script))
+    #         f.write('VARS\tjob{0}\tdimension="{1}"\n'.format(job_number, dim))
+    #         f.write('VARS\tjob{0}\tmr0="{1}"\n'.format(job_number, frs[0]))
+    #         f.write('VARS\tjob{0}\tmr1="{1}"\n'.format(job_number, frs[1]))
+    #         f.write('VARS\tjob{0}\tmr2="{1}"\n'.format(job_number, frs[2]))
+    #         f.write('VARS\tjob{0}\tfix_source_ratio="{1}"\n'.format(job_number, False))
+    #         f.write('VARS\tjob{0}\tsr0="{1}"\n'.format(job_number, 0))
+    #         f.write('VARS\tjob{0}\tsr1="{1}"\n'.format(job_number, 0))
+    #         f.write('VARS\tjob{0}\tsr2="{1}"\n'.format(job_number, 0))
+    #         if sens_eval_bin.lower() != 'all':
+    #             f.write('VARS\tjob{0}\tsens_eval_bin="{1}"\n'.format(job_number, r))
+    #         else:
+    #             f.write('VARS\tjob{0}\tsens_eval_bin="{1}"\n'.format(job_number, 'all'))
+    #         for key in GLOBAL_PARAMS.iterkeys():
+    #             f.write('VARS\tjob{0}\t{1}="{2}"\n'.format(job_number, key, GLOBAL_PARAMS[key]))
+    #         f.write('VARS\tjob{0}\toutfile="{1}"\n'.format(job_number, output))
+    #         job_number += 1
+
+print 'dag file = {0}'.format(outfile)
diff --git a/utils/enums.py b/utils/enums.py
index a7982f8..441a16f 100644
--- a/utils/enums.py
+++ b/utils/enums.py
@@ -76,3 +76,9 @@ class StatCateg(Enum):
 class SteeringCateg(Enum):
     P2_0 = 1
     P2_1 = 2
+
+
+class Texture(Enum):
+    OEU = 0
+    OET = 1
+    OUT = 2
diff --git a/utils/plot.py b/utils/plot.py
index f81f9da..5a3d1f7 100644
--- a/utils/plot.py
+++ b/utils/plot.py
@@ -32,12 +32,15 @@ from getdist import plots, mcsamples
 import ternary
 from ternary.heatmapping import polygon_generator
+# print ternary.__file__
+# assert 0
 
 import shapely.geometry as geometry
 
 from utils import misc as misc_utils
-from utils.enums import DataType, EnergyDependance
+from utils.enums import DataType, EnergyDependance, str_enum
 from utils.enums import Likelihood, MixingScenario, ParamTag, StatCateg
+from utils.enums import Texture
 
 from utils.fr import angles_to_u, angles_to_fr
@@ -595,7 +598,7 @@ def plot_sens_fixed_angle_pretty(data, outfile, outformat, args):
         null = statistic[min_idx]
         if args.stat_method is StatCateg.BAYESIAN:
             reduced_ev = -(statistic_rm - null)
-            al = scales_rm[reduced_ev > np.log(10**(bayes_K))] # Strong degree of belief
+            al = scales_rm[reduced_ev > np.log(10**(bayes_K))]
         elif args.stat_method is StatCateg.FREQUENTIST:
             reduced_ev = -2*(statistic_rm - null)
             al = scales_rm[reduced_ev > 2.71] # 90% CL for 1 DOF via Wilks
@@ -997,9 +1000,10 @@ def get_tax(ax, scale):
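
The submitter script above emits a plain HTCondor DAGMan description: one JOB line per job, each pointing at the shared Condor submit script, followed by VARS lines that become $(key) macros inside it. A trimmed sketch of that output format; the job list and 'sens_submit.sub' here are illustrative stand-ins:

jobs = [
    {'dimension': 6, 'sr0': 1.0, 'sr1': 0.0, 'sr2': 0.0},
    {'dimension': 6, 'sr0': 0.5, 'sr1': 0.5, 'sr2': 0.0},
]
with open('example.submit', 'w') as f:
    for n, params in enumerate(jobs, 1):
        # One JOB entry per Condor job, all sharing one submit script.
        f.write('JOB\tjob{0}\t{1}\n'.format(n, 'sens_submit.sub'))
        # Each VARS line binds a per-job macro used by the submit script.
        for key, value in sorted(params.items()):
            f.write('VARS\tjob{0}\t{1}="{2}"\n'.format(n, key, value))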
     tax.clear_matplotlib_ticks()
 
     # Set ticks
-    ticks = np.linspace(0, 1, 6)
-    tax.ticks(ticks=ticks, locations=ticks*scale, axis='blr', linewidth=1,
-              offset=0.03, fontsize=fontsize, tick_formats='%.1f')
+    # ticks = np.linspace(0, 1, 6)
+    # tax.ticks(ticks=ticks, locations=ticks*scale, axis='blr', linewidth=1,
+    #           offset=0.03, fontsize=fontsize, tick_formats='%.1f')
+    tax.ticks()
 
     tax._redraw_labels()
@@ -1156,3 +1160,177 @@ def flavour_contour(frs, ax, nbins, coverage, linewidth=2, color='black'):
         )
         ax.scatter(points.T[0], points.T[1], marker='o', s=2, alpha=1,
                    color=color, zorder=3)
+
+
+def plot_source_ternary(data, outfile, outformat, args):
+    """Ternary plot in the source flavour space for each operator texture."""
+    r_data = np.full((data.shape[0], data.shape[2], data.shape[1], data.shape[3], data.shape[4]), np.nan)
+    for idim in xrange(data.shape[0]):
+        for isrc in xrange(data.shape[1]):
+            for isce in xrange(data.shape[2]):
+                r_data[idim][isce][isrc] = data[idim][isrc][isce]
+    r_data = ma.masked_invalid(r_data)
+    print r_data.shape, 'r_data.shape'
+    nsrcs = int(len(args.source_ratios) / 3)
+
+    for idim, dim in enumerate(args.dimensions):
+        print '|||| DIM = {0}, {1}'.format(idim, dim)
+        for isce in xrange(r_data.shape[1]):
+            print '|||| SCEN = {0}'.format(str_enum(Texture(isce)))
+            fig = plt.figure(figsize=(8, 8))
+            ax = fig.add_subplot(111)
+            tax = get_tax(ax, scale=nsrcs)
+            interp_dict = {}
+            for isrc, src in enumerate(args.source_ratios):
+                src_sc = tuple(np.array(src)*(nsrcs-1))
+                print '|||| SRC = {0}'.format(src)
+                scales, statistic = ma.compress_rows(r_data[idim][isce][isrc]).T
+                print 'scales', scales
+                print 'statistic', statistic
+
+                try:
+                    tck, u = splprep([scales, statistic], s=0)
+                except:
+                    interp_dict[src_sc] = -60
+                    continue
+                # sc, st = splev(np.linspace(0, 1, 10000), tck)
+                sc, st = splev(np.linspace(0, 1, 20), tck)
+                scales_rm = sc[sc >= scales[1]]
+                statistic_rm = st[sc >= scales[1]]
+                # scales_rm = sc
+                # statistic_rm = st
+
+                min_idx = np.argmin(scales)
+                null = statistic[min_idx]
+                if args.stat_method is StatCateg.BAYESIAN:
+                    reduced_ev = -(statistic_rm - null)
+                    print 'reduced_ev', reduced_ev
+                    al = scales_rm[reduced_ev > np.log(10**(bayes_K))]
+                else:
+                    assert 0
+                if len(al) == 0:
+                    print 'No points for DIM {0} FRS {1}!'.format(dim, src)
+                    interp_dict[src_sc] = -60
+                    continue
+                if reduced_ev[-1] < np.log(10**(bayes_K)) - 0.1:
+                    print 'Peaked contour does not exclude large scales! For ' \
+                        'DIM {0} FRS{1}!'.format(dim, src)
+                    interp_dict[src_sc] = -60
+                    continue
+                lim = al[0]
+                print 'limit = {0}'.format(lim)
+
+                interp_dict[src_sc] = lim
+            print 'interp_dict', interp_dict
+            print
+            print 'vertices', heatmap(interp_dict, nsrcs)
+            print
+            tax.heatmap(interp_dict, scale=nsrcs, vmin=-60, vmax=-30)
+            misc_utils.make_dir(outfile)
+            for of in outformat:
+                print 'Saving plot as {0}'.format(outfile+'_SCEN{0}.'.format(isce)+of)
+                fig.savefig(outfile+'_SCEN{0}.'.format(isce)+of, bbox_inches='tight', dpi=150)
+    print 'nsrcs', nsrcs
+    assert 0
+
+
+def texture_label(x):
+    if x == Texture.OEU:
+        return r'$\mathcal{O}_{e\mu}$'
+    elif x == Texture.OET:
+        return r'$\mathcal{O}_{e\tau}$'
+    elif x == Texture.OUT:
+        return r'$\mathcal{O}_{\mu\tau}$'
+    else:
+        raise AssertionError
+
+
+def plot_source_ternary_1D(data, outfile, outformat, args):
+    """Ternary plot in the source flavour space for each operator texture."""
+    sources = args.source_ratios
+    x_1D = []
+    i_src_1D = []
+    for i, s in enumerate(sources):
+        if s[2] != 0: continue
+        x_1D.append(s[0])
+        i_src_1D.append(i)
+    i_src_1D = list(reversed(i_src_1D))
+    x_1D = list(reversed(x_1D))
+    print 'x_1D', x_1D
+    def get_edges_from_cen(bincen):
+        hwidth = 0.5*(bincen[1] - bincen[0])
+        return np.append([bincen[0]-hwidth], bincen[:]+hwidth)
+    be = get_edges_from_cen(x_1D)
+
+    r_data = np.full((data.shape[0], data.shape[2], len(i_src_1D), data.shape[3], data.shape[4]), np.nan)
+    for idim in xrange(data.shape[0]):
+        for i1D, isrc in enumerate(i_src_1D):
+            for isce in xrange(data.shape[2]):
+                r_data[idim][isce][i1D] = data[idim][isrc][isce]
+    r_data = ma.masked_invalid(r_data)
+    print r_data.shape, 'r_data.shape'
+
+    for idim, dim in enumerate(args.dimensions):
+        print '|||| DIM = {0}, {1}'.format(idim, dim)
+        fig = plt.figure(figsize=(4, 4))
+        ax = fig.add_subplot(111)
+        ax.tick_params(axis='x', labelsize=12)
+        ax.tick_params(axis='y', labelsize=12)
+        ax.set_xlabel(r'$x$', fontsize=18)
+        ax.set_ylabel(r'${\rm New\:Physics\:Scale}\:[\:{\rm log}_{10} (\Lambda_{d=6}^{-1}\:/\:{\rm GeV}^{-2})\: ]$', fontsize=18)
+        ax.set_xlim(0, 1)
+        for isce in xrange(r_data.shape[1]):
+            print '|||| SCEN = {0}'.format(str_enum(Texture(isce)))
+            H = np.full(len(x_1D), np.nan)
+            for ix, x in enumerate(x_1D):
+                print '|||| X = {0}'.format(x)
+                scales, statistic = ma.compress_rows(r_data[idim][isce][ix]).T
+                print 'scales', scales
+                print 'statistic', statistic
+
+                try:
+                    tck, u = splprep([scales, statistic], s=0)
+                except:
+                    continue
+                # sc, st = splev(np.linspace(0, 1, 10000), tck)
+                sc, st = splev(np.linspace(0, 1, 20), tck)
+                scales_rm = sc[sc >= scales[1]]
+                statistic_rm = st[sc >= scales[1]]
+                # scales_rm = sc
+                # statistic_rm = st
+
+                min_idx = np.argmin(scales)
+                null = statistic[min_idx]
+                if args.stat_method is StatCateg.BAYESIAN:
+                    reduced_ev = -(statistic_rm - null)
+                    print 'reduced_ev', reduced_ev
+                    al = scales_rm[reduced_ev > np.log(10**(bayes_K))]
+                else:
+                    assert 0
+                if len(al) == 0:
+                    print 'No points for DIM {0} X {1}!'.format(dim, x)
+                    continue
+                if reduced_ev[-1] < np.log(10**(bayes_K)) - 0.1:
+                    print 'Peaked contour does not exclude large scales! For ' \
+                        'DIM {0} X {1}!'.format(dim, x)
+                    continue
+                lim = al[0]
+                print 'limit = {0}'.format(lim)
+
+                H[ix] = lim
+            H = ma.masked_invalid(H)
+            H_0 = np.concatenate([[H[0]], H])
+            ax.step(be, H_0, linewidth=2,
+                    label=texture_label(Texture(isce)), linestyle='-',
+                    drawstyle='steps-pre')
+            print 'H', H
+            print
+        for ymaj in ax.yaxis.get_majorticklocs():
+            ax.axhline(y=ymaj, ls=':', color='gray', alpha=0.3, linewidth=1)
+        for xmaj in be:
+            ax.axvline(x=xmaj, ls=':', color='gray', alpha=0.3, linewidth=1)
+        ax.legend()
+        misc_utils.make_dir(outfile)
+        for of in outformat:
+            print 'Saving plot as {0}'.format(outfile+'_DIM{0}.'.format(dim)+of)
+            fig.savefig(outfile+'_DIM{0}.'.format(dim)+of, bbox_inches='tight', dpi=150)
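
Both new plotting routines in utils/plot.py derive a limit the same way: spline the scanned (scale, evidence) points, form the evidence drop relative to the null point at the smallest scale, and take the first scale where the Bayes factor crosses 10**bayes_K (bayes_K is a module-level threshold in utils/plot.py that this diff does not show). A condensed sketch of that extraction, without the endpoint trimming the full code applies:

import numpy as np
from scipy.interpolate import splev, splprep

def scale_limit(scales, statistic, bayes_K):
    # Parametric spline through the (log10 scale, log evidence) scan.
    tck, _ = splprep([scales, statistic], s=0)
    sc, st = splev(np.linspace(0, 1, 20), tck)
    # Evidence drop relative to the null at the smallest scanned scale.
    reduced_ev = -(st - statistic[np.argmin(scales)])
    # First scale whose Bayes factor exceeds the 10**bayes_K threshold.
    excluded = sc[reduced_ev > np.log(10**bayes_K)]
    return excluded[0] if len(excluded) else None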
