aboutsummaryrefslogtreecommitdiffstats
path: root/utils/multinest.py
blob: fdd87cd7062ab56674828122232aca8579c9c14d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
# author : S. Mandalia
#          s.p.mandalia@qmul.ac.uk
#
# date   : April 19, 2018

"""
Useful functions to use MultiNest for the BSM flavour ratio analysis
"""

from __future__ import absolute_import, division

from functools import partial

import numpy as np

from pymultinest import analyse, run

from utils import likelihood
from utils.misc import gen_identifier, make_dir, solve_ratio


def CubePrior(cube, ndim, n_params):
    """No-op prior transform required by the MultiNest interface.

    The unit hypercube is deliberately left untouched here: the mapping
    from cube coordinates onto the physical parameter ranges is done
    inside ``lnProb`` instead.
    """
    return None


def lnProb(cube, ndim, n_params, mn_paramset, llh_paramset, asimov_paramset,
           args, fitter):
    """MultiNest log-likelihood callback.

    Maps the unit hypercube point ``cube`` onto the physical ranges given
    by ``mn_paramset.seeds``, mirrors the resulting values into
    ``llh_paramset``, and evaluates the log-probability via
    ``likelihood.ln_prob``.

    Parameters
    ----------
    cube : indexable of float
        Point in the unit hypercube supplied by MultiNest.
    ndim : int
        Number of scan dimensions; must equal ``len(mn_paramset)``.
    n_params : int
        Total parameter count passed through by MultiNest (unused here).
    mn_paramset, llh_paramset, asimov_paramset
        Project parameter-set objects (scan, likelihood and Asimov sets).
    args, fitter
        Forwarded unchanged to ``likelihood.ln_prob``.

    Returns
    -------
    float
        The log-probability at the mapped parameter point.

    Raises
    ------
    AssertionError
        If ``ndim`` does not match the size of ``mn_paramset``.
    """
    if ndim != len(mn_paramset):
        raise AssertionError(
            'Length of MultiNest scan paramset is not the same as the input '
            'params\ncube={0}\nmn_paramset={1}'.format(cube, mn_paramset)
        )
    # seeds holds (low, high) range pairs; scale each cube coordinate into
    # its physical range.
    pranges = mn_paramset.seeds
    for i in range(ndim):
        mn_paramset[i].value = (pranges[i][1]-pranges[i][0])*cube[i] + pranges[i][0]
    # Keep the likelihood paramset in sync with the scan paramset.
    for pm in mn_paramset.names:
        llh_paramset[pm].value = mn_paramset[pm].value
    theta = llh_paramset.values
    llh = likelihood.ln_prob(
        theta=theta,
        args=args,
        asimov_paramset=asimov_paramset,
        llh_paramset=llh_paramset,
        fitter=fitter
    )
    return llh


def mn_argparse(parser):
    """Attach the MultiNest command-line options to ``parser``.

    Adds ``--mn-live-points``, ``--mn-tolerance`` and ``--mn-output``
    with their default values; mutates ``parser`` in place.
    """
    option_table = (
        ('--mn-live-points', int, 3000,
         'Number of live points for MultiNest evaluations'),
        ('--mn-tolerance', float, 0.01,
         'Tolerance for MultiNest'),
        ('--mn-output', str, './mnrun/',
         'Folder to store MultiNest evaluations'),
    )
    for flag, flag_type, default, help_text in option_table:
        parser.add_argument(flag, type=flag_type, default=default,
                            help=help_text)


def _mn_prefix(args, identifier):
    """Build the MultiNest output-file prefix for this run.

    Layout: ./mnrun/DIM<d>/<data>/<likelihood>/s<r1><r2><r3>/<identifier>
    The likelihood/data enum names are taken from the second component of
    their ``str()`` representation (e.g. ``Likelihood.FOO`` -> ``FOO``).
    """
    # NOTE(review): args.mn_output (from mn_argparse) is not honoured here;
    # the base directory is hard-coded to ./mnrun/ — confirm intended.
    llh = '{0}'.format(args.likelihood).split('.')[1]
    data = '{0}'.format(args.data).split('.')[1]
    sr1, sr2, sr3 = solve_ratio(args.source_ratio)
    return './mnrun/DIM{0}/{1}/{2}/s{3}{4}{5}/{6}'.format(
        args.dimension, data, llh, sr1, sr2, sr3, identifier
    )


def mn_evidence(mn_paramset, llh_paramset, asimov_paramset, args, fitter,
                identifier='mn'):
    """Run the MultiNest algorithm to calculate the evidence.

    Parameters
    ----------
    mn_paramset, llh_paramset, asimov_paramset
        Project parameter-set objects (scan, likelihood and Asimov sets).
    args
        Parsed command-line namespace; must provide ``mn_live_points``,
        ``mn_tolerance``, ``dimension``, ``likelihood``, ``data`` and
        ``source_ratio``.
    fitter
        Forwarded unchanged to the likelihood evaluation.
    identifier : str
        Final path component of the MultiNest output prefix.

    Returns
    -------
    float
        MultiNest's 'global evidence' statistic for the run.
    """
    n_params = len(mn_paramset)

    # Seed the likelihood paramset with the scan paramset's current values.
    for n in mn_paramset.names:
        llh_paramset[n].value = mn_paramset[n].value

    # Bind the static arguments; MultiNest only supplies (cube, ndim,
    # n_params) to the callback.
    lnProbEval = partial(
        lnProb,
        mn_paramset     = mn_paramset,
        llh_paramset    = llh_paramset,
        asimov_paramset = asimov_paramset,
        args            = args,
        fitter          = fitter
    )

    prefix = _mn_prefix(args, identifier)
    make_dir(prefix)
    print('Running evidence calculation for {0}'.format(prefix))
    run(
        LogLikelihood              = lnProbEval,
        Prior                      = CubePrior,
        n_dims                     = n_params,
        n_live_points              = args.mn_live_points,
        evidence_tolerance         = args.mn_tolerance,
        outputfiles_basename       = prefix,
        importance_nested_sampling = True,
        resume                     = False,
        verbose                    = True
    )

    analyser = analyse.Analyzer(
        outputfiles_basename=prefix, n_params=n_params
    )
    return analyser.get_stats()['global evidence']