author    tlatorre <tlatorre@uchicago.edu>    2021-01-05 11:18:05 -0600
committer tlatorre <tlatorre@uchicago.edu>    2021-01-05 11:18:05 -0600
commit    32297413483bb0dec96349996706dc025a9f29bf (patch)
tree      ec2430f990e383b0181f22d5192e57ac1ec5416e
parent    da2610ffac5fc38df32f9e159cba6d5f56136938 (diff)
add --fast
-rwxr-xr-x  utils/dm-search | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/utils/dm-search b/utils/dm-search
index 533d2f7..8a42c87 100755
--- a/utils/dm-search
+++ b/utils/dm-search
@@ -228,7 +228,7 @@ def get_data_hists(data,bins,scale=1.0):
         data_hists[id] = np.histogram(data[data.id == id].ke.values,bins=bins[id])[0]*scale
     return data_hists

-def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_factor, muon_scale_factor, bins, reweight=False, print_nll=False, dm_sample=None):
+def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_factor, muon_scale_factor, bins, reweight=False, print_nll=False, dm_sample=None, fast=False):
     df_dict = dict(tuple(mc.groupby('id')))
     for id in (20,22,2020,2022,2222):
         if id not in df_dict:
@@ -248,17 +248,26 @@ def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_fac
     for id in (20,22,2020,2022,2222):
         df_dict_dm[id] = dm_sample[dm_sample.id == id]

-    def nll(x, grad=None):
-        if (x < PRIORS_LOW).any() or (x > PRIORS_HIGH).any():
-            return np.inf
+
+    if fast:
+        x = np.array(PRIORS)

-        # Get the Monte Carlo histograms. We need to do this within the
-        # likelihood function since we apply the energy resolution parameters
-        # to the Monte Carlo.
         mc_hists = get_mc_hists_fast(df_dict,x,bins,scale=1/atmo_scale_factor,reweight=reweight)
         muon_hists = get_mc_hists_fast(df_dict_muon,x,bins,scale=1/muon_scale_factor)
         dm_hists = get_mc_hists_fast(df_dict_dm,x,bins,scale=1/len(dm_sample))

+    def nll(x, grad=None):
+        if (x < PRIORS_LOW).any() or (x > PRIORS_HIGH).any():
+            return np.inf
+
+        if not fast:
+            # Get the Monte Carlo histograms. We need to do this within the
+            # likelihood function since we apply the energy resolution
+            # parameters to the Monte Carlo.
+            mc_hists = get_mc_hists_fast(df_dict,x,bins,scale=1/atmo_scale_factor,reweight=reweight)
+            muon_hists = get_mc_hists_fast(df_dict_muon,x,bins,scale=1/muon_scale_factor)
+            dm_hists = get_mc_hists_fast(df_dict_dm,x,bins,scale=1/len(dm_sample))
+
         # Calculate the negative log of the likelihood of observing the data
         # given the fit parameters
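The hunk above is the heart of the change: with --fast, the Monte Carlo histograms are built once at the central prior values and captured by the nll closure, so each likelihood call skips the re-histogramming that is otherwise needed to apply the energy bias and resolution parameters. Below is a minimal standalone sketch of that pattern, not the dm-search code itself; the names make_loss, build_hists, data_hist, and priors are hypothetical stand-ins.

import numpy as np

def make_loss(data_hist, build_hists, priors, fast=False):
    # Hypothetical sketch of the --fast pattern, not the actual dm-search code.
    if fast:
        # Build the expected histograms once, at the priors' central values;
        # the parameters passed to loss() below then no longer affect them.
        cached = build_hists(np.asarray(priors))

    def loss(x):
        hists = cached if fast else build_hists(x)
        expected = np.clip(sum(hists.values()), 1e-10, None)
        # Poisson negative log likelihood, up to a constant term.
        return np.sum(expected - data_hist*np.log(expected))

    return loss

When fast is False the behaviour is unchanged: the histograms depend on x and are rebuilt on every call inside the closure.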
@@ -279,7 +288,7 @@ def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_fac
         return nll
     return nll

-def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll=False,walkers=100,thin=10,refit=True,universe=None):
+def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll=False,walkers=100,thin=10,refit=True,universe=None,fast=False):
     """
     Run the fit and return the minimum along with samples from running an MCMC
     starting near the minimum.
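The docstring above summarizes the strategy: minimize the negative log likelihood, then run an MCMC with walkers started near that minimum. The sketch below shows one common way to do this with an emcee-style ensemble sampler; whether dm-search actually uses emcee, and the names sample_near_minimum, xopt, and stepsizes, are assumptions for illustration only.

import numpy as np
import emcee  # assumed sampler; the script may use a different one

def sample_near_minimum(nll, xopt, stepsizes, walkers=100, steps=1000, thin=10):
    # Start every walker in a small ball around the best-fit point xopt.
    ndim = len(xopt)
    pos = xopt + stepsizes*np.random.randn(walkers, ndim)
    # emcee expects a log-probability, i.e. the negative of the nll.
    sampler = emcee.EnsembleSampler(walkers, ndim, lambda x: -nll(x))
    sampler.run_mcmc(pos, steps)
    # Thin the chain to reduce autocorrelation before returning the samples.
    return sampler.get_chain(flat=True, thin=thin)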
@@ -300,7 +309,7 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
     if universe is None:
         dm_sample = get_dm_sample(DM_SAMPLES,dm_particle_id,dm_mass,dm_energy)

-    nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,print_nll,dm_sample=dm_sample)
+    nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,print_nll,dm_sample=dm_sample,fast=fast)

     pos = np.empty((walkers, len(PRIORS)),dtype=np.double)
     for i in range(pos.shape[0]):
@@ -453,7 +462,7 @@ def get_dm_sample(n,dm_particle_id,dm_mass,dm_energy):
     return pd.DataFrame(data)

-def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin,universe=None):
+def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin,universe=None,fast=False):
     limits = {}
     best_fit = {}
     discovery_array = {}
@@ -467,7 +476,7 @@ def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,b
             m1 = SNOMAN_MASS[id1]
             m2 = SNOMAN_MASS[id2]
             dm_energy = dm_mass
-            xopt, universe, samples = do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin,universe)
+            xopt, universe, samples = do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin,universe,fast)

             data_mc_with_weights = pd.merge(data_mc,weights[universe],how='left',on=['run','unique_id'])
             data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
@@ -555,6 +564,7 @@ if __name__ == '__main__':
     parser.add_argument("--mcpl", nargs='+', required=True, help="GENIE MCPL files")
     parser.add_argument("--run-info", required=True, help="run_info.log autosno file")
     parser.add_argument("--universe", default=None, help="GENIE universe for systematics")
+    parser.add_argument("--fast", action='store_true', default=False, help="run fast version of likelihood without energy bias and resolution")
     args = parser.parse_args()

     setup_matplotlib(args.save)
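With the flag threaded through get_limits and do_fit, a run that skips the per-call energy bias/resolution histogramming would be invoked with something like the line below; the file names are placeholders, and any required options not visible in this diff are omitted.

$ ./utils/dm-search --mcpl genie_*.mcpl --run-info run_info.log --fast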
@@ -837,7 +847,7 @@ if __name__ == '__main__':
         sys.exit(0)

-    limits, best_fit, discovery_array = get_limits(DM_MASSES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin,args.universe)
+    limits, best_fit, discovery_array = get_limits(DM_MASSES,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,args.steps,args.print_nll,args.walkers,args.thin,args.universe,args.fast)

     fig = plt.figure()
     for color, dm_particle_id in zip(('C0','C1'),(2020,2222)):