author     tlatorre <tlatorre@uchicago.edu>    2020-12-08 19:45:53 -0600
committer  tlatorre <tlatorre@uchicago.edu>    2020-12-08 19:45:53 -0600
commit     256ed092dc42d34e03cbd43f39c98e27680aec7a
tree       b5614a7b55eb8acb5eb8297ba7e1eedb9f0d2de6
parent     32be8ba6ec566e40285a7f6b556219bea4d4d6c0
speed up dm-search
 utils/dm-search | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/utils/dm-search b/utils/dm-search
index 5e3d7b5..45c60c7 100755
--- a/utils/dm-search
+++ b/utils/dm-search
@@ -225,7 +225,7 @@ def get_data_hists(data,bins,scale=1.0):
         data_hists[id] = np.histogram(data[data.id == id].ke.values,bins=bins)[0]*scale
     return data_hists
 
-def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_factor, muon_scale_factor, bins, reweight=False, print_nll=False):
+def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_factor, muon_scale_factor, bins, reweight=False, print_nll=False, dm_sample=None):
     df_dict = {}
     for id in (20,22,2020,2022,2222):
         df_dict[id] = mc[mc.id == id]
@@ -236,7 +236,8 @@ def make_nll(dm_particle_id, dm_mass, dm_energy, data, muons, mc, atmo_scale_fac
 
     data_hists = get_data_hists(data,bins)
 
-    dm_sample = get_dm_sample(DM_SAMPLES,dm_particle_id,dm_mass,dm_energy)
+    if dm_sample is None:
+        dm_sample = get_dm_sample(DM_SAMPLES,dm_particle_id,dm_mass,dm_energy)
 
     df_dict_dm = {}
     for id in (20,22,2020,2022,2222):
@@ -291,7 +292,9 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
     Returns a tuple (xopt, samples) where samples is an array of shape (steps,
     number of parameters).
     """
-    nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,print_nll)
+    dm_sample = get_dm_sample(DM_SAMPLES,dm_particle_id,dm_mass,dm_energy)
+
+    nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,print_nll,dm_sample=dm_sample)
 
     pos = np.empty((walkers, len(PRIORS)),dtype=np.double)
     for i in range(pos.shape[0]):
@@ -346,7 +349,7 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
             data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','evn'])
             data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
 
-            nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll)
+            nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll,dm_sample=dm_sample)
             nlls.append(nll(xopt))
 
         universe = np.argmin(nlls)
@@ -356,7 +359,7 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
         data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
 
         # Create a new negative log likelihood function with the weighted Monte Carlo.
-        nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll)
+        nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll,dm_sample=dm_sample)
 
         # Now, we refit with the Monte Carlo weighted by the most likely GENIE
         # systematics.
@@ -432,17 +435,16 @@ def get_dm_sample(n,dm_particle_id,dm_mass,dm_energy):
     energy1 = []
     data = np.empty(n,dtype=[('energy1',np.double),('energy2',np.double),('ke',np.double),('id1',np.int),('id2',np.int),('id',np.int)])
     for i, (v1, v2) in enumerate(islice(gen_decay(dm_mass,dm_energy,m1,m2),n)):
-        pos = rand_ball(PSUP_RADIUS)
         E1 = v1[0]
         E2 = v2[0]
-        p1 = np.linalg.norm(v1[1:])
-        p2 = np.linalg.norm(v2[1:])
         T1 = E1 - m1
         T2 = E2 - m2
-        # FIXME: Get electron and muon resolution
-        T1 += norm.rvs(scale=T1*0.05)
-        T2 += norm.rvs(scale=T2*0.05)
         data[i] = T1, T2, T1 + T2, id1, id2, dm_particle_id
+
+    # FIXME: Get electron and muon resolution
+    data['energy1'] += norm.rvs(scale=data['energy1']*0.05)
+    data['energy2'] += norm.rvs(scale=data['energy2']*0.05)
+
     return pd.DataFrame(data)
 
 def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin):
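The speedup comes from drawing the dark-matter sample once per fit instead of once per likelihood construction: do_fit now calls get_dm_sample a single time and hands the result to every make_nll call through the new dm_sample=None keyword, so the GENIE-universe loop no longer regenerates it. Below is a minimal sketch of that caching pattern, independent of the real analysis; expensive_sample, build_nll and fit_universes are hypothetical stand-ins for get_dm_sample, make_nll and the universe loop in do_fit.

```python
import numpy as np

def expensive_sample(n, seed=None):
    """Hypothetical stand-in for get_dm_sample(); assume it is slow to generate."""
    rng = np.random.default_rng(seed)
    return rng.normal(loc=10.0, scale=1.0, size=n)

def build_nll(data, sample=None, n=100_000):
    """Hypothetical stand-in for make_nll(): reuse `sample` when the caller supplies one."""
    if sample is None:
        # Fall back to generating the sample here, as the pre-patch code always did.
        sample = expensive_sample(n)
    mu = sample.mean()

    def nll(scale):
        # Toy Gaussian negative log likelihood comparing data to the scaled sample mean.
        return 0.5*np.sum((data - scale*mu)**2)

    return nll

def fit_universes(data, n_universes=10):
    """Hypothetical stand-in for the universe loop in do_fit(): one sample, many NLLs."""
    sample = expensive_sample(100_000)  # drawn once, outside the loop
    return [build_nll(data, sample=sample)(1.0) for _ in range(n_universes)]

if __name__ == "__main__":
    print(fit_universes(np.full(50, 10.0)))
```

With this pattern the default dm_sample=None keeps the old call sites working unchanged, while callers that already hold a sample skip the repeated generation.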
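The other change vectorizes the 5% energy smearing in get_dm_sample: rather than two per-event norm.rvs calls inside the decay loop, the resolution is now applied once to the whole energy1 and energy2 columns of the structured array, so scipy draws all the variates in a single call per column. A rough before/after sketch of that pattern, using the same 0.05 FIXME resolution but toy energies in place of the real decay kinematics:

```python
import numpy as np
from scipy.stats import norm

n = 100_000

# Structured array with the same two energy columns used in get_dm_sample();
# the energies themselves are toy values, not the real decay kinematics.
data = np.empty(n, dtype=[('energy1', np.double), ('energy2', np.double)])
rng = np.random.default_rng(0)
data['energy1'] = np.abs(rng.normal(500.0, 50.0, size=n))
data['energy2'] = np.abs(rng.normal(500.0, 50.0, size=n))

def smear_loop(e):
    # One norm.rvs() call per event, as in the removed loop body.
    out = e.copy()
    for i in range(len(out)):
        out[i] += norm.rvs(scale=out[i]*0.05)
    return out

def smear_vectorized(e):
    # A single vectorized norm.rvs() call per column, as in the added lines.
    return e + norm.rvs(scale=e*0.05)

slow = smear_loop(data['energy1'][:1000])        # per-event draws: fine for a few events
data['energy1'] = smear_vectorized(data['energy1'])
data['energy2'] = smear_vectorized(data['energy2'])
```

One behavioural difference worth noting: since the smearing now happens after data[i] is filled, the ke column keeps the unsmeared T1 + T2, whereas before the patch it was the sum of the smeared kinetic energies.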