Diffstat (limited to 'utils/dm-search')
-rwxr-xr-x  utils/dm-search  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/utils/dm-search b/utils/dm-search
index cd91eab..9e1a136 100755
--- a/utils/dm-search
+++ b/utils/dm-search
@@ -347,13 +347,11 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
     xopt = opt.optimize(x0)
 
     # Get the total number of "universes" simulated in the GENIE reweight tool
-    nuniverses = weights['universe'].max()+1
-
-    weights_dict = dict(tuple(weights.groupby('universe')))
+    nuniverses = max(weights.keys())+1
 
     nlls = []
     for universe in range(nuniverses):
-        data_mc_with_weights = pd.merge(data_mc,weights_dict[universe],how='left',on=['run','unique_id'])
+        data_mc_with_weights = pd.merge(data_mc,weights[universe],how='left',on=['run','unique_id'])
         data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
 
         nll = make_nll(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc_with_weights,atmo_scale_factor,muon_scale_factor,bins,reweight=True,print_nll=print_nll,dm_sample=dm_sample)
@@ -362,7 +360,7 @@ def do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale
     universe = np.argmin(nlls)
 
     if refit:
-        data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
+        data_mc_with_weights = pd.merge(data_mc,weights[universe],how='left',on=['run','unique_id'])
         data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
 
         # Create a new negative log likelihood function with the weighted Monte Carlo.
@@ -470,7 +468,7 @@ def get_limits(dm_masses,data,muon,data_mc,atmo_scale_factor,muon_scale_factor,b
             dm_energy = dm_mass
             xopt, universe, samples = do_fit(dm_particle_id,dm_mass,dm_energy,data,muon,data_mc,weights,atmo_scale_factor,muon_scale_factor,bins,steps,print_nll,walkers,thin)
-            data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == universe],how='left',on=['run','unique_id'])
+            data_mc_with_weights = pd.merge(data_mc,weights[universe],how='left',on=['run','unique_id'])
             data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)
 
             limit = np.percentile(samples[:,6],90)
 
@@ -649,6 +647,8 @@ if __name__ == '__main__':
     # 15752 154 957 -0.006827
     weights = weights[weights.weight > 0]
 
+    weights = dict(tuple(weights.groupby('universe')))
+
     ev_mc = correct_energy_bias(ev_mc)
     muon_mc = correct_energy_bias(muon_mc)
 
@@ -785,8 +785,8 @@ if __name__ == '__main__':
         # Set the random seed so we get reproducible results here
         np.random.seed(0)
 
-        data_mc_with_weights = pd.merge(data_mc,weights[weights.universe == 0],how='left',on=['run','unique_id'])
-        data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[weights.universe == 0],how='left',on=['run','unique_id'])
+        data_mc_with_weights = pd.merge(data_mc,weights[0],how='left',on=['run','unique_id'])
+        data_atm_mc_with_weights = pd.merge(data_atm_mc,weights[0],how='left',on=['run','unique_id'])
 
         discoveries = 0
 
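For reference, a minimal sketch of the pattern this change adopts, with toy stand-in data. The column names run, unique_id, universe, and weight come from the diff; the frames and values below are illustrative only, not the real inputs:

    import pandas as pd

    # Illustrative Monte Carlo events keyed by (run, unique_id).
    data_mc = pd.DataFrame({'run': [1, 1, 2],
                            'unique_id': [10, 11, 10]})

    # Illustrative GENIE reweight table: one weight per event per universe.
    weights = pd.DataFrame({'run': [1, 1, 2, 2],
                            'unique_id': [10, 11, 10, 11],
                            'universe': [0, 0, 1, 1],
                            'weight': [0.9, 1.1, 0.8, 1.2]})

    # Group once, up front: groupby yields (key, frame) pairs, so
    # dict(tuple(...)) builds a {universe: DataFrame} lookup table.
    weights = dict(tuple(weights.groupby('universe')))

    # The universe count is now read off the dict keys rather than
    # scanning a 'universe' column on every call.
    nuniverses = max(weights.keys()) + 1

    for universe in range(nuniverses):
        # Left-join this universe's weights onto the Monte Carlo; events
        # with no weight in this universe default to 1.0.
        data_mc_with_weights = pd.merge(data_mc, weights[universe],
                                        how='left', on=['run', 'unique_id'])
        data_mc_with_weights.weight = data_mc_with_weights.weight.fillna(1.0)

The point of the change is to pay the groupby cost once in the main block instead of re-grouping (and re-filtering with weights.universe == universe) inside do_fit, which get_limits calls once per dark-matter mass point. One caveat: max(weights.keys())+1 assumes every universe index survives the earlier weights.weight > 0 cut; a universe whose weights were all dropped would make weights[universe] raise a KeyError.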