Diffstat (limited to 'likelihood.py')
-rw-r--r--  likelihood.py | 76
1 file changed, 33 insertions(+), 43 deletions(-)
diff --git a/likelihood.py b/likelihood.py
index 2487442..c797afe 100644
--- a/likelihood.py
+++ b/likelihood.py
@@ -1,6 +1,6 @@
import numpy as np
from histogram import HistogramDD
-from uncertainties import ufloat, umath
+from uncertainties import ufloat, umath, unumpy
from math import sqrt
from itertools import izip, compress
from chroma.tools import profile_if_possible
@@ -8,7 +8,7 @@ from chroma.tools import profile_if_possible
class Likelihood(object):
"Class to evaluate likelihoods for detector events."
def __init__(self, sim, event=None, tbins=100, trange=(-0.5e-9, 999.5e-9),
- qbins=10, qrange=(-0.5, 9.5)):
+ qbins=10, qrange=(-0.5, 9.5), time_only=True):
"""
Args:
- sim: chroma.sim.Simulation
@@ -22,14 +22,18 @@ class Likelihood(object):
Min and max time to include in PDF
- qbins: int, *optional*
Number of charge bins in PDF
- - qrange: tuple of 2 floats, *optional
+ - qrange: tuple of 2 floats, *optional*
Min and max charge to include in PDF
+ - time_only: bool, *optional*
+ Only use time observable (instead of time and charge) in computing
+ likelihood. Defaults to True.
"""
self.sim = sim
self.tbins = tbins
self.trange = trange
self.qbins = qbins
self.qrange = qrange
+ self.time_only = time_only
if event is not None:
self.set_event(event)
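
For context, a minimal usage sketch of the new keyword (assuming sim and event are built as in the __main__ block at the bottom of this file); time_only=False keeps the previous joint time/charge behaviour:

    # sketch only: the time-only PDF is now the default
    likelihood_t  = Likelihood(sim, event)                   # uses only hit times
    likelihood_tq = Likelihood(sim, event, time_only=False)  # uses (time, charge) as before
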
@@ -47,52 +51,38 @@ class Likelihood(object):
will be propagated `nreps` times.
"""
- hitcount, pdfcount = self.sim.create_pdf(nevals, vertex_generator,
- self.tbins, self.trange,
- self.qbins, self.qrange,
- nreps=nreps)
-
+ hitcount, pdf_prob, pdf_prob_uncert = self.sim.eval_pdf(self.event.channels,
+ nevals, vertex_generator,
+ 2.0e-9, self.trange,
+ 1, self.qrange,
+ nreps=nreps,
+ time_only=self.time_only)
+
# Normalize probabilities and put a floor to keep the log finite
hit_prob = hitcount.astype(np.float32) / (nreps * nevals)
hit_prob = np.maximum(hit_prob, 0.5 / (nreps*nevals))
- # Normalize t,q PDFs
- pdf = pdfcount.astype(np.float32)
- norm = pdf.sum(axis=2).sum(axis=1) / (self.qrange[1] - self.qrange[0]) / (self.trange[1] - self.trange[0])
+ # Set all zero or nan probabilities to limiting PDF value
+ bad_value = (pdf_prob <= 0.0) | np.isnan(pdf_prob)
+ if self.time_only:
+ pdf_floor = 1.0 / (self.trange[1] - self.trange[0])
+ else:
+ pdf_floor = 1.0 / (self.trange[1] - self.trange[0]) / (self.qrange[1] - self.qrange[0])
+ pdf_prob[bad_value] = pdf_floor
+ pdf_prob_uncert[bad_value] = pdf_floor
- pdf /= np.maximum(norm[:,np.newaxis,np.newaxis], 1)
-
- # NLL calculation: note that negation is at the end
+ print 'channels with no data:', (bad_value & self.event.channels.hit).astype(int).sum()
+ # NLL calculation: note that negation is at the end
# Start with the probabilities of hitting (or not) the channels
- log_likelihood = ufloat((
- np.log(hit_prob[self.event.channels.hit]).sum()
- +np.log(1.0-hit_prob[~self.event.channels.hit]).sum(),
- 0.0))
+ hit_channel_prob = np.log(hit_prob[self.event.channels.hit]).sum() + np.log(1.0-hit_prob[~self.event.channels.hit]).sum()
+ hit_channel_prob_uncert = ( (nevals * nreps * hit_prob * (1.0 - hit_prob)) / hit_prob**2 ).sum()**0.5
+ log_likelihood = ufloat((hit_channel_prob, 0.0))
# Then include the probability densities of the observed charges and times
- for i, t, q in compress(izip(range(self.event.channels.hit.size),self.event.channels.t,self.event.channels.q),self.event.channels.hit):
- tbin = (t - self.trange[0]) / (self.trange[1] - self.trange[0]) * self.tbins
- qbin = (q - self.trange[0]) / (self.qrange[1] - self.qrange[0]) * self.qbins
- if tbin < 0 or tbin >= self.tbins or qbin < 0 or qbin >= self.qbins:
- probability = 0.0
- nentries = 0
- else:
- probability = pdf[i, tbin, qbin]
- # If probability is zero, avoid warning here, but catch the problem
- # in the next if block
- probability_err = probability / max(1, sqrt(pdfcount[i, tbin, qbin]))
- nentries = norm[i]
-
- if probability == 0.0 or np.isnan(probability):
- if nentries > 0:
- probability = 0.5/nentries
- probability_err = probability
- else:
- probability = 0.5/(self.qrange[1] - self.qrange[0])/(self.trange[1] - self.trange[0])
- probability_err = probability
-
- log_likelihood += umath.log(ufloat((probability, probability_err)))
+ hit_pdf_ufloat = unumpy.uarray((pdf_prob[self.event.channels.hit],
+ pdf_prob_uncert[self.event.channels.hit]))
+ log_likelihood += unumpy.log(hit_pdf_ufloat).sum()
return -log_likelihood
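
The per-channel Python loop over the binned pdf is replaced here by vectorised error propagation through uncertainties.unumpy: each hit channel contributes log(p_i +/- sigma_i), and the per-channel uncertainties combine in quadrature through the sum. A standalone sketch of that step with made-up numbers, written against the current uncertainties API (unumpy.uarray(values, std_devs); this commit targets the older tuple-argument form):

    import numpy as np
    from uncertainties import unumpy

    # hypothetical per-channel PDF values and their statistical uncertainties
    pdf_prob        = np.array([1.2e-3, 4.0e-4, 2.5e-3])
    pdf_prob_uncert = np.array([1.0e-4, 8.0e-5, 2.0e-4])

    hit_pdf_ufloat = unumpy.uarray(pdf_prob, pdf_prob_uncert)  # array of ufloats
    log_term = unumpy.log(hit_pdf_ufloat).sum()                # errors add in quadrature
    print(log_term)  # roughly -20.54+/-0.23 for these inputs
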
@@ -111,11 +101,11 @@ if __name__ == '__main__':
event = sim.simulate(1, constant_particle_gun('e-',(0,0,0),(1,0,0),100.0)).next()
- print 'nhit = %i' % np.count_nonzero(event.hits.hit)
+ print 'nhit = %i' % np.count_nonzero(event.channels.hit)
likelihood = Likelihood(sim, event)
- for x in np.linspace(-10.0, 10.0, 10*5+1):
+ for x in np.linspace(-10.0, 10.0, 2*10*5+1):
start = time.time()
- l = likelihood.eval(constant_particle_gun('e-',(x,0,0),(1,0,0),100.0),100, nreps=10)
+ l = likelihood.eval(constant_particle_gun('e-',(0,0,0),(1,0,0),100.0),100, nreps=200)
print 'x = %5.1f, %s (%1.1f sec)' % (x, l, time.time() - start)
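
One more note on the eval() hunk above: the floor substituted for empty or NaN PDF bins is simply the density of a uniform distribution over the PDF's support, so a channel with no simulated statistics contributes a flat, uninformative term to the likelihood. A self-contained sketch of that choice (the helper name is illustrative, not part of chroma):

    def pdf_floor_value(trange, qrange, time_only=True):
        # Uniform-density floor used when a PDF bin has no statistics.
        floor = 1.0 / (trange[1] - trange[0])
        if not time_only:
            floor /= (qrange[1] - qrange[0])
        return floor

    trange = (-0.5e-9, 999.5e-9)  # seconds, the Likelihood default time window (1000 ns)
    qrange = (-0.5, 9.5)          # photoelectrons, the Likelihood default charge window

    print(pdf_floor_value(trange, qrange))                   # ~1e6 (1 / 1000 ns)
    print(pdf_floor_value(trange, qrange, time_only=False))  # ~1e5 (additionally 1 / 10 pe)
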