From 9dab6b3defa3054f32e814cf35c971089db20261 Mon Sep 17 00:00:00 2001
From: tlatorre
Date: Mon, 18 Nov 2019 10:49:43 -0600
Subject: switch to using twice the PSUP reflection time for the time PDF

---
 src/likelihood.c | 105 +++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 78 insertions(+), 27 deletions(-)

diff --git a/src/likelihood.c b/src/likelihood.c
index 2c2f34e..bf4148c 100644
--- a/src/likelihood.c
+++ b/src/likelihood.c
@@ -650,8 +650,8 @@ double time_cdf(double t, double mu_noise, double mu_indirect, double *mu, size_
 
     if (t < tmean)
         ;
-    else if (t < tmean + PSUP_REFLECTION_TIME)
-        p += mu_indirect*(t-tmean)/PSUP_REFLECTION_TIME;
+    else if (t < tmean + 2*PSUP_REFLECTION_TIME)
+        p += mu_indirect*(t-tmean)/(2*PSUP_REFLECTION_TIME);
     else
         p += mu_indirect;
 
@@ -665,26 +665,44 @@ double time_cdf(double t, double mu_noise, double mu_indirect, double *mu, size_
     return p/mu_total;
 }
 
+/* Returns the probability that a photon is detected at time `t`.
+ *
+ * The probability distribution is the sum of three different components: dark
+ * noise, indirect light, and direct light. The dark noise is assumed to be
+ * constant in time. The direct light is assumed to be a delta function around
+ * the times `ts`, where each element of `ts` comes from a different particle.
+ * This assumption is probably valid for particles like muons which don't
+ * scatter much, and the hope is that it is *good enough* for electrons too.
+ * The probability distribution for indirect light is assumed to be a step
+ * function lasting 2*PSUP_REFLECTION_TIME past some time `tmean`.
+ *
+ * Note: Initially I had the indirect light be a step function which only
+ * lasted PSUP_REFLECTION_TIME (which is set to the time it takes light to
+ * cross from one side of the PSUP to the other) because I thought that this
+ * represented the maximum amount of time it took for light to traverse the
+ * detector (and the PMT hit time plot for a laserball run has a bump after the
+ * main peak which lasts for about 80 ns). However, I later realized that for
+ * events near the edge of the detector, like say a flasher or muon, the
+ * reflected light from the other side of the detector will take twice as long.
+ * So, now we use twice the PSUP reflection time.
+ *
+ * In the future it might be cool to experiment with new ways of defining the
+ * time PDF. For example, we could get a much more accurate approximation for
+ * the reflected light time distribution by using something like a kernel
+ * density estimator and for each PMT we could loop over the other PMTs and add
+ * a point to our PDF at the light travel time between the PMTs weighted by the
+ * second PMT's charge. I think currently this would be way too slow, and it's
+ * not obvious how much it would help.
+ *
+ * The probability returned is calculated by taking the sum of these three
+ * components and convolving it with a gaussian with standard deviation `sigma`
+ * which should typically be the PMT transit time spread. */
 double time_pdf(double t, double mu_noise, double mu_indirect, double *mu, size_t n, double *ts, double tmean, double *ts_sigma)
 {
-    /* Returns the probability that a photon is detected at time `t`.
-     *
-     * The probability distribution is the sum of three different components:
-     * dark noise, indirect light, and direct light. The dark noise is assumed
-     * to be constant in time. The direct light is assumed to be a delta
-     * function around the times `ts`, where each element of `ts` comes from a
-     * different particle. This assumption is probably valid for particles
-     * like muons which don't scatter much, and the hope is that it is *good
-     * enough* for electrons too. The probability distribution for indirect
-     * light is assumed to be a step function past some time `tmean`.
-     *
-     * The probability returned is calculated by taking the sum of these three
-     * components and convolving it with a gaussian with standard deviation
-     * `sigma` which should typically be the PMT transit time spread. */
     size_t i;
     double p, mu_total;
 
-    p = mu_noise/GTVALID + mu_indirect*(erf((t-tmean)/(sqrt(2)*PMT_TTS))-erf((t-tmean-PSUP_REFLECTION_TIME)/(sqrt(2)*PMT_TTS)))/(2*PSUP_REFLECTION_TIME);
+    p = mu_noise/GTVALID + mu_indirect*(erf((t-tmean)/(sqrt(2)*PMT_TTS))-erf((t-tmean-2*PSUP_REFLECTION_TIME)/(sqrt(2)*PMT_TTS)))/(4*PSUP_REFLECTION_TIME);
     mu_total = mu_noise + mu_indirect;
 
     for (i = 0; i < n; i++) {
@@ -696,18 +714,51 @@ double time_pdf(double t, double mu_noise, double mu_indirect, double *mu, size_
     return p/mu_total;
 }
 
+/* Returns the first order statistic for observing a PMT hit at time `t` given
+ * `n` hits.
+ *
+ * The first order statistic is computed from the probability distribution
+ * above. It's not obvious whether one should take the first order statistic
+ * before or after convolving with the PMT transit time spread. Since at least
+ * some of the transit time spread in SNO comes from the different transit
+ * times across the face of the PMT, it seems better to convolve first which is
+ * what we do here. In addition, the problem is not analytically tractable if
+ * you do things the other way around.
+ *
+ * Note: Technically we should include a final convolution representing both
+ * the ECA+PCA uncertainty and the digitization noise. I was initially worried
+ * about this causing an issue because as you take higher and higher order
+ * statistics the width of the time PDF becomes smaller and smaller and these
+ * other uncertainties may become relevant. However, the maximum number of PE
+ * we should be calculating the time PDF for is when QLX rails which is at
+ * approximately:
+ *
+ *     (4096-600)*12/30 ~= 1000
+ *
+ * and the width at that point is ~0.6 ns. The digitization noise is
+ * approximately 400.0/4096 ~= 0.1 ns and according to Javi:
+ *
+ * > The ECA+PCA uncertainties are negligible with respect to the TTS.
+ * > Basically, the width of the prompt peak for N16 calibration events (mainly
+ * > SPEs) is 1.6ns, compatible with the PMT TTS.
+ * >
+ * > That said, long ago I estimated the precision of the ECA calibration to be
+ * > about 0.4ns. The PCA delays are pretty stable and precisely measured, so
+ * > probably negligible compared to the ECA uncertainties.
+ *
+ * Therefore, even at the highest possible charge we should still be
+ * dominated by the width of the PDF.
+ *
+ * It is probably too expensive to try and actually calculate another
+ * convolution at this point since we'd have to do it numerically. I think a
+ * better option would just be to cut off the first order statistic at some
+ * point, i.e. to calculate:
+ *
+ *     logp[j] += log_pt(ev->pmt_hits[i].t, j > MAX_PE_TIME_PDF ? MAX_PE_TIME_PDF : j, mu_noise, mu_indirect_total, &mu[i][0][0], n*3, &ts[i][0][0], ts[i][0][0], &ts_sigma[i][0][0]);
+ *
+ */
 double log_pt(double t, size_t n, double mu_noise, double mu_indirect, double *mu, size_t n2, double *ts, double tmean, double *ts_sigma)
 {
-    /* Returns the first order statistic for observing a PMT hit at time `t`
-     * given `n` hits.
-     *
-     * The first order statistic is computed from the probability distribution
-     * above. It's not obvious whether one should take the first order
-     * statistic before or after convolving with the PMT transit time spread.
-     * Since at least some of the transit time spread in SNO comes from the
-     * different transit times across the face of the PMT, it seems better to
-     * convolve first which is what we do here. In addition, the problem is not
-     * analytically tractable if you do things the other way around. */
     if (n == 1)
         return ln(n) + log(time_pdf(t,mu_noise,mu_indirect,mu,n2,ts,tmean,ts_sigma));
     else
--
cgit
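
For reference, the indirect-light term introduced above is just a step function (a box of width 2*PSUP_REFLECTION_TIME starting at `tmean`) convolved with a Gaussian whose width is the PMT transit time spread. The standalone sketch below is not part of the patch; it only checks that the closed-form erf expression used in time_pdf() agrees with a brute-force numerical convolution. The constant values (80 ns for PSUP_REFLECTION_TIME, 1.6 ns for PMT_TTS) are placeholder assumptions here; the real values are defined elsewhere in the source tree. Compile with something like `cc check_indirect.c -lm`.

    /* Standalone check (not part of the patch) that the erf expression used
     * for the indirect-light term in time_pdf() equals a box of width
     * 2*PSUP_REFLECTION_TIME convolved with a Gaussian of width PMT_TTS.
     * The constants below are placeholders, not the repository's values. */
    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    #define PSUP_REFLECTION_TIME 80.0 /* ns, placeholder */
    #define PMT_TTS 1.6               /* ns, placeholder */

    /* Closed form: box of height mu_indirect/(2*PSUP_REFLECTION_TIME) on
     * [tmean, tmean + 2*PSUP_REFLECTION_TIME] convolved with a Gaussian. */
    static double indirect_term(double t, double mu_indirect, double tmean)
    {
        return mu_indirect*(erf((t-tmean)/(sqrt(2)*PMT_TTS)) -
               erf((t-tmean-2*PSUP_REFLECTION_TIME)/(sqrt(2)*PMT_TTS)))/
               (4*PSUP_REFLECTION_TIME);
    }

    /* Same quantity by direct numerical integration of the convolution. */
    static double indirect_term_numeric(double t, double mu_indirect, double tmean)
    {
        double s, sum = 0.0, dt = 0.01;

        for (s = tmean; s < tmean + 2*PSUP_REFLECTION_TIME; s += dt)
            sum += dt*exp(-(t-s)*(t-s)/(2*PMT_TTS*PMT_TTS))/(sqrt(2*M_PI)*PMT_TTS);

        return mu_indirect*sum/(2*PSUP_REFLECTION_TIME);
    }

    int main(void)
    {
        double t;

        for (t = -10.0; t <= 200.0; t += 10.0)
            printf("t = %6.1f ns  closed form = %.6f  numeric = %.6f\n",
                   t, indirect_term(t, 1.0, 0.0), indirect_term_numeric(t, 1.0, 0.0));

        return 0;
    }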
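
The first order statistic referred to in the new log_pt() comment is the standard result for the earliest of `n` independent hit times: if a single hit time has density f and CDF F, the minimum has density n*f(t)*(1-F(t))^(n-1), i.e. log n + log f(t) + (n-1)*log(1-F(t)) in log form. The sketch below illustrates that formula with a unit exponential as a toy stand-in for time_pdf()/time_cdf(); it is not the repository's log_pt(), whose real arguments and lookup tables (e.g. ln()) are omitted here.

    /* Sketch of the first order statistic used by log_pt(): the density of
     * the earliest of n i.i.d. hit times with density f and CDF F is
     * n*f(t)*(1-F(t))^(n-1). A unit exponential stands in for the real
     * time_pdf()/time_cdf(), which take several more arguments. */
    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy hit-time distribution: unit exponential. */
    static double toy_pdf(double t) { return t < 0 ? 0.0 : exp(-t); }
    static double toy_cdf(double t) { return t < 0 ? 0.0 : 1.0 - exp(-t); }

    /* log of the first order statistic: log n + log f(t) + (n-1)*log(1-F(t)).
     * log1p(-F) is used for numerical stability when F is close to 0. */
    static double log_first_order_statistic(double t, size_t n)
    {
        if (n == 1)
            return log(toy_pdf(t));

        return log((double) n) + log(toy_pdf(t)) + (n - 1)*log1p(-toy_cdf(t));
    }

    int main(void)
    {
        size_t n;

        /* With more hits the earliest hit time piles up near t = 0. */
        for (n = 1; n <= 100; n *= 10)
            printf("n = %3zu  p(0.1) = %.6f\n",
                   n, exp(log_first_order_statistic(0.1, n)));

        return 0;
    }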