author    | Anthony LaTorre <tlatorre9@gmail.com> | 2011-08-25 21:42:42 -0400
committer | Anthony LaTorre <tlatorre9@gmail.com> | 2011-08-25 21:42:42 -0400
commit    | e75eda8a637c01c34c71063b91a86845cc1c5beb (patch)
tree      | d525cdaeda966c878e5387bf327f7e9aee643afc
parent    | fa8d1082f9d989f2a3819540a9bf30dc67618709 (diff)
parent    | b8e7b443242c716c12006442c2738e09ed77c0c9 (diff)
merge
-rw-r--r-- | fileio/root.C        |  50
-rw-r--r-- | fileio/root.py       | 137
-rw-r--r-- | gpu.py               | 124
-rw-r--r-- | likelihood.py        |  76
-rwxr-xr-x | sim.py               |  29
-rw-r--r-- | src/daq.cu           |  98
-rw-r--r-- | tests/test_fileio.py |  73
7 files changed, 530 insertions, 57 deletions
diff --git a/fileio/root.C b/fileio/root.C
index 7052a71..07e50a1 100644
--- a/fileio/root.C
+++ b/fileio/root.C
@@ -11,15 +11,18 @@ struct Photon {
     double wavelength; // nm
     unsigned int history;
     int last_hit_triangle;
+
+    ClassDef(Photon, 1);
 };
 
 struct Track {
     std::string particle;
-    double time;
     TVector3 position;
     TVector3 direction;
     double start_time;
     double total_energy;
+
+    ClassDef(Track, 1);
 };
 
 struct MC {
@@ -34,6 +37,7 @@ struct MC {
     std::vector<Photon> photon_start;
     std::vector<Photon> photon_stop;
 
+    ClassDef(MC, 1);
 };
 
 struct Channel {
@@ -42,23 +46,30 @@ struct Channel {
     double time;
     double charge;
     unsigned int mc_history;
+
+    ClassDef(Channel, 1);
 };
 
 struct Event {
     int event_id;
     MC mc;
     int nhit;
+    int max_channel_id;
     std::vector<Channel> channel;
 
+    ClassDef(Event, 1);
+
     // Populate arrays of length nentries with hit, time, and charge
     // information, indexed by channel ID
     void get_channels(unsigned int nentries, int *hit, float *time,
-                      float *charge)
+                      float *charge, unsigned int *mc_history=0)
     {
         for (unsigned int i=0; i < nentries; i++) {
             hit[i] = 0;
             time[i] = -1e9f;
             charge[i] = -1e9f;
+            if (mc_history)
+                mc_history[i] = 0;
         }
 
         for (unsigned int i=0; i < channel.size(); i++) {
@@ -68,18 +79,45 @@ struct Event {
                 hit[channel_id] = 1;
                 time[channel_id] = channel[i].time;
                 charge[channel_id] = channel[i].charge;
+                if (mc_history)
+                    mc_history[channel_id] = channel[i].mc_history;
             }
         }
     }
 };
 
-void fill_photons(Event *ev, bool start,
+void get_photons(const std::vector<Photon> &photons, float *positions,
+                 float *directions, float *polarizations, float *wavelengths,
+                 float *times, unsigned int *histories, int *last_hit_triangles)
+{
+    for (unsigned int i=0; i < photons.size(); i++) {
+        const Photon &photon = photons[i];
+        positions[3*i] = photon.position.X();
+        positions[3*i+1] = photon.position.Y();
+        positions[3*i+2] = photon.position.Z();
+
+        directions[3*i] = photon.direction.X();
+        directions[3*i+1] = photon.direction.Y();
+        directions[3*i+2] = photon.direction.Z();
+
+        polarizations[3*i] = photon.polarization.X();
+        polarizations[3*i+1] = photon.polarization.Y();
+        polarizations[3*i+2] = photon.polarization.Z();
+
+        wavelengths[i] = photon.wavelength;
+        times[i] = photon.time;
+        histories[i] = photon.history;
+        last_hit_triangles[i] = photon.last_hit_triangle;
+    }
+}
+
+
+void fill_photons(std::vector<Photon> &photons,
                   unsigned int nphotons, float *pos, float *dir,
                   float *pol, float *wavelength, float *t0,
-                  int *histories=0, int *last_hit_triangle=0)
+                  unsigned int *histories=0, int *last_hit_triangle=0)
 {
-    std::vector<Photon> &photons = start ?
-        ev->mc.photon_start : ev->mc.photon_stop;
     photons.resize(nphotons);
 
     for (unsigned int i=0; i < nphotons; i++) {
@@ -107,6 +145,8 @@ void fill_hits(Event *ev, unsigned int nchannels, float *time,
 {
     ev->channel.resize(0);
     ev->nhit = 0;
+    ev->max_channel_id = nchannels - 1;
+
     Channel ch;
     for (unsigned int i=0; i < nchannels; i++) {
         if (time[i] < 1e8) {
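The new get_photons/fill_photons helpers trade the old Event*/bool interface for flat C arrays, so the caller packs each per-photon vector quantity as a length-3N float buffer. A minimal sketch of that layout from the Python side, assuming root.C has already been compiled with ACLiC the way fileio/root.py does it (the array values below are illustrative, not part of the API):

```python
import numpy as np
import ROOT
# Assumes fileio/root.C has been loaded, e.g. via
# ROOT.gROOT.ProcessLine('.L fileio/root.C+g')

nphotons = 4
# (N,3) per-photon vectors; ravel() gives the flat [x0,y0,z0, x1,y1,z1, ...]
# layout that fill_photons/get_photons index as 3*i+k.
pos = np.zeros((nphotons, 3), dtype=np.float32)
dirs = np.tile(np.array([0, 0, 1], dtype=np.float32), (nphotons, 1))
pols = np.tile(np.array([1, 0, 0], dtype=np.float32), (nphotons, 1))
wavelengths = 400.0 * np.ones(nphotons, dtype=np.float32)
times = np.zeros(nphotons, dtype=np.float32)
histories = np.zeros(nphotons, dtype=np.uint32)
last_hit_triangles = -1 * np.ones(nphotons, dtype=np.int32)

ev = ROOT.Event()
ROOT.fill_photons(ev.mc.photon_start, nphotons,
                  pos.ravel(), dirs.ravel(), pols.ravel(),
                  wavelengths, times, histories, last_hit_triangles)
```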
diff --git a/fileio/root.py b/fileio/root.py
index 1fa2425..4c9d9bb 100644
--- a/fileio/root.py
+++ b/fileio/root.py
@@ -7,6 +7,129 @@ ROOT.gROOT.ProcessLine('.L '+os.path.join(os.path.dirname(__file__), 'root.C+g')
 import ROOT
 import chroma.event as event
 
+def tvector3_to_ndarray(vec):
+    '''Convert a ROOT.TVector3 into a numpy np.float32 array'''
+    return np.array((vec.X(), vec.Y(), vec.Z()), dtype=np.float32)
+
+def make_photon_with_arrays(size):
+    '''Returns a new chroma.event.Photons object for `size` number of
+    photons with empty arrays set for all the photon attributes.'''
+    return event.Photons(positions=np.empty((size,3), dtype=np.float32),
+                         directions=np.empty((size,3), dtype=np.float32),
+                         polarizations=np.empty((size,3), dtype=np.float32),
+                         wavelengths=np.empty(size, dtype=np.float32),
+                         times=np.empty(size, dtype=np.float32),
+                         histories=np.empty(size, dtype=np.uint32),
+                         last_hit_triangles=np.empty(size, dtype=np.int32))
+
+
+def root_event_to_python_event(ev):
+    '''Returns a new chroma.event.Event object created from the
+    contents of the ROOT event `ev`.'''
+    pyev = event.Event(ev.event_id)
+
+    # MC
+    pyev.particle_name = str(ev.mc.particle)
+    pyev.gen_position = tvector3_to_ndarray(ev.mc.gen_position)
+    pyev.gen_direction = tvector3_to_ndarray(ev.mc.gen_direction)
+    pyev.gen_total_energy = ev.mc.gen_total_energy
+
+    pyev.nphoton = ev.mc.nphoton
+
+    for subtrack in ev.mc.subtrack:
+        pysubtrack = event.Subtrack(str(subtrack.particle),
+                                    tvector3_to_ndarray(subtrack.position),
+                                    tvector3_to_ndarray(subtrack.direction),
+                                    subtrack.start_time,
+                                    subtrack.total_energy)
+        pyev.subtracks.append(pysubtrack)
+
+    # photon start
+    if ev.mc.photon_start.size() > 0:
+        photons = make_photon_with_arrays(ev.mc.photon_start.size())
+        ROOT.get_photons(ev.mc.photon_start, photons.positions.ravel(), photons.directions.ravel(),
+                         photons.polarizations.ravel(), photons.wavelengths, photons.times,
+                         photons.histories, photons.last_hit_triangles)
+        pyev.photon_start = photons
+
+    # photon stop
+    if ev.mc.photon_stop.size() > 0:
+        photons = make_photon_with_arrays(ev.mc.photon_stop.size())
+        ROOT.get_photons(ev.mc.photon_stop, photons.positions.ravel(), photons.directions.ravel(),
+                         photons.polarizations.ravel(), photons.wavelengths, photons.times,
+                         photons.histories, photons.last_hit_triangles)
+        pyev.photon_stop = photons
+
+    # hits
+    max_channel_id = ev.max_channel_id
+    hit = np.empty(shape=max_channel_id+1, dtype=np.int32)
+    t = np.empty(shape=max_channel_id+1, dtype=np.float32)
+    q = np.empty(shape=max_channel_id+1, dtype=np.float32)
+    histories = np.empty(shape=max_channel_id+1, dtype=np.uint32)
+
+    ev.get_channels(max_channel_id+1, hit, t, q, histories)
+    pyev.channels = event.Channels(hit.astype(bool), t, q, histories)
+    return pyev
+
+class RootReader(object):
+    '''Reader of Chroma events from a ROOT file.  This class can be used to
+    navigate up and down the file linearly or in a random access fashion.
+    All returned events are instances of the chroma.event.Event class.
+
+    It implements the iterator protocol, so you can do
+
+        for ev in RootReader('electron.root'):
+            # process event here
+    '''
+
+    def __init__(self, filename):
+        '''Open ROOT file named `filename` containing TTree `T`.'''
+        self.f = ROOT.TFile(filename)
+        self.T = self.f.T
+        self.i = -1
+
+    def __len__(self):
+        '''Returns number of events in this file.'''
+        return self.T.GetEntries()
+
+    def next(self):
+        '''Return the next event in the file. Raises StopIteration
+        when you get to the end.'''
+        if self.i + 1 >= len(self):
+            raise StopIteration
+
+        self.i += 1
+        self.T.GetEntry(self.i)
+        return root_event_to_python_event(self.T.ev)
+
+    def prev(self):
+        '''Return the previous event in the file. Raises StopIteration if
+        that would go past the beginning.'''
+        if self.i <= 0:
+            self.i = -1
+            raise StopIteration
+
+        self.i -= 1
+        self.T.GetEntry(self.i)
+        return root_event_to_python_event(self.T.ev)
+
+    def current(self):
+        '''Return the current event in the file.'''
+        self.T.GetEntry(self.i) # just in case?
+        return root_event_to_python_event(self.T.ev)
+
+    def jump_to(self, index):
+        '''Return the event at `index`. Updates current location.'''
+        if index < 0 or index >= len(self):
+            raise IndexError
+
+        self.i = index
+        self.T.GetEntry(self.i)
+        return root_event_to_python_event(self.T.ev)
+
+    def index(self):
+        '''Return the current event index'''
+        return self.i
+
 class RootWriter(object):
     def __init__(self, filename):
         self.filename = filename
@@ -28,20 +151,22 @@ class RootWriter(object):
 
         if pyev.photon_start is not None:
             photons = pyev.photon_start
-            ROOT.fill_photons(self.ev, True,
+            ROOT.fill_photons(self.ev.mc.photon_start,
                               len(photons.positions),
                               np.ravel(photons.positions),
                               np.ravel(photons.directions),
                               np.ravel(photons.polarizations),
-                              photons.wavelengths, photons.times)
+                              photons.wavelengths, photons.times,
+                              photons.histories, photons.last_hit_triangles)
         if pyev.photon_stop is not None:
-            photons = photon_stop
-            ROOT.fill_photons(self.ev, True,
+            photons = pyev.photon_stop
+            ROOT.fill_photons(self.ev.mc.photon_stop,
                               len(photons.positions),
                               np.ravel(photons.positions),
                               np.ravel(photons.directions),
                               np.ravel(photons.polarizations),
-                              photons.wavelengths, photons.times)
+                              photons.wavelengths, photons.times,
+                              photons.histories, photons.last_hit_triangles)
 
         self.ev.mc.subtrack.resize(0)
         if pyev.subtracks is not None:
@@ -53,7 +178,7 @@ class RootWriter(object):
                 self.ev.mc.subtrack[i].start_time = subtrack.start_time
                 self.ev.mc.subtrack[i].total_energy = subtrack.total_energy
 
-        ROOT.fill_hits(self.ev, len(pyev.hits.t), pyev.hits.t, pyev.hits.q, pyev.hits.histories)
+        ROOT.fill_hits(self.ev, len(pyev.channels.t), pyev.channels.t, pyev.channels.q, pyev.channels.histories)
         self.T.Fill()
 
     def close(self):
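A short usage sketch of the new RootReader navigation methods; the file name is illustrative and the file is assumed to have been produced by RootWriter:

```python
from chroma.fileio import root

reader = root.RootReader('electron.root')   # hypothetical file
print 'events in file:', len(reader)

ev = reader.next()                  # advance to event 0
print ev.event_id, ev.channels.hit.sum()

ev = reader.jump_to(len(reader) - 1)   # random access to the last event
ev = reader.prev()                     # step back one event
print 'current index:', reader.index()
```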
diff --git a/gpu.py b/gpu.py
--- a/gpu.py
+++ b/gpu.py
@@ -115,7 +115,8 @@ class GPU(object):
         self.daq_module = SourceModule(chroma.src.daq, options=cuda_options, no_extern_c=True)
         self.daq_funcs = CUDAFuncs(self.daq_module,
                                    ['reset_earliest_time_int', 'run_daq',
-                                    'convert_sortable_int_to_float', 'bin_hits'])
+                                    'convert_sortable_int_to_float', 'bin_hits',
+                                    'accumulate_pdf_eval'])
 
     def print_device_usage(self):
         print 'device usage:'
@@ -422,5 +423,126 @@ class GPU(object):
         '''Returns the 1D hitcount array and the 3D [channel, time, charge] histogram'''
         return self.hitcount_gpu.get(), self.pdf_gpu.get()
 
+    def setup_pdf_eval(self, event_hit, event_time, event_charge,
+                       min_twidth, trange, min_qwidth, qrange, min_bin_content=10,
+                       time_only=True):
+        '''Setup GPU arrays to compute PDF values for the given event.
+        The pdf_eval calculation allows the PDF to be evaluated at a
+        single point for each channel as the Monte Carlo is run.  The
+        effective bin size will be as small as (`min_twidth`,
+        `min_qwidth`) around the point of interest, but will be large
+        enough to ensure that `min_bin_content` Monte Carlo events
+        fall into the bin.
+
+        event_hit: ndarray
+            Hit or not-hit status for each channel in the detector.
+        event_time: ndarray
+            Hit time for each channel in the detector.  If channel
+            not hit, the time will be ignored.
+        event_charge: ndarray
+            Integrated charge for each channel in the detector.
+            If channel not hit, the charge will be ignored.
+
+        min_twidth: float
+            Minimum bin size in the time dimension
+        trange: (float, float)
+            Range of time dimension in PDF
+        min_qwidth: float
+            Minimum bin size in charge dimension
+        qrange: (float, float)
+            Range of charge dimension in PDF
+        min_bin_content: int
+            The bin will be expanded to include at least this many events
+        time_only: bool
+            If True, only the time observable will be used in the PDF.
+        '''
+        self.event_hit_gpu = gpuarray.to_gpu(event_hit.astype(np.uint32))
+        self.event_time_gpu = gpuarray.to_gpu(event_time.astype(np.float32))
+        self.event_charge_gpu = gpuarray.to_gpu(event_charge.astype(np.float32))
+
+        self.eval_hitcount_gpu = gpuarray.zeros(len(event_hit), dtype=np.uint32)
+        self.eval_bincount_gpu = gpuarray.zeros(len(event_hit), dtype=np.uint32)
+        self.nearest_mc_gpu = gpuarray.empty(shape=len(event_hit) * min_bin_content,
+                                             dtype=np.float32)
+        self.nearest_mc_gpu.fill(1e9)
+
+        self.min_twidth = min_twidth
+        self.trange = trange
+        self.min_qwidth = min_qwidth
+        self.qrange = qrange
+        self.min_bin_content = min_bin_content
+        self.time_only = time_only
+
+    def clear_pdf_eval(self):
+        '''Reset PDF evaluation counters to start accumulating new Monte Carlo.'''
+        self.eval_hitcount_gpu.fill(0)
+        self.eval_bincount_gpu.fill(0)
+        self.nearest_mc_gpu.fill(1e9)
+
+    def accumulate_pdf_eval(self):
+        '''Add the most recent results of run_daq() to the PDF evaluation.'''
+
+        self.daq_funcs.accumulate_pdf_eval(np.int32(self.time_only),
+                                           np.int32(len(self.event_hit_gpu)),
+                                           self.event_hit_gpu,
+                                           self.event_time_gpu,
+                                           self.event_charge_gpu,
+                                           self.earliest_time_gpu,
+                                           self.channel_q_gpu,
+                                           self.eval_hitcount_gpu,
+                                           self.eval_bincount_gpu,
+                                           np.float32(self.min_twidth),
+                                           np.float32(self.trange[0]),
+                                           np.float32(self.trange[1]),
+                                           np.float32(self.min_qwidth),
+                                           np.float32(self.qrange[0]),
+                                           np.float32(self.qrange[1]),
+                                           self.nearest_mc_gpu,
+                                           np.int32(self.min_bin_content),
+                                           block=(self.nthreads_per_block,1,1),
+                                           grid=(len(self.earliest_time_gpu)//self.nthreads_per_block+1,1))
+
+    def get_pdf_eval(self):
+        evhit = self.event_hit_gpu.get().astype(bool)
+        hitcount = self.eval_hitcount_gpu.get()
+        bincount = self.eval_bincount_gpu.get()
+
+        pdf_value = np.zeros(len(hitcount), dtype=float)
+        pdf_frac_uncert = np.zeros_like(pdf_value)
+
+        # PDF value for high stats bins
+        high_stats = bincount >= self.min_bin_content
+        if high_stats.any():
+            if self.time_only:
+                pdf_value[high_stats] = bincount[high_stats].astype(float) / hitcount[high_stats] / self.min_twidth
+            else:
+                raise Exception('Unimplemented 2D (time,charge) mode!')
+
+            pdf_frac_uncert[high_stats] = 1.0/np.sqrt(bincount[high_stats])
+
+        # PDF value for low stats bins
+        low_stats = ~high_stats & (hitcount > 0) & evhit
+
+        nearest_mc = self.nearest_mc_gpu.get().reshape((len(hitcount), self.min_bin_content))
+
+        # Deal with the case where we did not even get min_bin_content events in the PDF,
+        # but also clamp the lower range to ensure we don't index by a negative number in 2 lines
+        last_valid_entry = np.maximum(0, (nearest_mc < 1e9).astype(int).sum(axis=1) - 1)
+        distance = nearest_mc[np.arange(len(last_valid_entry)),last_valid_entry]
+
+        if low_stats.any():
+            if self.time_only:
+                pdf_value[low_stats] = (last_valid_entry[low_stats] + 1).astype(float) / hitcount[low_stats] / distance[low_stats] / 2.0
+            else:
+                raise Exception('Unimplemented 2D (time,charge) mode!')
+
+            pdf_frac_uncert[low_stats] = 1.0/np.sqrt(last_valid_entry[low_stats] + 1)
+
+        # PDFs with no stats got zero by default during array creation
+
+        print 'high_stats:', high_stats.sum(), 'low_stats', low_stats.sum()
+
+        return hitcount, pdf_value, pdf_value * pdf_frac_uncert
+
     def __del__(self):
         self.context.pop()
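The estimator in get_pdf_eval can be read as follows: channels whose fixed-width bin collected at least min_bin_content Monte Carlo entries use an ordinary histogram density, while sparsely populated channels switch to a nearest-neighbour style estimate built from the distances stored in nearest_mc. A host-side numpy sketch of the same time-only arithmetic (variable names are illustrative):

```python
import numpy as np

def pdf_estimate(bincount, hitcount, nearest_mc, min_twidth, min_bin_content):
    """Host-side sketch of the time-only PDF estimate in GPU.get_pdf_eval."""
    pdf = np.zeros(len(bincount), dtype=float)

    # High statistics: ordinary density, counts / (hits * bin width)
    high = bincount >= min_bin_content
    pdf[high] = bincount[high].astype(float) / hitcount[high] / min_twidth

    # Low statistics: the k nearest Monte Carlo hits span a half-width d,
    # so the density is roughly k / (hits * 2 * d)
    k = np.maximum(0, (nearest_mc < 1e9).sum(axis=1) - 1)   # index of last valid entry
    d = nearest_mc[np.arange(len(k)), k]                     # distance to k-th nearest hit
    low = ~high & (hitcount > 0)
    pdf[low] = (k[low] + 1).astype(float) / hitcount[low] / (2.0 * d[low])
    return pdf
```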
""" - hitcount, pdfcount = self.sim.create_pdf(nevals, vertex_generator, - self.tbins, self.trange, - self.qbins, self.qrange, - nreps=nreps) - + hitcount, pdf_prob, pdf_prob_uncert = self.sim.eval_pdf(self.event.channels, + nevals, vertex_generator, + 2.0e-9, self.trange, + 1, self.qrange, + nreps=nreps, + time_only=self.time_only) + # Normalize probabilities and put a floor to keep the log finite hit_prob = hitcount.astype(np.float32) / (nreps * nevals) hit_prob = np.maximum(hit_prob, 0.5 / (nreps*nevals)) - # Normalize t,q PDFs - pdf = pdfcount.astype(np.float32) - norm = pdf.sum(axis=2).sum(axis=1) / (self.qrange[1] - self.qrange[0]) / (self.trange[1] - self.trange[0]) + # Set all zero or nan probabilities to limiting PDF value + bad_value = (pdf_prob <= 0.0) | np.isnan(pdf_prob) + if self.time_only: + pdf_floor = 1.0 / (self.trange[1] - self.trange[0]) + else: + pdf_floor = 1.0 / (self.trange[1] - self.trange[0]) / (self.qrange[1] - self.qrange[0]) + pdf_prob[bad_value] = pdf_floor + pdf_prob_uncert[bad_value] = pdf_floor - pdf /= np.maximum(norm[:,np.newaxis,np.newaxis], 1) - - # NLL calculation: note that negation is at the end + print 'channels with no data:', (bad_value & self.event.channels.hit).astype(int).sum() + # NLL calculation: note that negation is at the end # Start with the probabilties of hitting (or not) the channels - log_likelihood = ufloat(( - np.log(hit_prob[self.event.channels.hit]).sum() - +np.log(1.0-hit_prob[~self.event.channels.hit]).sum(), - 0.0)) + hit_channel_prob = np.log(hit_prob[self.event.channels.hit]).sum() + np.log(1.0-hit_prob[~self.event.channels.hit]).sum() + hit_channel_prob_uncert = ( (nevals * nreps * hit_prob * (1.0 - hit_prob)) / hit_prob**2 ).sum()**0.5 + log_likelihood = ufloat((hit_channel_prob, 0.0)) # Then include the probability densities of the observed charges and times - for i, t, q in compress(izip(range(self.event.channels.hit.size),self.event.channels.t,self.event.channels.q),self.event.channels.hit): - tbin = (t - self.trange[0]) / (self.trange[1] - self.trange[0]) * self.tbins - qbin = (q - self.trange[0]) / (self.qrange[1] - self.qrange[0]) * self.qbins - if tbin < 0 or tbin >= self.tbins or qbin < 0 or qbin >= self.qbins: - probability = 0.0 - nentries = 0 - else: - probability = pdf[i, tbin, qbin] - # If probability is zero, avoid warning here, but catch the problem - # in the next if block - probability_err = probability / max(1, sqrt(pdfcount[i, tbin, qbin])) - nentries = norm[i] - - if probability == 0.0 or np.isnan(probability): - if nentries > 0: - probability = 0.5/nentries - probability_err = probability - else: - probability = 0.5/(self.qrange[1] - self.qrange[0])/(self.trange[1] - self.trange[0]) - probability_err = probability - - log_likelihood += umath.log(ufloat((probability, probability_err))) + hit_pdf_ufloat = unumpy.uarray((pdf_prob[self.event.channels.hit], + pdf_prob_uncert[self.event.channels.hit])) + log_likelihood += unumpy.log(hit_pdf_ufloat).sum() return -log_likelihood @@ -111,11 +101,11 @@ if __name__ == '__main__': event = sim.simulate(1, constant_particle_gun('e-',(0,0,0),(1,0,0),100.0)).next() - print 'nhit = %i' % np.count_nonzero(event.hits.hit) + print 'nhit = %i' % np.count_nonzero(event.channels.hit) likelihood = Likelihood(sim, event) - for x in np.linspace(-10.0, 10.0, 10*5+1): + for x in np.linspace(-10.0, 10.0, 2*10*5+1): start = time.time() - l = likelihood.eval(constant_particle_gun('e-',(x,0,0),(1,0,0),100.0),100, nreps=10) + l = 
diff --git a/sim.py b/sim.py
--- a/sim.py
+++ b/sim.py
@@ -82,8 +82,7 @@ class Simulation(object):
                 ev.photon_stop = self.gpu_worker.get_photons()
 
             if run_daq:
-                ev.hits = self.gpu_worker.get_hits()
-                ev.channels = ev.hits
+                ev.channels = self.gpu_worker.get_hits()
 
             yield ev
 
@@ -117,6 +116,32 @@ class Simulation(object):
             self.gpu_worker.add_hits_to_pdf()
 
         return self.gpu_worker.get_pdfs()
+
+
+    def eval_pdf(self, event_channels, nevents, vertex_generator,
+                 min_twidth, trange, min_qwidth, qrange,
+                 min_bin_content=20, nreps=1, time_only=True):
+        photon_gen = repeating_iterator(self.photon_generator.generate_events(nevents, vertex_generator),
+                                        nreps)
+        return self.eval_pdf_from_photons(event_channels, nevents*nreps, photon_gen,
+                                          min_twidth, trange, min_qwidth, qrange,
+                                          min_bin_content=min_bin_content, time_only=time_only)
+
+    def eval_pdf_from_photons(self, event_channels, nevents, photon_generator,
+                              min_twidth, trange, min_qwidth, qrange,
+                              min_bin_content=20, time_only=True):
+        '''Returns tuple: 1D array of channel hit counts, 1D array of PDF
+        probability densities, and 1D array of PDF uncertainties.'''
+        self.gpu_worker.setup_pdf_eval(event_channels.hit, event_channels.t, event_channels.q,
+                                       min_twidth, trange, min_qwidth, qrange,
+                                       min_bin_content=min_bin_content, time_only=time_only)
+
+        for ev in itertools.islice(photon_generator, nevents):
+            self.gpu_worker.load_photons(ev.photon_start)
+            self.gpu_worker.propagate()
+            self.gpu_worker.run_daq()
+            self.gpu_worker.accumulate_pdf_eval()
+
+        return self.gpu_worker.get_pdf_eval()
 
     @profile_if_possible
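A hedged sketch of calling the new Simulation.eval_pdf directly, mirroring the argument pattern Likelihood.eval uses above; it assumes `simulation` is an already-constructed chroma.sim.Simulation and that constant_particle_gun is the same generator used in likelihood.py:

```python
ev = simulation.simulate(1, constant_particle_gun('e-', (0,0,0), (1,0,0), 100.0)).next()

hitcount, pdf_prob, pdf_prob_uncert = \
    simulation.eval_pdf(ev.channels, 100,
                        constant_particle_gun('e-', (0,0,0), (1,0,0), 100.0),
                        2.0e-9, (-0.5e-9, 999.5e-9),   # min time bin width, time range
                        1, (-0.5, 9.5),                # min charge bin width, charge range
                        nreps=10, time_only=True)
```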
diff --git a/src/daq.cu b/src/daq.cu
--- a/src/daq.cu
+++ b/src/daq.cu
@@ -106,5 +106,103 @@ __global__ void bin_hits(int nchannels,
             pdf[bin] += 1;
         }
 }
+
+__global__ void accumulate_pdf_eval(int time_only,
+                                    int nchannels,
+                                    unsigned int *event_hit,
+                                    float *event_time,
+                                    float *event_charge,
+                                    float *mc_time,
+                                    unsigned int *mc_charge, // quirk of DAQ!
+                                    unsigned int *hitcount,
+                                    unsigned int *bincount,
+                                    float min_twidth, float tmin, float tmax,
+                                    float min_qwidth, float qmin, float qmax,
+                                    float *nearest_mc,
+                                    int min_bin_content)
+{
+    int id = threadIdx.x + blockDim.x * blockIdx.x;
+
+    if (id >= nchannels)
+        return;
+
+    // Was this channel hit in the Monte Carlo?
+    float channel_mc_time = mc_time[id];
+    if (channel_mc_time >= 1e8)
+        return; // Nothing else to do
+
+    // Is this channel inside the range of the PDF?
+    float distance;
+    int channel_bincount = bincount[id];
+    if (time_only) {
+        if (channel_mc_time < tmin || channel_mc_time > tmax)
+            return;  // Nothing else to do
+
+        // This MC information is contained in the PDF
+        hitcount[id] += 1;
+
+        // Was this channel hit in the event-of-interest?
+        int channel_event_hit = event_hit[id];
+        if (!channel_event_hit)
+            return; // No need to update PDF value for unhit channel
+
+        // Are we inside the minimum size bin?
+        float channel_event_time = event_time[id];
+        distance = fabsf(channel_mc_time - channel_event_time);
+        if (distance < min_twidth/2.0)
+            bincount[id] = channel_bincount + 1;
+
+    } else { // time and charge PDF
+        float channel_mc_charge = mc_charge[id]; // int->float conversion because DAQ just returns an integer
+
+        if (channel_mc_time < tmin || channel_mc_time > tmax ||
+            channel_mc_charge < qmin || channel_mc_charge > qmax)
+            return;  // Nothing else to do
+
+        // This MC information is contained in the PDF
+        hitcount[id] += 1;
+
+        // Was this channel hit in the event-of-interest?
+        int channel_event_hit = event_hit[id];
+        if (!channel_event_hit)
+            return; // No need to update PDF value for unhit channel
+
+        // Are we inside the minimum size bin?
+        float channel_event_time = event_time[id];
+        float channel_event_charge = event_charge[id];
+        float normalized_time_distance = fabsf(channel_event_time - channel_mc_time)/min_twidth/2.0;
+        float normalized_charge_distance = fabsf(channel_event_charge - channel_mc_charge)/min_qwidth/2.0;
+        distance = sqrt(normalized_time_distance*normalized_time_distance + normalized_charge_distance*normalized_charge_distance);
+
+        if (distance < 1.0f)
+            bincount[id] = channel_bincount + 1;
+    }
+
+    // Do we need to keep updating the nearest_mc list?
+    if (channel_bincount + 1 >= min_bin_content)
+        return; // No need to perform insertion into nearest_mc because bincount is a better estimate of the PDF value
+
+    // insertion sort the distance into the array of nearest MC points
+    int offset = min_bin_content * id;
+    int entry = min_bin_content - 1;
+
+    // If last entry less than new entry, nothing to update
+    if (distance > nearest_mc[offset + entry])
+        return;
+
+    // Find where to insert the new entry while sliding the rest
+    // to the right
+    entry--;
+    while (entry >= 0) {
+        if (nearest_mc[offset+entry] >= distance)
+            nearest_mc[offset+entry+1] = nearest_mc[offset+entry];
+        else
+            break;
+        entry--;
+    }
+
+    nearest_mc[offset+entry+1] = distance;
+}
+
 } // extern "C"
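Per channel, the kernel keeps the min_bin_content smallest Monte-Carlo-to-event distances seen so far in a fixed-size slice of nearest_mc, using a single insertion step per event rather than a full sort, and stops bothering once the fixed-width bin already holds enough entries. A host-side sketch of the same bookkeeping for one channel (illustrative only):

```python
import numpy as np

def insert_distance(nearest, distance):
    """Keep one channel's running list of smallest distances in ascending order.

    `nearest` plays the role of that channel's slice of nearest_mc
    (length min_bin_content), initialized to 1e9.
    """
    if distance > nearest[-1]:
        return  # worse than everything already stored
    # slide larger entries right, then drop the new distance in place
    i = len(nearest) - 2
    while i >= 0 and nearest[i] >= distance:
        nearest[i+1] = nearest[i]
        i -= 1
    nearest[i+1] = distance

nearest = np.ones(10, dtype=np.float32) * 1e9   # min_bin_content = 10
for d in [3.2, 0.7, 5.1, 0.9]:
    insert_distance(nearest, d)
print nearest[:4]   # [0.7, 0.9, 3.2, 5.1]
```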
diff --git a/tests/test_fileio.py b/tests/test_fileio.py
new file mode 100644
index 0000000..d21c9ff
--- /dev/null
+++ b/tests/test_fileio.py
@@ -0,0 +1,73 @@
+import unittest
+from chroma.fileio import root
+from chroma import event
+import numpy as np
+
+class TestFileIO(unittest.TestCase):
+    def test_file_write_and_read(self):
+        ev = event.Event(1, 'e-', (0,0,1), (1,0,0), 15)
+
+        photon_start = root.make_photon_with_arrays(1)
+        photon_start.positions[0] = (1,2,3)
+        photon_start.directions[0] = (4,5,6)
+        photon_start.polarizations[0] = (7,8,9)
+        photon_start.wavelengths[0] = 400.0
+        photon_start.times[0] = 100.0
+        photon_start.histories[0] = 20
+        photon_start.last_hit_triangles[0] = 5
+        ev.photon_start = photon_start
+
+        photon_stop = root.make_photon_with_arrays(1)
+        photon_stop.positions[0] = (1,2,3)
+        photon_stop.directions[0] = (4,5,6)
+        photon_stop.polarizations[0] = (7,8,9)
+        photon_stop.wavelengths[0] = 400.0
+        photon_stop.times[0] = 100.0
+        photon_stop.histories[0] = 20
+        photon_stop.last_hit_triangles[0] = 5
+        ev.photon_stop = photon_stop
+
+        ev.nphoton = 1
+
+        ev.subtracks.append(event.Subtrack('e-', (40,30,20), (-1, -2, -3), 400, 800))
+
+        channels = event.Channels(hit=np.array([True, False]),
+                                  t=np.array([20.0, 1e9], dtype=np.float32),
+                                  q=np.array([2.0, 0.0], dtype=np.float32),
+                                  histories=np.array([8, 32], dtype=np.uint32))
+        ev.channels = channels
+
+        filename = '/tmp/chroma-filewritertest.root'
+        writer = root.RootWriter(filename)
+        writer.write_event(ev)
+        writer.close()
+
+        # Exercise the RootReader methods
+        reader = root.RootReader(filename)
+        self.assertEquals(len(reader), 1)
+
+        self.assertRaises(StopIteration, reader.prev)
+
+        reader.next()
+
+        self.assertEqual(reader.index(), 0)
+        self.assertRaises(StopIteration, reader.next)
+
+        reader.jump_to(0)
+
+        # Enough screwing around, let's get the one event in the file
+        newev = reader.current()
+
+        # Now check if everything is correct in the event
+        for attribute in ['event_id', 'particle_name', 'gen_total_energy']:
+            self.assertEqual(getattr(ev, attribute), getattr(newev, attribute), 'compare %s' % attribute)
+        for attribute in ['gen_position', 'gen_direction']:
+            self.assertTrue(np.allclose(getattr(ev, attribute), getattr(newev, attribute)), 'compare %s' % attribute)
+
+        for attribute in ['positions', 'directions', 'wavelengths', 'polarizations', 'times',
+                          'histories', 'last_hit_triangles']:
+            self.assertTrue(np.allclose(getattr(ev.photon_start, attribute),
+                                        getattr(newev.photon_start, attribute)), 'compare %s' % attribute)
+            self.assertTrue(np.allclose(getattr(ev.photon_stop, attribute),
+                                        getattr(newev.photon_stop, attribute)), 'compare %s' % attribute)