summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorStan Seibert <stan@mtrr.org>2011-09-19 14:36:13 -0400
committerStan Seibert <stan@mtrr.org>2011-09-19 14:36:13 -0400
commita21b05e4727403e2e061234289af9e60e6022e5a (patch)
tree7a7d4c5809c370f3e542cfc8cb0bec7c2e4f5cdc
parentcfecff941fc619eb7269128afc62d9c11ae78aff (diff)
parenta38c56ff1e268298568077af7f03c8ac64c6fb82 (diff)
downloadchroma-a21b05e4727403e2e061234289af9e60e6022e5a.tar.gz
chroma-a21b05e4727403e2e061234289af9e60e6022e5a.tar.bz2
chroma-a21b05e4727403e2e061234289af9e60e6022e5a.zip
merge relayout branch
-rw-r--r--.hgignore6
-rw-r--r--.rootlogon.C2
-rw-r--r--LICENSE.txt11
-rwxr-xr-xbin/chroma-cam (renamed from chroma-cam)0
-rwxr-xr-xbin/chroma-sim (renamed from chroma-sim)0
-rw-r--r--chroma/__init__.py (renamed from __init__.py)2
-rwxr-xr-xchroma/benchmark.py (renamed from benchmark.py)1
-rw-r--r--[-rwxr-xr-x]chroma/camera.py (renamed from camera.py)2
-rw-r--r--chroma/color/Illuminanta.csv (renamed from color/Illuminanta.csv)0
-rw-r--r--chroma/color/__init__.py (renamed from color/__init__.py)0
-rw-r--r--chroma/color/chromaticity.py (renamed from color/chromaticity.py)0
-rw-r--r--chroma/color/ciexyz64_1.csv (renamed from color/ciexyz64_1.csv)0
-rw-r--r--chroma/color/colormap.py (renamed from color/colormap.py)0
-rw-r--r--chroma/color/sbrgb10w.csv (renamed from color/sbrgb10w.csv)0
-rw-r--r--chroma/color/scvle_1.csv (renamed from color/scvle_1.csv)0
-rw-r--r--chroma/color/vl1924e_1.csv (renamed from color/vl1924e_1.csv)0
-rw-r--r--chroma/cuda/__init__.py2
-rw-r--r--chroma/cuda/daq.cu (renamed from src/daq.cu)0
-rw-r--r--chroma/cuda/geometry.h (renamed from src/geometry.h)0
-rw-r--r--chroma/cuda/hybrid_render.cu (renamed from src/hybrid_render.cu)0
-rw-r--r--chroma/cuda/intersect.h (renamed from src/intersect.h)0
-rw-r--r--chroma/cuda/linalg.h (renamed from src/linalg.h)0
-rw-r--r--chroma/cuda/matrix.h (renamed from src/matrix.h)0
-rw-r--r--chroma/cuda/mesh.h (renamed from src/mesh.h)0
-rw-r--r--chroma/cuda/photon.h (renamed from src/photon.h)0
-rw-r--r--chroma/cuda/physical_constants.h (renamed from src/physical_constants.h)0
-rw-r--r--chroma/cuda/propagate.cu (renamed from src/propagate.cu)0
-rw-r--r--chroma/cuda/random.h (renamed from src/random.h)0
-rw-r--r--chroma/cuda/render.cu (renamed from src/render.cu)0
-rw-r--r--chroma/cuda/rotate.h (renamed from src/rotate.h)0
-rw-r--r--chroma/cuda/sorting.h (renamed from src/sorting.h)0
-rw-r--r--chroma/cuda/tools.cu (renamed from src/tools.cu)0
-rw-r--r--chroma/cuda/transform.cu (renamed from src/transform.cu)0
-rw-r--r--chroma/detectors/__init__.py (renamed from detectors/__init__.py)0
-rw-r--r--chroma/detectors/lbne.py (renamed from detectors/lbne.py)0
-rw-r--r--chroma/detectors/miniclean.py (renamed from detectors/miniclean.py)0
-rw-r--r--chroma/detectors/miniclean_cassettes.txt (renamed from detectors/miniclean_cassettes.txt)0
-rw-r--r--chroma/detectors/miniclean_polygons.txt (renamed from detectors/miniclean_polygons.txt)0
-rw-r--r--chroma/detectors/sno.py (renamed from detectors/sno.py)0
-rw-r--r--chroma/detectors/sno_av_ascii.stl.bz2 (renamed from detectors/sno_av_ascii.stl.bz2)bin10939133 -> 10939133 bytes
-rw-r--r--chroma/detectors/sno_pmt_info.hdf5 (renamed from detectors/sno_pmt_info.hdf5)bin128083 -> 128083 bytes
-rw-r--r--chroma/event.py (renamed from event.py)0
-rw-r--r--chroma/generator/__init__.py (renamed from generator/__init__.py)0
-rw-r--r--chroma/generator/g4gen.py (renamed from generator/g4gen.py)32
-rw-r--r--chroma/generator/photon.py (renamed from generator/photon.py)1
-rw-r--r--chroma/generator/vertex.py (renamed from generator/vertex.py)0
-rw-r--r--chroma/geometry.py (renamed from geometry.py)0
-rw-r--r--chroma/gpu/__init__.py6
-rw-r--r--chroma/gpu/daq.py49
-rw-r--r--chroma/gpu/geometry.py174
-rw-r--r--chroma/gpu/pdf.py177
-rw-r--r--chroma/gpu/photon.py175
-rw-r--r--chroma/gpu/render.py66
-rw-r--r--chroma/gpu/tools.py180
-rw-r--r--chroma/io/__init__.py (renamed from fileio/__init__.py)0
-rw-r--r--chroma/io/root.C (renamed from fileio/root.C)0
-rw-r--r--chroma/io/root.py (renamed from fileio/root.py)23
-rw-r--r--chroma/itertoolset.py (renamed from itertoolset.py)0
-rw-r--r--chroma/likelihood.py (renamed from likelihood.py)7
-rw-r--r--chroma/make.py (renamed from make.py)0
-rw-r--r--chroma/models/Colbert_HighRes_Brow.STL (renamed from models/Colbert_HighRes_Brow.STL)bin13910384 -> 13910384 bytes
-rw-r--r--chroma/models/Colbert_HighRes_Smile.STL (renamed from models/Colbert_HighRes_Smile.STL)bin8699284 -> 8699284 bytes
-rw-r--r--chroma/models/MiniFig.STL (renamed from models/MiniFig.STL)0
-rw-r--r--chroma/models/companioncube.stl (renamed from models/companioncube.stl)bin862334 -> 862334 bytes
-rw-r--r--chroma/models/liberty.stl (renamed from models/liberty.stl)bin853084 -> 853084 bytes
-rw-r--r--chroma/models/lionsolid.stl (renamed from models/lionsolid.stl)bin3717984 -> 3717984 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Barrel_Insert.STL (renamed from models/portal_gun/Portal_Gun_Barrel_Insert.STL)bin122284 -> 122284 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Bottom_Cover.STL (renamed from models/portal_gun/Portal_Gun_Bottom_Cover.STL)bin3691384 -> 3691384 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Center_Tube.STL (renamed from models/portal_gun/Portal_Gun_Center_Tube.STL)bin45884 -> 45884 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Claw.STL (renamed from models/portal_gun/Portal_Gun_Claw.STL)bin248084 -> 248084 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Left_Half.STL (renamed from models/portal_gun/Portal_Gun_Left_Half.STL)bin2139984 -> 2139984 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Right_Half.STL (renamed from models/portal_gun/Portal_Gun_Right_Half.STL)bin2387784 -> 2387784 bytes
-rw-r--r--chroma/models/portal_gun/Portal_Gun_Top_Claw_Mount.STL (renamed from models/portal_gun/Portal_Gun_Top_Claw_Mount.STL)bin202884 -> 202884 bytes
-rw-r--r--chroma/models/tie_interceptor6.stl (renamed from models/tie_interceptor6.stl)bin442284 -> 442284 bytes
-rw-r--r--chroma/optics.py (renamed from optics.py)0
-rw-r--r--chroma/parabola.py88
-rw-r--r--chroma/pi0.py (renamed from pi0.py)0
-rw-r--r--chroma/project.py (renamed from project.py)3
-rw-r--r--chroma/sample.py (renamed from sample.py)0
-rw-r--r--chroma/scenes/__init__.py (renamed from scenes/__init__.py)0
-rw-r--r--chroma/scenes/checkerboard.py (renamed from scenes/checkerboard.py)0
-rw-r--r--[-rwxr-xr-x]chroma/sim.py (renamed from sim.py)0
-rw-r--r--chroma/solids/__init__.py (renamed from solids/__init__.py)0
-rw-r--r--chroma/solids/hamamatsu_12inch.txt (renamed from solids/hamamatsu_12inch.txt)0
-rw-r--r--chroma/solids/pmts.py (renamed from solids/pmts.py)0
-rw-r--r--chroma/solids/sno_cone.txt (renamed from solids/sno_cone.txt)0
-rw-r--r--chroma/solids/sno_pmt.txt (renamed from solids/sno_pmt.txt)0
-rw-r--r--chroma/solids/sno_pmt_reduced.txt (renamed from solids/sno_pmt_reduced.txt)0
-rw-r--r--chroma/stl.py (renamed from stl.py)0
-rw-r--r--chroma/tools.py (renamed from tools.py)0
-rw-r--r--chroma/transform.py (renamed from transform.py)0
-rw-r--r--distribute_setup.py485
-rw-r--r--gpu.py799
-rw-r--r--setup.py35
-rw-r--r--src/G4chroma.cc (renamed from generator/G4chroma.cc)2
-rw-r--r--src/G4chroma.hh (renamed from generator/G4chroma.hh)0
-rw-r--r--src/mute.cc41
-rw-r--r--test/__init__.py (renamed from src/__init__.py)0
-rw-r--r--test/data/ray_intersection.npz (renamed from tests/data/ray_intersection.npz)bin1920080 -> 1920080 bytes
-rw-r--r--test/linalg_test.cu (renamed from tests/linalg_test.cu)0
-rw-r--r--test/linalg_test.py (renamed from tests/linalg_test.py)2
-rw-r--r--test/matrix_test.cu (renamed from tests/matrix_test.cu)0
-rw-r--r--test/matrix_test.py (renamed from tests/matrix_test.py)3
-rw-r--r--test/rotate_test.cu (renamed from tests/rotate_test.cu)0
-rw-r--r--test/rotate_test.py (renamed from tests/rotate_test.py)2
-rw-r--r--test/test_generator_photon.py (renamed from tests/test_generator_photon.py)0
-rw-r--r--test/test_generator_vertex.py (renamed from tests/test_generator_vertex.py)0
-rw-r--r--test/test_io.py (renamed from tests/test_fileio.py)4
-rw-r--r--test/test_parabola.py66
-rw-r--r--test/test_pdf.py (renamed from tests/test_pdf.py)0
-rw-r--r--test/test_propagation.py (renamed from tests/test_propagation.py)0
-rw-r--r--test/test_ray_intersection.py (renamed from tests/test_ray_intersection.py)2
-rw-r--r--test/test_rayleigh.py (renamed from tests/test_rayleigh.py)0
-rw-r--r--test/test_sample_cdf.cu (renamed from tests/test_sample_cdf.cu)0
-rw-r--r--test/test_sample_cdf.py67
-rw-r--r--tests/__init__.py0
-rw-r--r--tests/test_sample_cdf.py64
117 files changed, 1675 insertions, 904 deletions
diff --git a/.hgignore b/.hgignore
index 88cf9cb..f321c54 100644
--- a/.hgignore
+++ b/.hgignore
@@ -5,4 +5,8 @@ syntax:glob
*.d
*.root
*.lprof
-./doc/build \ No newline at end of file
+./doc/build
+build/
+Chroma.egg-info/
+distribute*.gz
+distribute*.egg
diff --git a/.rootlogon.C b/.rootlogon.C
index 17915c5..ce0159c 100644
--- a/.rootlogon.C
+++ b/.rootlogon.C
@@ -1,3 +1,3 @@
{
- gROOT->ProcessLine(".L fileio/root.C+g");
+ //gROOT->ProcessLine(".L fileio/root.C+g");
}
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..c3a978d
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,11 @@
+Copyright (c) 2011, Anthony LaTorre and Stanley Seibert
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/chroma-cam b/bin/chroma-cam
index 8465aaa..8465aaa 100755
--- a/chroma-cam
+++ b/bin/chroma-cam
diff --git a/chroma-sim b/bin/chroma-sim
index fa106a2..fa106a2 100755
--- a/chroma-sim
+++ b/bin/chroma-sim
diff --git a/__init__.py b/chroma/__init__.py
index 3359bbc..bc6d5cc 100644
--- a/__init__.py
+++ b/chroma/__init__.py
@@ -1,7 +1,7 @@
from camera import Camera, EventViewer, view, build
import geometry
import event
-from fileio import root
+from io import root
import generator
import gpu
import itertoolset
diff --git a/benchmark.py b/chroma/benchmark.py
index c2f7ac9..a45ff90 100755
--- a/benchmark.py
+++ b/chroma/benchmark.py
@@ -8,7 +8,6 @@ import sys
import itertools
from chroma import gpu
-from chroma import camera
from chroma import event
from chroma import sample
from chroma import generator
diff --git a/camera.py b/chroma/camera.py
index 470a1b9..575d13d 100755..100644
--- a/camera.py
+++ b/chroma/camera.py
@@ -18,7 +18,7 @@ from chroma.transform import rotate, make_rotation_matrix
from chroma.sample import uniform_sphere
from chroma.optics import vacuum
from chroma.project import from_film
-from chroma.fileio.root import RootReader
+from chroma.io.root import RootReader
from chroma import make
from chroma import gpu
diff --git a/color/Illuminanta.csv b/chroma/color/Illuminanta.csv
index 3dba86c..3dba86c 100644
--- a/color/Illuminanta.csv
+++ b/chroma/color/Illuminanta.csv
diff --git a/color/__init__.py b/chroma/color/__init__.py
index a8e7550..a8e7550 100644
--- a/color/__init__.py
+++ b/chroma/color/__init__.py
diff --git a/color/chromaticity.py b/chroma/color/chromaticity.py
index 0076957..0076957 100644
--- a/color/chromaticity.py
+++ b/chroma/color/chromaticity.py
diff --git a/color/ciexyz64_1.csv b/chroma/color/ciexyz64_1.csv
index f437db7..f437db7 100644
--- a/color/ciexyz64_1.csv
+++ b/chroma/color/ciexyz64_1.csv
diff --git a/color/colormap.py b/chroma/color/colormap.py
index 6f3e056..6f3e056 100644
--- a/color/colormap.py
+++ b/chroma/color/colormap.py
diff --git a/color/sbrgb10w.csv b/chroma/color/sbrgb10w.csv
index b654ad3..b654ad3 100644
--- a/color/sbrgb10w.csv
+++ b/chroma/color/sbrgb10w.csv
diff --git a/color/scvle_1.csv b/chroma/color/scvle_1.csv
index 0ab7def..0ab7def 100644
--- a/color/scvle_1.csv
+++ b/chroma/color/scvle_1.csv
diff --git a/color/vl1924e_1.csv b/chroma/color/vl1924e_1.csv
index 8b240c0..8b240c0 100644
--- a/color/vl1924e_1.csv
+++ b/chroma/color/vl1924e_1.csv
diff --git a/chroma/cuda/__init__.py b/chroma/cuda/__init__.py
new file mode 100644
index 0000000..dd87cbc
--- /dev/null
+++ b/chroma/cuda/__init__.py
@@ -0,0 +1,2 @@
+import os.path
+srcdir = os.path.dirname(os.path.abspath(__file__))
diff --git a/src/daq.cu b/chroma/cuda/daq.cu
index 5a68846..5a68846 100644
--- a/src/daq.cu
+++ b/chroma/cuda/daq.cu
diff --git a/src/geometry.h b/chroma/cuda/geometry.h
index 2b5eacb..2b5eacb 100644
--- a/src/geometry.h
+++ b/chroma/cuda/geometry.h
diff --git a/src/hybrid_render.cu b/chroma/cuda/hybrid_render.cu
index 29edefa..29edefa 100644
--- a/src/hybrid_render.cu
+++ b/chroma/cuda/hybrid_render.cu
diff --git a/src/intersect.h b/chroma/cuda/intersect.h
index 978bde8..978bde8 100644
--- a/src/intersect.h
+++ b/chroma/cuda/intersect.h
diff --git a/src/linalg.h b/chroma/cuda/linalg.h
index 35b2423..35b2423 100644
--- a/src/linalg.h
+++ b/chroma/cuda/linalg.h
diff --git a/src/matrix.h b/chroma/cuda/matrix.h
index 0a66e58..0a66e58 100644
--- a/src/matrix.h
+++ b/chroma/cuda/matrix.h
diff --git a/src/mesh.h b/chroma/cuda/mesh.h
index d60d801..d60d801 100644
--- a/src/mesh.h
+++ b/chroma/cuda/mesh.h
diff --git a/src/photon.h b/chroma/cuda/photon.h
index 8b7e588..8b7e588 100644
--- a/src/photon.h
+++ b/chroma/cuda/photon.h
diff --git a/src/physical_constants.h b/chroma/cuda/physical_constants.h
index 2ff87cd..2ff87cd 100644
--- a/src/physical_constants.h
+++ b/chroma/cuda/physical_constants.h
diff --git a/src/propagate.cu b/chroma/cuda/propagate.cu
index fdcf532..fdcf532 100644
--- a/src/propagate.cu
+++ b/chroma/cuda/propagate.cu
diff --git a/src/random.h b/chroma/cuda/random.h
index e9d2e75..e9d2e75 100644
--- a/src/random.h
+++ b/chroma/cuda/random.h
diff --git a/src/render.cu b/chroma/cuda/render.cu
index d4ed443..d4ed443 100644
--- a/src/render.cu
+++ b/chroma/cuda/render.cu
diff --git a/src/rotate.h b/chroma/cuda/rotate.h
index 15f8037..15f8037 100644
--- a/src/rotate.h
+++ b/chroma/cuda/rotate.h
diff --git a/src/sorting.h b/chroma/cuda/sorting.h
index 2bf1a5a..2bf1a5a 100644
--- a/src/sorting.h
+++ b/chroma/cuda/sorting.h
diff --git a/src/tools.cu b/chroma/cuda/tools.cu
index 80d06ec..80d06ec 100644
--- a/src/tools.cu
+++ b/chroma/cuda/tools.cu
diff --git a/src/transform.cu b/chroma/cuda/transform.cu
index 1f4405e..1f4405e 100644
--- a/src/transform.cu
+++ b/chroma/cuda/transform.cu
diff --git a/detectors/__init__.py b/chroma/detectors/__init__.py
index 2028300..2028300 100644
--- a/detectors/__init__.py
+++ b/chroma/detectors/__init__.py
diff --git a/detectors/lbne.py b/chroma/detectors/lbne.py
index 97d09e9..97d09e9 100644
--- a/detectors/lbne.py
+++ b/chroma/detectors/lbne.py
diff --git a/detectors/miniclean.py b/chroma/detectors/miniclean.py
index 92dd019..92dd019 100644
--- a/detectors/miniclean.py
+++ b/chroma/detectors/miniclean.py
diff --git a/detectors/miniclean_cassettes.txt b/chroma/detectors/miniclean_cassettes.txt
index a5e59f2..a5e59f2 100644
--- a/detectors/miniclean_cassettes.txt
+++ b/chroma/detectors/miniclean_cassettes.txt
diff --git a/detectors/miniclean_polygons.txt b/chroma/detectors/miniclean_polygons.txt
index 594ed7d..594ed7d 100644
--- a/detectors/miniclean_polygons.txt
+++ b/chroma/detectors/miniclean_polygons.txt
diff --git a/detectors/sno.py b/chroma/detectors/sno.py
index d5c9ce9..d5c9ce9 100644
--- a/detectors/sno.py
+++ b/chroma/detectors/sno.py
diff --git a/detectors/sno_av_ascii.stl.bz2 b/chroma/detectors/sno_av_ascii.stl.bz2
index 1298a60..1298a60 100644
--- a/detectors/sno_av_ascii.stl.bz2
+++ b/chroma/detectors/sno_av_ascii.stl.bz2
Binary files differ
diff --git a/detectors/sno_pmt_info.hdf5 b/chroma/detectors/sno_pmt_info.hdf5
index 1358189..1358189 100644
--- a/detectors/sno_pmt_info.hdf5
+++ b/chroma/detectors/sno_pmt_info.hdf5
Binary files differ
diff --git a/event.py b/chroma/event.py
index a2b99cc..a2b99cc 100644
--- a/event.py
+++ b/chroma/event.py
diff --git a/generator/__init__.py b/chroma/generator/__init__.py
index 5dd5eca..5dd5eca 100644
--- a/generator/__init__.py
+++ b/chroma/generator/__init__.py
diff --git a/generator/g4gen.py b/chroma/generator/g4gen.py
index 91ddc18..0ed5433 100644
--- a/generator/g4gen.py
+++ b/chroma/generator/g4gen.py
@@ -1,22 +1,19 @@
+from chroma.generator.mute import *
+
+import pyublas
+import numpy as np
+from chroma.event import Photons, Vertex
+
+g4mute()
from Geant4 import *
+g4unmute()
import g4py.ezgeom
import g4py.NISTmaterials
import g4py.ParticleGun
-import pyublas
-import numpy as np
-from chroma.event import Photons, Vertex
+from chroma.generator import _g4chroma
-try:
- import G4chroma
-except:
- # Try building the module
- import subprocess
- import sys, os
- module_dir = os.path.split(os.path.realpath(__file__))[0]
- print >>sys.stderr, 'Compiling G4chroma.so...'
- retcode = subprocess.call('g++ -o \'%s/G4chroma.so\' -shared \'%s/G4chroma.cc\' -fPIC `geant4-config --cflags --libs` `python-config --cflags --libs --ldflags` -lboost_python' % (module_dir, module_dir), shell=True)
- assert retcode == 0
- import G4chroma
+#cppmute()
+#cppunmute()
class G4Generator(object):
def __init__(self, material, seed=None):
@@ -37,7 +34,7 @@ class G4Generator(object):
g4py.NISTmaterials.Construct()
g4py.ezgeom.Construct()
- self.physics_list = G4chroma.ChromaPhysicsList()
+ self.physics_list = _g4chroma.ChromaPhysicsList()
gRunManager.SetUserInitialization(self.physics_list)
self.particle_gun = g4py.ParticleGun.Construct()
@@ -49,10 +46,11 @@ class G4Generator(object):
self.world.CreateBoxVolume(self.world_material, 100*m, 100*m, 100*m)
self.world.PlaceIt(G4ThreeVector(0,0,0))
- self.tracking_action = G4chroma.PhotonTrackingAction()
+ self.tracking_action = _g4chroma.PhotonTrackingAction()
gRunManager.SetUserAction(self.tracking_action)
+ g4mute()
gRunManager.Initialize()
-
+ g4unmute()
# preinitialize the process by running a simple event
self.generate_photons([Vertex('e-', (0,0,0), (1,0,0), 0, 1.0)])
diff --git a/generator/photon.py b/chroma/generator/photon.py
index 6c656b2..39e8cf4 100644
--- a/generator/photon.py
+++ b/chroma/generator/photon.py
@@ -1,7 +1,6 @@
import g4gen
import multiprocessing
import numpy as np
-import itertools
import threading
import zmq
import uuid
diff --git a/generator/vertex.py b/chroma/generator/vertex.py
index 0ec4b31..0ec4b31 100644
--- a/generator/vertex.py
+++ b/chroma/generator/vertex.py
diff --git a/geometry.py b/chroma/geometry.py
index f942222..f942222 100644
--- a/geometry.py
+++ b/chroma/geometry.py
diff --git a/chroma/gpu/__init__.py b/chroma/gpu/__init__.py
new file mode 100644
index 0000000..b5a1ff3
--- /dev/null
+++ b/chroma/gpu/__init__.py
@@ -0,0 +1,6 @@
+from chroma.gpu.tools import *
+from chroma.gpu.geometry import *
+from chroma.gpu.render import *
+from chroma.gpu.photon import *
+from chroma.gpu.daq import *
+from chroma.gpu.pdf import *
diff --git a/chroma/gpu/daq.py b/chroma/gpu/daq.py
new file mode 100644
index 0000000..82958dc
--- /dev/null
+++ b/chroma/gpu/daq.py
@@ -0,0 +1,49 @@
+import numpy as np
+from pycuda import gpuarray as ga
+
+from chroma.gpu.tools import get_cu_module, cuda_options, GPUFuncs, \
+ chunk_iterator
+from chroma import event
+
+class GPUChannels(object):
+ def __init__(self, t, q, flags):
+ self.t = t
+ self.q = q
+ self.flags = flags
+
+ def get(self):
+ t = self.t.get()
+ q = self.q.get().astype(np.float32)
+
+ # For now, assume all channels with small
+ # enough hit time were hit.
+ return event.Channels(t<1e8, t, q, self.flags.get())
+
+class GPUDaq(object):
+ def __init__(self, gpu_geometry, max_pmt_id, pmt_rms=1.2e-9):
+ self.earliest_time_gpu = ga.empty(max_pmt_id+1, dtype=np.float32)
+ self.earliest_time_int_gpu = ga.empty(max_pmt_id+1, dtype=np.uint32)
+ self.channel_history_gpu = ga.zeros_like(self.earliest_time_int_gpu)
+ self.channel_q_gpu = ga.zeros_like(self.earliest_time_int_gpu)
+ self.daq_pmt_rms = pmt_rms
+ self.solid_id_map_gpu = gpu_geometry.solid_id_map
+
+ self.module = get_cu_module('daq.cu', options=cuda_options,
+ include_source_directory=False)
+ self.gpu_funcs = GPUFuncs(self.module)
+
+ def acquire(self, gpuphotons, rng_states, nthreads_per_block=64, max_blocks=1024):
+ self.gpu_funcs.reset_earliest_time_int(np.float32(1e9), np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, block=(nthreads_per_block,1,1), grid=(len(self.earliest_time_int_gpu)//nthreads_per_block+1,1))
+ self.channel_q_gpu.fill(0)
+ self.channel_history_gpu.fill(0)
+
+ n = len(gpuphotons.pos)
+
+ for first_photon, photons_this_round, blocks in \
+ chunk_iterator(n, nthreads_per_block, max_blocks):
+ self.gpu_funcs.run_daq(rng_states, np.uint32(0x1 << 2), np.float32(self.daq_pmt_rms), np.int32(first_photon), np.int32(photons_this_round), gpuphotons.t, gpuphotons.flags, gpuphotons.last_hit_triangles, self.solid_id_map_gpu, np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, self.channel_q_gpu, self.channel_history_gpu, block=(nthreads_per_block,1,1), grid=(blocks,1))
+
+ self.gpu_funcs.convert_sortable_int_to_float(np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, self.earliest_time_gpu, block=(nthreads_per_block,1,1), grid=(len(self.earliest_time_int_gpu)//nthreads_per_block+1,1))
+
+ return GPUChannels(self.earliest_time_gpu, self.channel_q_gpu, self.channel_history_gpu)
+
diff --git a/chroma/gpu/geometry.py b/chroma/gpu/geometry.py
new file mode 100644
index 0000000..a223006
--- /dev/null
+++ b/chroma/gpu/geometry.py
@@ -0,0 +1,174 @@
+import numpy as np
+import pycuda.driver as cuda
+from pycuda import gpuarray as ga
+from pycuda import characterize
+
+from chroma.geometry import standard_wavelengths
+from chroma.gpu.tools import get_cu_module, get_cu_source, cuda_options, \
+ chunk_iterator, format_array, format_size, to_uint3, to_float3, \
+ make_gpu_struct
+
+class GPUGeometry(object):
+ def __init__(self, geometry, wavelengths=None, print_usage=False):
+ if wavelengths is None:
+ wavelengths = standard_wavelengths
+
+ try:
+ wavelength_step = np.unique(np.diff(wavelengths)).item()
+ except ValueError:
+ raise ValueError('wavelengths must be equally spaced apart.')
+
+ geometry_source = get_cu_source('geometry.h')
+ material_struct_size = characterize.sizeof('Material', geometry_source)
+ surface_struct_size = characterize.sizeof('Surface', geometry_source)
+ geometry_struct_size = characterize.sizeof('Geometry', geometry_source)
+
+ self.material_data = []
+ self.material_ptrs = []
+
+ def interp_material_property(wavelengths, property):
+ # note that it is essential that the material properties be
+ # interpolated linearly. this fact is used in the propagation
+ # code to guarantee that probabilities still sum to one.
+ return np.interp(wavelengths, property[:,0], property[:,1]).astype(np.float32)
+
+ for i in range(len(geometry.unique_materials)):
+ material = geometry.unique_materials[i]
+
+ if material is None:
+ raise Exception('one or more triangles is missing a material.')
+
+ refractive_index = interp_material_property(wavelengths, material.refractive_index)
+ refractive_index_gpu = ga.to_gpu(refractive_index)
+ absorption_length = interp_material_property(wavelengths, material.absorption_length)
+ absorption_length_gpu = ga.to_gpu(absorption_length)
+ scattering_length = interp_material_property(wavelengths, material.scattering_length)
+ scattering_length_gpu = ga.to_gpu(scattering_length)
+
+ self.material_data.append(refractive_index_gpu)
+ self.material_data.append(absorption_length_gpu)
+ self.material_data.append(scattering_length_gpu)
+
+ material_gpu = \
+ make_gpu_struct(material_struct_size,
+ [refractive_index_gpu, absorption_length_gpu,
+ scattering_length_gpu,
+ np.uint32(len(wavelengths)),
+ np.float32(wavelength_step),
+ np.float32(wavelengths[0])])
+
+ self.material_ptrs.append(material_gpu)
+
+ self.material_pointer_array = \
+ make_gpu_struct(8*len(self.material_ptrs), self.material_ptrs)
+
+ self.surface_data = []
+ self.surface_ptrs = []
+
+ for i in range(len(geometry.unique_surfaces)):
+ surface = geometry.unique_surfaces[i]
+
+ if surface is None:
+ # need something to copy to the surface array struct
+ # that is the same size as a 64-bit pointer.
+ # this pointer will never be used by the simulation.
+ self.surface_ptrs.append(np.uint64(0))
+ continue
+
+ detect = interp_material_property(wavelengths, surface.detect)
+ detect_gpu = ga.to_gpu(detect)
+ absorb = interp_material_property(wavelengths, surface.absorb)
+ absorb_gpu = ga.to_gpu(absorb)
+ reflect_diffuse = interp_material_property(wavelengths, surface.reflect_diffuse)
+ reflect_diffuse_gpu = ga.to_gpu(reflect_diffuse)
+ reflect_specular = interp_material_property(wavelengths, surface.reflect_specular)
+ reflect_specular_gpu = ga.to_gpu(reflect_specular)
+
+ self.surface_data.append(detect_gpu)
+ self.surface_data.append(absorb_gpu)
+ self.surface_data.append(reflect_diffuse_gpu)
+ self.surface_data.append(reflect_specular_gpu)
+
+ surface_gpu = \
+ make_gpu_struct(surface_struct_size,
+ [detect_gpu, absorb_gpu,
+ reflect_diffuse_gpu,
+ reflect_specular_gpu,
+ np.uint32(len(wavelengths)),
+ np.float32(wavelength_step),
+ np.float32(wavelengths[0])])
+
+ self.surface_ptrs.append(surface_gpu)
+
+ self.surface_pointer_array = \
+ make_gpu_struct(8*len(self.surface_ptrs), self.surface_ptrs)
+
+ self.vertices = ga.to_gpu(to_float3(geometry.mesh.vertices))
+ self.triangles = ga.to_gpu(to_uint3(geometry.mesh.triangles))
+
+ material_codes = (((geometry.material1_index & 0xff) << 24) |
+ ((geometry.material2_index & 0xff) << 16) |
+ ((geometry.surface_index & 0xff) << 8)).astype(np.uint32)
+
+ self.material_codes = ga.to_gpu(material_codes)
+
+ self.lower_bounds = ga.to_gpu(to_float3(geometry.lower_bounds))
+ self.upper_bounds = ga.to_gpu(to_float3(geometry.upper_bounds))
+ self.colors = ga.to_gpu(geometry.colors.astype(np.uint32))
+ self.node_map = ga.to_gpu(geometry.node_map.astype(np.uint32))
+ self.node_map_end = ga.to_gpu(geometry.node_map_end.astype(np.uint32))
+ self.solid_id_map = ga.to_gpu(geometry.solid_id.astype(np.uint32))
+
+ self.gpudata = make_gpu_struct(geometry_struct_size,
+ [self.vertices, self.triangles,
+ self.material_codes,
+ self.colors, self.lower_bounds,
+ self.upper_bounds, self.node_map,
+ self.node_map_end,
+ self.material_pointer_array,
+ self.surface_pointer_array,
+ np.uint32(geometry.start_node),
+ np.uint32(geometry.first_node)])
+
+ self.geometry = geometry
+
+ if print_usage:
+ self.print_device_usage()
+
+ def print_device_usage(self):
+ print 'device usage:'
+ print '-'*10
+ print format_array('vertices', self.vertices)
+ print format_array('triangles', self.triangles)
+ print format_array('lower_bounds', self.lower_bounds)
+ print format_array('upper_bounds', self.upper_bounds)
+ print format_array('node_map', self.node_map)
+ print format_array('node_map_end', self.node_map_end)
+ print '%-15s %6s %6s' % ('total', '', format_size(self.vertices.nbytes + self.triangles.nbytes + self.lower_bounds.nbytes + self.upper_bounds.nbytes + self.node_map.nbytes + self.node_map_end.nbytes))
+ print '-'*10
+ free, total = cuda.mem_get_info()
+ print '%-15s %6s %6s' % ('device total', '', format_size(total))
+ print '%-15s %6s %6s' % ('device used', '', format_size(total-free))
+ print '%-15s %6s %6s' % ('device free', '', format_size(free))
+ print
+
+ def reset_colors(self):
+ self.colors.set_async(self.geometry.colors.astype(np.uint32))
+
+ def color_solids(self, solid_hit, colors, nblocks_per_thread=64,
+ max_blocks=1024):
+ solid_hit_gpu = ga.to_gpu(np.array(solid_hit, dtype=np.bool))
+ solid_colors_gpu = ga.to_gpu(np.array(colors, dtype=np.uint32))
+
+ module = get_cu_module('mesh.h', options=cuda_options)
+ color_solids = module.get_function('color_solids')
+
+ for first_triangle, triangles_this_round, blocks in \
+ chunk_iterator(self.triangles.size, nblocks_per_thread,
+ max_blocks):
+ color_solids(np.int32(first_triangle),
+ np.int32(triangles_this_round), self.solid_id_map,
+ solid_hit_gpu, solid_colors_gpu, self.gpudata,
+ block=(nblocks_per_thread,1,1),
+ grid=(blocks,1))
+
diff --git a/chroma/gpu/pdf.py b/chroma/gpu/pdf.py
new file mode 100644
index 0000000..053d5f3
--- /dev/null
+++ b/chroma/gpu/pdf.py
@@ -0,0 +1,177 @@
+import numpy as np
+from pycuda import gpuarray as ga
+
+from chroma.gpu.tools import get_cu_module, cuda_options, GPUFuncs
+
+class GPUPDF(object):
+ def __init__(self):
+ self.module = get_cu_module('daq.cu', options=cuda_options,
+ include_source_directory=False)
+ self.gpu_funcs = GPUFuncs(self.module)
+
+ def setup_pdf(self, max_pmt_id, tbins, trange, qbins, qrange):
+ """Setup GPU arrays to hold PDF information.
+
+ max_pmt_id: int, largest PMT id #
+ tbins: number of time bins
+ trange: tuple of (min, max) time in PDF
+ qbins: number of charge bins
+ qrange: tuple of (min, max) charge in PDF
+ """
+ self.events_in_histogram = 0
+ self.hitcount_gpu = ga.zeros(max_pmt_id+1, dtype=np.uint32)
+ self.pdf_gpu = ga.zeros(shape=(max_pmt_id+1, tbins, qbins),
+ dtype=np.uint32)
+ self.tbins = tbins
+ self.trange = trange
+ self.qbins = qbins
+ self.qrange = qrange
+
+ def clear_pdf(self):
+ """Rezero the PDF counters."""
+ self.hitcount_gpu.fill(0)
+ self.pdf_gpu.fill(0)
+
+ def add_hits_to_pdf(self, gpuchannels, nthreads_per_block=64):
+ self.gpu_funcs.bin_hits(np.int32(len(self.hitcount_gpu)),
+ gpuchannels.q,
+ gpuchannels.t,
+ self.hitcount_gpu,
+ np.int32(self.tbins),
+ np.float32(self.trange[0]),
+ np.float32(self.trange[1]),
+ np.int32(self.qbins),
+ np.float32(self.qrange[0]),
+ np.float32(self.qrange[1]),
+ self.pdf_gpu,
+ block=(nthreads_per_block,1,1),
+ grid=(len(gpuchannels.t)//nthreads_per_block+1,1))
+
+
+ self.events_in_histogram += 1
+
+ def get_pdfs(self):
+ """Returns the 1D hitcount array and the 3D [channel, time, charge]
+ histogram."""
+ return self.hitcount_gpu.get(), self.pdf_gpu.get()
+
+ def setup_pdf_eval(self, event_hit, event_time, event_charge, min_twidth,
+ trange, min_qwidth, qrange, min_bin_content=10,
+ time_only=True):
+ """Setup GPU arrays to compute PDF values for the given event.
+ The pdf_eval calculation allows the PDF to be evaluated at a
+ single point for each channel as the Monte Carlo is run. The
+ effective bin size will be as small as (`min_twidth`,
+ `min_qwidth`) around the point of interest, but will be large
+ enough to ensure that `min_bin_content` Monte Carlo events
+ fall into the bin.
+
+ event_hit: ndarray
+ Hit or not-hit status for each channel in the detector.
+ event_time: ndarray
+ Hit time for each channel in the detector. If channel
+ not hit, the time will be ignored.
+ event_charge: ndarray
+ Integrated charge for each channel in the detector.
+ If channel not hit, the charge will be ignored.
+
+ min_twidth: float
+ Minimum bin size in the time dimension
+ trange: (float, float)
+ Range of time dimension in PDF
+ min_qwidth: float
+ Minimum bin size in charge dimension
+ qrange: (float, float)
+ Range of charge dimension in PDF
+ min_bin_content: int
+ The bin will be expanded to include at least this many events
+ time_only: bool
+ If True, only the time observable will be used in the PDF.
+ """
+ self.event_hit_gpu = ga.to_gpu(event_hit.astype(np.uint32))
+ self.event_time_gpu = ga.to_gpu(event_time.astype(np.float32))
+ self.event_charge_gpu = ga.to_gpu(event_charge.astype(np.float32))
+
+ self.eval_hitcount_gpu = ga.zeros(len(event_hit), dtype=np.uint32)
+ self.eval_bincount_gpu = ga.zeros(len(event_hit), dtype=np.uint32)
+ self.nearest_mc_gpu = ga.empty(shape=len(event_hit) * min_bin_content,
+ dtype=np.float32)
+ self.nearest_mc_gpu.fill(1e9)
+
+ self.min_twidth = min_twidth
+ self.trange = trange
+ self.min_qwidth = min_qwidth
+ self.qrange = qrange
+ self.min_bin_content = min_bin_content
+ self.time_only = time_only
+
+ def clear_pdf_eval(self):
+ "Reset PDF evaluation counters to start accumulating new Monte Carlo."
+ self.eval_hitcount_gpu.fill(0)
+ self.eval_bincount_gpu.fill(0)
+ self.nearest_mc_gpu.fill(1e9)
+
+ def accumulate_pdf_eval(self, gpuchannels, nthreads_per_block=64):
+ "Add the most recent results of run_daq() to the PDF evaluation."
+ self.gpu_funcs.accumulate_pdf_eval(np.int32(self.time_only),
+ np.int32(len(self.event_hit_gpu)),
+ self.event_hit_gpu,
+ self.event_time_gpu,
+ self.event_charge_gpu,
+ gpuchannels.t,
+ gpuchannels.q,
+ self.eval_hitcount_gpu,
+ self.eval_bincount_gpu,
+ np.float32(self.min_twidth),
+ np.float32(self.trange[0]),
+ np.float32(self.trange[1]),
+ np.float32(self.min_qwidth),
+ np.float32(self.qrange[0]),
+ np.float32(self.qrange[1]),
+ self.nearest_mc_gpu,
+ np.int32(self.min_bin_content),
+ block=(nthreads_per_block,1,1),
+ grid=(len(gpuchannels.t)//nthreads_per_block+1,1))
+
+ def get_pdf_eval(self):
+ evhit = self.event_hit_gpu.get().astype(bool)
+ hitcount = self.eval_hitcount_gpu.get()
+ bincount = self.eval_bincount_gpu.get()
+
+ pdf_value = np.zeros(len(hitcount), dtype=float)
+ pdf_frac_uncert = np.zeros_like(pdf_value)
+
+ # PDF value for high stats bins
+ high_stats = bincount >= self.min_bin_content
+ if high_stats.any():
+ if self.time_only:
+ pdf_value[high_stats] = bincount[high_stats].astype(float) / hitcount[high_stats] / self.min_twidth
+ else:
+                raise Exception('Unimplemented 2D (time,charge) mode!')
+
+ pdf_frac_uncert[high_stats] = 1.0/np.sqrt(bincount[high_stats])
+
+ # PDF value for low stats bins
+ low_stats = ~high_stats & (hitcount > 0) & evhit
+
+ nearest_mc = self.nearest_mc_gpu.get().reshape((len(hitcount), self.min_bin_content))
+
+ # Deal with the case where we did not even get min_bin_content events
+ # in the PDF but also clamp the lower range to ensure we don't index
+ # by a negative number in 2 lines
+ last_valid_entry = np.maximum(0, (nearest_mc < 1e9).astype(int).sum(axis=1) - 1)
+ distance = nearest_mc[np.arange(len(last_valid_entry)),last_valid_entry]
+
+ if low_stats.any():
+ if self.time_only:
+ pdf_value[low_stats] = (last_valid_entry[low_stats] + 1).astype(float) / hitcount[low_stats] / distance[low_stats] / 2.0
+ else:
+                raise Exception('Unimplemented 2D (time,charge) mode!')
+
+ pdf_frac_uncert[low_stats] = 1.0/np.sqrt(last_valid_entry[low_stats] + 1)
+
+ # PDFs with no stats got zero by default during array creation
+
+ print 'high_stats:', high_stats.sum(), 'low_stats', low_stats.sum()
+
+ return hitcount, pdf_value, pdf_value * pdf_frac_uncert
diff --git a/chroma/gpu/photon.py b/chroma/gpu/photon.py
new file mode 100644
index 0000000..2a9a1ff
--- /dev/null
+++ b/chroma/gpu/photon.py
@@ -0,0 +1,175 @@
+import numpy as np
+import sys
+import gc
+from pycuda import gpuarray as ga
+
+from chroma.tools import profile_if_possible
+from chroma import event
+from chroma.gpu.tools import get_cu_module, cuda_options, GPUFuncs, \
+ chunk_iterator, to_float3
+
+
+class GPUPhotons(object):
+ def __init__(self, photons, ncopies=1):
+ """Load ``photons`` onto the GPU, replicating as requested.
+
+ Args:
+ - photons: chroma.Event.Photons
+ Photon state information to load onto GPU
+ - ncopies: int, *optional*
+ Number of times to replicate the photons
+ on the GPU. This is used if you want
+ to propagate the same event many times,
+ for example in a likelihood calculation.
+
+ The amount of GPU storage will be proportionally
+ larger if ncopies > 1, so be careful.
+ """
+ nphotons = len(photons)
+ self.pos = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
+ self.dir = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
+ self.pol = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
+ self.wavelengths = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
+ self.t = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
+ self.last_hit_triangles = ga.empty(shape=nphotons*ncopies, dtype=np.int32)
+ self.flags = ga.empty(shape=nphotons*ncopies, dtype=np.uint32)
+
+        # Assign the provided photons to the beginning (possibly
+        # the entire array if ncopies is 1)
+ self.pos[:nphotons].set(to_float3(photons.pos))
+ self.dir[:nphotons].set(to_float3(photons.dir))
+ self.pol[:nphotons].set(to_float3(photons.pol))
+ self.wavelengths[:nphotons].set(photons.wavelengths.astype(np.float32))
+ self.t[:nphotons].set(photons.t.astype(np.float32))
+ self.last_hit_triangles[:nphotons].set(photons.last_hit_triangles.astype(np.int32))
+ self.flags[:nphotons].set(photons.flags.astype(np.uint32))
+
+ module = get_cu_module('propagate.cu', options=cuda_options)
+ self.gpu_funcs = GPUFuncs(module)
+
+ # Replicate the photons to the rest of the slots if needed
+ if ncopies > 1:
+ max_blocks = 1024
+ nthreads_per_block = 64
+ for first_photon, photons_this_round, blocks in \
+ chunk_iterator(nphotons, nthreads_per_block, max_blocks):
+ self.gpu_funcs.photon_duplicate(np.int32(first_photon), np.int32(photons_this_round),
+ self.pos, self.dir, self.wavelengths, self.pol, self.t,
+ self.flags, self.last_hit_triangles,
+ np.int32(ncopies-1),
+ np.int32(nphotons),
+ block=(nthreads_per_block,1,1), grid=(blocks, 1))
+
+
+ # Save the duplication information for the iterate_copies() method
+ self.true_nphotons = nphotons
+ self.ncopies = ncopies
+
+ def get(self):
+ pos = self.pos.get().view(np.float32).reshape((len(self.pos),3))
+ dir = self.dir.get().view(np.float32).reshape((len(self.dir),3))
+ pol = self.pol.get().view(np.float32).reshape((len(self.pol),3))
+ wavelengths = self.wavelengths.get()
+ t = self.t.get()
+ last_hit_triangles = self.last_hit_triangles.get()
+ flags = self.flags.get()
+ return event.Photons(pos, dir, pol, wavelengths, t, last_hit_triangles, flags)
+
+ def iterate_copies(self):
+ '''Returns an iterator that yields GPUPhotonsSlice objects
+ corresponding to the event copies stored in ``self``.'''
+ for i in xrange(self.ncopies):
+ window = slice(self.true_nphotons*i, self.true_nphotons*(i+1))
+ yield GPUPhotonsSlice(pos=self.pos[window],
+ dir=self.dir[window],
+ pol=self.pol[window],
+ wavelengths=self.wavelengths[window],
+ t=self.t[window],
+ last_hit_triangles=self.last_hit_triangles[window],
+ flags=self.flags[window])
+
+ @profile_if_possible
+ def propagate(self, gpu_geometry, rng_states, nthreads_per_block=64,
+ max_blocks=1024, max_steps=10):
+ """Propagate photons on GPU to termination or max_steps, whichever
+ comes first.
+
+ May be called repeatedly without reloading photon information if
+ single-stepping through photon history.
+
+ ..warning::
+ `rng_states` must have at least `nthreads_per_block`*`max_blocks`
+ number of curandStates.
+ """
+ nphotons = self.pos.size
+ step = 0
+ input_queue = np.empty(shape=nphotons+1, dtype=np.uint32)
+ input_queue[0] = 0
+ # Order photons initially in the queue to put the clones next to each other
+ for copy in xrange(self.ncopies):
+ input_queue[1+copy::self.ncopies] = np.arange(self.true_nphotons, dtype=np.uint32) + copy * self.true_nphotons
+ input_queue_gpu = ga.to_gpu(input_queue)
+ output_queue = np.zeros(shape=nphotons+1, dtype=np.uint32)
+ output_queue[0] = 1
+ output_queue_gpu = ga.to_gpu(output_queue)
+
+ while step < max_steps:
+ # Just finish the rest of the steps if the # of photons is low
+ if nphotons < nthreads_per_block * 16 * 8:
+ nsteps = max_steps - step
+ else:
+ nsteps = 1
+
+ for first_photon, photons_this_round, blocks in \
+ chunk_iterator(nphotons, nthreads_per_block, max_blocks):
+ self.gpu_funcs.propagate(np.int32(first_photon), np.int32(photons_this_round), input_queue_gpu[1:], output_queue_gpu, rng_states, self.pos, self.dir, self.wavelengths, self.pol, self.t, self.flags, self.last_hit_triangles, np.int32(nsteps), gpu_geometry.gpudata, block=(nthreads_per_block,1,1), grid=(blocks, 1))
+
+ step += nsteps
+
+ if step < max_steps:
+ temp = input_queue_gpu
+ input_queue_gpu = output_queue_gpu
+ output_queue_gpu = temp
+ output_queue_gpu[:1].set(np.uint32(1))
+ nphotons = input_queue_gpu[:1].get()[0] - 1
+
+ if ga.max(self.flags).get() & (1 << 31):
+ print >>sys.stderr, "WARNING: ABORTED PHOTONS"
+
+ def __del__(self):
+ del self.pos
+ del self.dir
+ del self.pol
+ del self.wavelengths
+ del self.t
+ del self.flags
+ del self.last_hit_triangles
+ # Free up GPU memory quickly if now available
+ gc.collect()
+
+class GPUPhotonsSlice(GPUPhotons):
+ '''A `slice`-like view of a subrange of another GPU photons array.
+ Works exactly like an instance of GPUPhotons, but the GPU storage
+ is taken from another GPUPhotons instance.
+
+ Returned by the GPUPhotons.iterate_copies() iterator.'''
+ def __init__(self, pos, dir, pol, wavelengths, t, last_hit_triangles,
+ flags):
+ '''Create new object using slices of GPUArrays from an instance
+ of GPUPhotons. NOTE THESE ARE NOT CPU ARRAYS!'''
+ self.pos = pos
+ self.dir = dir
+ self.pol = pol
+ self.wavelengths = wavelengths
+ self.t = t
+ self.last_hit_triangles = last_hit_triangles
+ self.flags = flags
+
+ module = get_cu_module('propagate.cu', options=cuda_options)
+ self.gpu_funcs = GPUFuncs(module)
+
+ self.true_nphotons = len(pos)
+ self.ncopies = 1
+
+ def __del__(self):
+ pass # Do nothing, because we don't own any of our GPU memory
diff --git a/chroma/gpu/render.py b/chroma/gpu/render.py
new file mode 100644
index 0000000..e645da4
--- /dev/null
+++ b/chroma/gpu/render.py
@@ -0,0 +1,66 @@
+import numpy as np
+from pycuda import gpuarray as ga
+
+from chroma.gpu.tools import get_cu_module, cuda_options, GPUFuncs, \
+ to_float3
+
+class GPURays(object):
+ """The GPURays class holds arrays of ray positions and directions
+ on the GPU that are used to render a geometry."""
+ def __init__(self, pos, dir, max_alpha_depth=10, nblocks=64):
+ self.pos = ga.to_gpu(to_float3(pos))
+ self.dir = ga.to_gpu(to_float3(dir))
+
+ self.max_alpha_depth = max_alpha_depth
+
+ self.nblocks = nblocks
+
+ transform_module = get_cu_module('transform.cu', options=cuda_options)
+ self.transform_funcs = GPUFuncs(transform_module)
+
+ render_module = get_cu_module('render.cu', options=cuda_options)
+ self.render_funcs = GPUFuncs(render_module)
+
+ self.dx = ga.empty(max_alpha_depth*self.pos.size, dtype=np.float32)
+ self.color = ga.empty(self.dx.size, dtype=ga.vec.float4)
+ self.dxlen = ga.zeros(self.pos.size, dtype=np.uint32)
+
+ def rotate(self, phi, n):
+ "Rotate by an angle phi around the axis `n`."
+ self.transform_funcs.rotate(np.int32(self.pos.size), self.pos, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
+ self.transform_funcs.rotate(np.int32(self.dir.size), self.dir, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.dir.size//self.nblocks+1,1))
+
+ def rotate_around_point(self, phi, n, point):
+ """"Rotate by an angle phi around the axis `n` passing through
+ the point `point`."""
+ self.transform_funcs.rotate_around_point(np.int32(self.pos.size), self.pos, np.float32(phi), ga.vec.make_float3(*n), ga.vec.make_float3(*point), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
+ self.transform_funcs.rotate(np.int32(self.dir.size), self.dir, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.dir.size//self.nblocks+1,1))
+
+ def translate(self, v):
+ "Translate the ray positions by the vector `v`."
+ self.transform_funcs.translate(np.int32(self.pos.size), self.pos, ga.vec.make_float3(*v), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
+
+ def render(self, gpu_geometry, pixels, alpha_depth=10,
+ keep_last_render=False):
+ """Render `gpu_geometry` and fill the GPU array `pixels` with pixel
+ colors."""
+ if not keep_last_render:
+ self.dxlen.fill(0)
+
+ if alpha_depth > self.max_alpha_depth:
+ raise Exception('alpha_depth > max_alpha_depth')
+
+ if not isinstance(pixels, ga.GPUArray):
+ raise TypeError('`pixels` must be a %s instance.' % ga.GPUArray)
+
+ if pixels.size != self.pos.size:
+ raise ValueError('`pixels`.size != number of rays')
+
+ self.render_funcs.render(np.int32(self.pos.size), self.pos, self.dir, gpu_geometry.gpudata, np.uint32(alpha_depth), pixels, self.dx, self.dxlen, self.color, block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
+
+ def snapshot(self, gpu_geometry, alpha_depth=10):
+ "Render `gpu_geometry` and return a numpy array of pixel colors."
+ pixels = ga.empty(self.pos.size, dtype=np.uint32)
+ self.render(gpu_geometry, pixels, alpha_depth)
+ return pixels.get()
+
diff --git a/chroma/gpu/tools.py b/chroma/gpu/tools.py
new file mode 100644
index 0000000..a11a561
--- /dev/null
+++ b/chroma/gpu/tools.py
@@ -0,0 +1,180 @@
+import numpy as np
+import pycuda.tools
+from pycuda import characterize
+import pycuda.driver as cuda
+from pycuda import gpuarray as ga
+
+from chroma.cuda import srcdir
+
+cuda.init()
+
+# standard nvcc options
+cuda_options = ('--use_fast_math',)#, '--ptxas-options=-v')
+
+@pycuda.tools.context_dependent_memoize
+def get_cu_module(name, options=None, include_source_directory=True):
+ """Returns a pycuda.compiler.SourceModule object from a CUDA source file
+ located in the chroma cuda directory at cuda/[name]."""
+ if options is None:
+ options = []
+ elif isinstance(options, tuple):
+ options = list(options)
+ else:
+ raise TypeError('`options` must be a tuple.')
+
+ if include_source_directory:
+ options += ['-I' + srcdir]
+
+ with open('%s/%s' % (srcdir, name)) as f:
+ source = f.read()
+
+ return pycuda.compiler.SourceModule(source, options=options,
+ no_extern_c=True)
+
+@pycuda.tools.memoize
+def get_cu_source(name):
+ """Get the source code for a CUDA source file located in the chroma cuda
+ directory at src/[name]."""
+ with open('%s/%s' % (srcdir, name)) as f:
+ source = f.read()
+ return source
+
+class GPUFuncs(object):
+ """Simple container class for GPU functions as attributes."""
+ def __init__(self, module):
+ self.module = module
+ self.funcs = {}
+
+ def __getattr__(self, name):
+ try:
+ return self.funcs[name]
+ except KeyError:
+ f = self.module.get_function(name)
+ self.funcs[name] = f
+ return f
+
+init_rng_src = """
+#include <curand_kernel.h>
+
+extern "C"
+{
+
+__global__ void init_rng(int nthreads, curandState *s, unsigned long long seed, unsigned long long offset)
+{
+ int id = blockIdx.x*blockDim.x + threadIdx.x;
+
+ if (id >= nthreads)
+ return;
+
+ curand_init(seed, id, offset, &s[id]);
+}
+
+} // extern "C"
+"""
+
+def get_rng_states(size, seed=1):
+ "Return `size` number of CUDA random number generator states."
+ rng_states = cuda.mem_alloc(size*characterize.sizeof('curandStateXORWOW', '#include <curand_kernel.h>'))
+
+ module = pycuda.compiler.SourceModule(init_rng_src, no_extern_c=True)
+ init_rng = module.get_function('init_rng')
+
+ init_rng(np.int32(size), rng_states, np.uint64(seed), np.uint64(0), block=(64,1,1), grid=(size//64+1,1))
+
+ return rng_states
+
+def to_float3(arr):
+ "Returns an pycuda.gpuarray.vec.float3 array from an (N,3) array."
+ if not arr.flags['C_CONTIGUOUS']:
+ arr = np.asarray(arr, order='c')
+ return arr.astype(np.float32).view(ga.vec.float3)[:,0]
+
+def to_uint3(arr):
+ "Returns a pycuda.gpuarray.vec.uint3 array from an (N,3) array."
+ if not arr.flags['C_CONTIGUOUS']:
+ arr = np.asarray(arr, order='c')
+ return arr.astype(np.uint32).view(ga.vec.uint3)[:,0]
+
+def chunk_iterator(nelements, nthreads_per_block=64, max_blocks=1024):
+ """Iterator that yields tuples with the values requried to process
+ a long array in multiple kernel passes on the GPU.
+
+ Each yielded value is of the form,
+ (first_index, elements_this_iteration, nblocks_this_iteration)
+
+ Example:
+ >>> list(chunk_iterator(300, 32, 2))
+ [(0, 64, 2), (64, 64, 2), (128, 64, 2), (192, 64, 2), (256, 9, 1)]
+ """
+ first = 0
+ while first < nelements:
+ elements_left = nelements - first
+ blocks = int(elements_left // nthreads_per_block)
+ if elements_left % nthreads_per_block != 0:
+ blocks += 1 # Round up only if needed
+ blocks = min(max_blocks, blocks)
+ elements_this_round = min(elements_left, blocks * nthreads_per_block)
+
+ yield (first, elements_this_round, blocks)
+ first += elements_this_round
+
+def create_cuda_context(device_id=None):
+ """Initialize and return a CUDA context on the specified device.
+ If device_id is None, the default device is used."""
+ if device_id is None:
+ try:
+ context = pycuda.tools.make_default_context()
+ except cuda.LogicError:
+ # initialize cuda
+ cuda.init()
+ context = pycuda.tools.make_default_context()
+ else:
+ try:
+ device = cuda.Device(device_id)
+ except cuda.LogicError:
+ # initialize cuda
+ cuda.init()
+ device = cuda.Device(device_id)
+ context = device.make_context()
+
+ context.set_cache_config(cuda.func_cache.PREFER_L1)
+
+ return context
+
+def make_gpu_struct(size, members):
+ struct = cuda.mem_alloc(size)
+
+ i = 0
+ for member in members:
+ if isinstance(member, ga.GPUArray):
+ member = member.gpudata
+
+ if isinstance(member, cuda.DeviceAllocation):
+ if i % 8:
+ raise Exception('cannot align 64-bit pointer. '
+ 'arrange struct member variables in order of '
+ 'decreasing size.')
+
+ cuda.memcpy_htod(int(struct)+i, np.intp(int(member)))
+ i += 8
+ elif np.isscalar(member):
+ cuda.memcpy_htod(int(struct)+i, member)
+ i += member.nbytes
+ else:
+ raise TypeError('expected a GPU device pointer or scalar type.')
+
+ return struct
+
+def format_size(size):
+ if size < 1e3:
+ return '%.1f%s' % (size, ' ')
+ elif size < 1e6:
+ return '%.1f%s' % (size/1e3, 'K')
+ elif size < 1e9:
+ return '%.1f%s' % (size/1e6, 'M')
+ else:
+ return '%.1f%s' % (size/1e9, 'G')
+
+def format_array(name, array):
+ return '%-15s %6s %6s' % \
+ (name, format_size(len(array)), format_size(array.nbytes))
diff --git a/fileio/__init__.py b/chroma/io/__init__.py
index e69de29..e69de29 100644
--- a/fileio/__init__.py
+++ b/chroma/io/__init__.py
diff --git a/fileio/root.C b/chroma/io/root.C
index 2b39b1b..2b39b1b 100644
--- a/fileio/root.C
+++ b/chroma/io/root.C
diff --git a/fileio/root.py b/chroma/io/root.py
index af86deb..55f7858 100644
--- a/fileio/root.py
+++ b/chroma/io/root.py
@@ -1,11 +1,26 @@
import ROOT
-import os.path
+import os, os.path
+import shutil
import numpy as np
+import chroma.event as event
+
+# Create .chroma directory if it doesn't exist
+chroma_dir = os.path.expanduser('~/.chroma')
+if not os.path.isdir(chroma_dir):
+ if os.path.exists(chroma_dir):
+ raise Exception('$HOME/.chroma file exists where directory should be')
+ else:
+ os.mkdir(chroma_dir)
+# Check if latest ROOT file is present
+package_root_C = os.path.join(os.path.dirname(__file__), 'root.C')
+home_root_C = os.path.join(chroma_dir, 'root.C')
+if not os.path.exists(home_root_C) or \
+ os.stat(package_root_C).st_mtime > os.stat(home_root_C).st_mtime:
+ shutil.copy2(src=package_root_C, dst=home_root_C)
+# Import this C file for access to data structure
+ROOT.gROOT.ProcessLine('.L '+home_root_C+'+')
-ROOT.gROOT.ProcessLine('.L '+os.path.join(os.path.dirname(__file__), 'root.C+g'))
-import ROOT
-import chroma.event as event
def tvector3_to_ndarray(vec):
'''Convert a ROOT.TVector3 into a numpy np.float32 array'''
diff --git a/itertoolset.py b/chroma/itertoolset.py
index 4d293e8..4d293e8 100644
--- a/itertoolset.py
+++ b/chroma/itertoolset.py
diff --git a/likelihood.py b/chroma/likelihood.py
index 0e378b1..49b04a7 100644
--- a/likelihood.py
+++ b/chroma/likelihood.py
@@ -1,8 +1,6 @@
import numpy as np
-from histogram import HistogramDD
-from uncertainties import ufloat, umath, unumpy
-from math import sqrt
-from itertools import islice, izip, compress, repeat
+from uncertainties import ufloat, unumpy
+from itertools import islice, izip, repeat
from chroma.tools import profile_if_possible
class Likelihood(object):
@@ -95,7 +93,6 @@ class Likelihood(object):
if __name__ == '__main__':
from chroma import detectors
from chroma.sim import Simulation
- from chroma.optics import water
from chroma.generator import constant_particle_gun
from chroma import tools
import time
diff --git a/make.py b/chroma/make.py
index 0623b2b..0623b2b 100644
--- a/make.py
+++ b/chroma/make.py
diff --git a/models/Colbert_HighRes_Brow.STL b/chroma/models/Colbert_HighRes_Brow.STL
index 784c877..784c877 100644
--- a/models/Colbert_HighRes_Brow.STL
+++ b/chroma/models/Colbert_HighRes_Brow.STL
Binary files differ
diff --git a/models/Colbert_HighRes_Smile.STL b/chroma/models/Colbert_HighRes_Smile.STL
index 9cf1864..9cf1864 100644
--- a/models/Colbert_HighRes_Smile.STL
+++ b/chroma/models/Colbert_HighRes_Smile.STL
Binary files differ
diff --git a/models/MiniFig.STL b/chroma/models/MiniFig.STL
index 54c6a5b..54c6a5b 100644
--- a/models/MiniFig.STL
+++ b/chroma/models/MiniFig.STL
diff --git a/models/companioncube.stl b/chroma/models/companioncube.stl
index 09ea5a1..09ea5a1 100644
--- a/models/companioncube.stl
+++ b/chroma/models/companioncube.stl
Binary files differ
diff --git a/models/liberty.stl b/chroma/models/liberty.stl
index 89c7e87..89c7e87 100644
--- a/models/liberty.stl
+++ b/chroma/models/liberty.stl
Binary files differ
diff --git a/models/lionsolid.stl b/chroma/models/lionsolid.stl
index e133e8f..e133e8f 100644
--- a/models/lionsolid.stl
+++ b/chroma/models/lionsolid.stl
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Barrel_Insert.STL b/chroma/models/portal_gun/Portal_Gun_Barrel_Insert.STL
index 277edbc..277edbc 100644
--- a/models/portal_gun/Portal_Gun_Barrel_Insert.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Barrel_Insert.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Bottom_Cover.STL b/chroma/models/portal_gun/Portal_Gun_Bottom_Cover.STL
index 82b54dd..82b54dd 100644
--- a/models/portal_gun/Portal_Gun_Bottom_Cover.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Bottom_Cover.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Center_Tube.STL b/chroma/models/portal_gun/Portal_Gun_Center_Tube.STL
index d35837b..d35837b 100644
--- a/models/portal_gun/Portal_Gun_Center_Tube.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Center_Tube.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Claw.STL b/chroma/models/portal_gun/Portal_Gun_Claw.STL
index 310bdae..310bdae 100644
--- a/models/portal_gun/Portal_Gun_Claw.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Claw.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Left_Half.STL b/chroma/models/portal_gun/Portal_Gun_Left_Half.STL
index a96459a..a96459a 100644
--- a/models/portal_gun/Portal_Gun_Left_Half.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Left_Half.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Right_Half.STL b/chroma/models/portal_gun/Portal_Gun_Right_Half.STL
index 3a4c68f..3a4c68f 100644
--- a/models/portal_gun/Portal_Gun_Right_Half.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Right_Half.STL
Binary files differ
diff --git a/models/portal_gun/Portal_Gun_Top_Claw_Mount.STL b/chroma/models/portal_gun/Portal_Gun_Top_Claw_Mount.STL
index cfe61c7..cfe61c7 100644
--- a/models/portal_gun/Portal_Gun_Top_Claw_Mount.STL
+++ b/chroma/models/portal_gun/Portal_Gun_Top_Claw_Mount.STL
Binary files differ
diff --git a/models/tie_interceptor6.stl b/chroma/models/tie_interceptor6.stl
index 970e8da..970e8da 100644
--- a/models/tie_interceptor6.stl
+++ b/chroma/models/tie_interceptor6.stl
Binary files differ
diff --git a/optics.py b/chroma/optics.py
index 6784324..6784324 100644
--- a/optics.py
+++ b/chroma/optics.py
diff --git a/chroma/parabola.py b/chroma/parabola.py
new file mode 100644
index 0000000..f4160fb
--- /dev/null
+++ b/chroma/parabola.py
@@ -0,0 +1,88 @@
+import numpy as np
+import uncertainties
+from uncertainties import unumpy
+import ROOT
+
+def build_design_matrix(x, y):
+ y_invsigma = 1.0/unumpy.std_devs(y)
+ dims = x.shape[1]
+ n = 1 + dims + dims*(dims+1)/2
+
+ A = np.zeros(shape=(len(x),n))
+
+ A[:,0] = 1.0 * y_invsigma
+ for i in xrange(dims):
+ A[:,1+i] = x[:,i] * y_invsigma
+
+ col = 1 + dims
+ for j in xrange(dims):
+ for k in xrange(j,dims):
+ A[:,col] = x[:,j] * x[:,k] * y_invsigma
+ col += 1
+ return A
+
+def build_design_vector(y):
+ return unumpy.nominal_values(y)/unumpy.std_devs(y)
+
+def parabola_fit(points):
+ dims = points[0][0].shape[0]
+
+ x = np.array([p[0] for p in points])
+ f = np.array([p[1] for p in points])
+
+ A = build_design_matrix(x, f)
+ B = build_design_vector(f)[:,np.newaxis] # make column vector
+
+ # Compute best-fit parabola coefficients using a singular value
+ # decomposition.
+ U, w, V = np.linalg.svd(A, full_matrices=False)
+ V = V.T # Flip to convention used by Numerical Recipies
+ inv_w = 1.0/w
+ inv_w[np.abs(w) < 1e-6] = 0.0
+ # Numpy version of Eq 15.4.17 from Numerical Recipies (C edition)
+ coeffs = np.zeros(A.shape[1])
+ for i in xrange(len(coeffs)):
+ coeffs += (np.dot(U[:,i], B[:,0]) * inv_w[i]) * V[:,i]
+
+ # Chi2 and probability for best fit and quadratic coefficents
+ chi2_terms = np.dot(A, coeffs[:,np.newaxis]) - B
+ chi2 = (chi2_terms**2).sum()
+ ndf = len(points) - (1 + dims + dims * (dims + 1) / 2)
+ prob = ROOT.TMath.Prob(chi2, ndf)
+
+ # Covariance is from Eq 15.4.20
+ covariance = np.dot(V*inv_w**2, V.T)
+
+ # Pack the coefficients into ufloats
+ ufloat_coeffs = uncertainties.correlated_values(coeffs, covariance.tolist())
+
+ # Separate coefficients into a, b, and c
+ a = ufloat_coeffs[0]
+ b = ufloat_coeffs[1:dims+1]
+ c = np.zeros(shape=(dims,dims), dtype=object)
+ index = dims + 1
+ for i in xrange(dims):
+ for j in xrange(i, dims):
+ c[i,j] = ufloat_coeffs[index]
+ c[j,i] = ufloat_coeffs[index]
+ if j != i:
+ # We combined the redundant off-diagonal parts of c
+ # matrix, but now we have to divide by two to
+ # avoid double counting when c is used later
+ c[i,j] /= 2.0
+ c[j,i] /= 2.0
+ index += 1
+
+ return a, np.array(b), c, chi2, prob
+
+def parabola_eval(x, a, b, c):
+ if len(x.shape) == 1:
+ return a + np.dot(x, b) + np.dot(x, np.dot(c, x.T))
+ else:
+ y = np.array([a] * x.shape[0])
+
+ for i, xrow in enumerate(x):
+ y[i] += np.dot(xrow, b)
+ y[i] += np.dot(xrow, np.dot(c, xrow.T))
+
+ return y
diff --git a/pi0.py b/chroma/pi0.py
index 91e8097..91e8097 100644
--- a/pi0.py
+++ b/chroma/pi0.py
diff --git a/project.py b/chroma/project.py
index 7d9cabe..51fba1e 100644
--- a/project.py
+++ b/chroma/project.py
@@ -1,6 +1,5 @@
import numpy as np
-from itertools import repeat
-from chroma.transform import rotate, normalize
+from chroma.transform import normalize
def from_film(position=(0,0,0), axis1=(0,0,1), axis2=(1,0,0), size=(800,600),
width=0.035, focal_length=0.018):
diff --git a/sample.py b/chroma/sample.py
index 1909370..1909370 100644
--- a/sample.py
+++ b/chroma/sample.py
diff --git a/scenes/__init__.py b/chroma/scenes/__init__.py
index 1d57220..1d57220 100644
--- a/scenes/__init__.py
+++ b/chroma/scenes/__init__.py
diff --git a/scenes/checkerboard.py b/chroma/scenes/checkerboard.py
index c176b32..c176b32 100644
--- a/scenes/checkerboard.py
+++ b/chroma/scenes/checkerboard.py
diff --git a/sim.py b/chroma/sim.py
index 2e8f2f9..2e8f2f9 100755..100644
--- a/sim.py
+++ b/chroma/sim.py
diff --git a/solids/__init__.py b/chroma/solids/__init__.py
index c4d227f..c4d227f 100644
--- a/solids/__init__.py
+++ b/chroma/solids/__init__.py
diff --git a/solids/hamamatsu_12inch.txt b/chroma/solids/hamamatsu_12inch.txt
index e896b94..e896b94 100644
--- a/solids/hamamatsu_12inch.txt
+++ b/chroma/solids/hamamatsu_12inch.txt
diff --git a/solids/pmts.py b/chroma/solids/pmts.py
index 43aed3f..43aed3f 100644
--- a/solids/pmts.py
+++ b/chroma/solids/pmts.py
diff --git a/solids/sno_cone.txt b/chroma/solids/sno_cone.txt
index 5b56792..5b56792 100644
--- a/solids/sno_cone.txt
+++ b/chroma/solids/sno_cone.txt
diff --git a/solids/sno_pmt.txt b/chroma/solids/sno_pmt.txt
index 9553505..9553505 100644
--- a/solids/sno_pmt.txt
+++ b/chroma/solids/sno_pmt.txt
diff --git a/solids/sno_pmt_reduced.txt b/chroma/solids/sno_pmt_reduced.txt
index eceaadf..eceaadf 100644
--- a/solids/sno_pmt_reduced.txt
+++ b/chroma/solids/sno_pmt_reduced.txt
diff --git a/stl.py b/chroma/stl.py
index abf0cc9..abf0cc9 100644
--- a/stl.py
+++ b/chroma/stl.py
diff --git a/tools.py b/chroma/tools.py
index 1840fae..1840fae 100644
--- a/tools.py
+++ b/chroma/tools.py
diff --git a/transform.py b/chroma/transform.py
index 299da46..299da46 100644
--- a/transform.py
+++ b/chroma/transform.py
diff --git a/distribute_setup.py b/distribute_setup.py
new file mode 100644
index 0000000..5a95c92
--- /dev/null
+++ b/distribute_setup.py
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+ from distribute_setup import use_setuptools
+ use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
+
+try:
+ import subprocess
+
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
+
+except ImportError:
+ # will be used for python 2.3
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ # quoting arguments if windows
+ if sys.platform == 'win32':
+ def quote(arg):
+ if ' ' in arg:
+ return '"%s"' % arg
+ return arg
+ args = [quote(arg) for arg in args]
+ return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
# Distribute release installed by default, and the mirror it is fetched from.
DEFAULT_VERSION = "0.6.21"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Setuptools version advertised by the fake egg-info written below.
SETUPTOOLS_FAKED_VERSION = "0.6c11"

# PKG-INFO payload used to masquerade as a plain setuptools installation.
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
+
+
def _install(tarball):
    """Unpack *tarball* into a temporary directory and run setup.py install.

    A failed install subprocess is logged, not raised.  The caller's working
    directory is always restored.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        try:
            _extractall(tar)
        finally:
            # close the archive even if extraction fails (was leaked before)
            tar.close()

        # the tarball unpacks to a single top-level directory; work inside it
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        os.chdir(old_wd)
+
+
def _build_egg(egg, tarball, to_dir):
    """Build a distribute egg from *tarball* into *to_dir*.

    Raises IOError if the expected *egg* file was not produced.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        try:
            _extractall(tar)
        finally:
            # close the archive even if extraction fails (was leaked before)
            tar.close()

        # the tarball unpacks to a single top-level directory; work inside it
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)

    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
+
+
def _do_download(version, download_base, to_dir, download_delay):
    """Fetch/build the distribute egg if missing, then activate it."""
    egg_name = 'distribute-%s-py%d.%d.egg' % (
        version, sys.version_info[0], sys.version_info[1])
    egg = os.path.join(to_dir, egg_name)
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # make the egg importable now and remember where it was bootstrapped from
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
+
+
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure distribute >= `version` is importable, downloading if needed.

    With no_fake=False, an existing plain-setuptools install is faked over
    before the download and fake metadata is written into `to_dir` at exit.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # only distribute's pkg_resources defines `_distribute`; a plain
            # setuptools install must be replaced
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            # sys.exc_info keeps this source compatible with py2 and py3
            e = sys.exc_info()[1]
            if was_imported:
                # too late to swap the module out from under our caller
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
+
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2
    tgz_name = "distribute-%s.tar.gz" % version
    saveto = os.path.join(to_dir, tgz_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        url = download_base + tgz_name
        src = dst = None
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            payload = src.read()
            dst = open(saveto, "wb")
            dst.write(payload)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
+
+def _no_sandbox(function):
+ def __no_sandbox(*args, **kw):
+ try:
+ from setuptools.sandbox import DirectorySandbox
+ if not hasattr(DirectorySandbox, '_old'):
+ def violation(*args):
+ pass
+ DirectorySandbox._old = DirectorySandbox._violation
+ DirectorySandbox._violation = violation
+ patched = True
+ else:
+ patched = False
+ except ImportError:
+ patched = False
+
+ try:
+ return function(*args, **kw)
+ finally:
+ if patched:
+ DirectorySandbox._violation = DirectorySandbox._old
+ del DirectorySandbox._old
+
+ return __no_sandbox
+
def _patch_file(path, content):
    """Overwrite *path* with *content*, keeping a timestamped backup.

    Returns False when the file already holds *content*, True otherwise.
    """
    current = open(path).read()
    if current == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # move the original aside before rewriting
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

# run with setuptools' DirectorySandbox disabled
_patch_file = _no_sandbox(_patch_file)
+
+def _same_content(path, content):
+ return open(path).read() == content
+
def _rename_path(path):
    """Move *path* aside to a timestamped '.OLD' name; return the new name."""
    new_name = '%s.OLD.%s' % (path, time.time())
    log.warn('Renaming %s into %s', path, new_name)
    os.rename(path, new_name)
    return new_name
+
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools install under *placeholder*.

    Patches its egg-info and moves its modules out of the way.  Returns True
    when files were moved, False when nothing needed doing.
    """
    if not os.path.isdir(placeholder):
        # fixed log message typo ('Unkown')
        log.warn('Unknown installation at %s', placeholder)
        return False
    found = False
    # renamed from `file` to avoid shadowing the builtin
    for filename in os.listdir(placeholder):
        if fnmatch.fnmatch(filename, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return

    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, filename)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)

    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True

# run with setuptools' DirectorySandbox disabled
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
def _after_install(dist):
    """Post-install hook: drop fake setuptools metadata into purelib."""
    log.warn('After install bootstrap.')
    # the install command knows where pure-python modules were placed
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
+
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake setuptools egg-info plus a .pth file under *placeholder*."""
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % (
        SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return

    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()

    # the .pth file points the import system at the fake metadata
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()

# run with setuptools' DirectorySandbox disabled
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+
def _patch_egg_dir(path):
    """Replace the egg dir at *path* with a faked-setuptools PKG-INFO stub."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # back up the real egg dir, then rebuild a minimal skeleton in its place
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True

# run with setuptools' DirectorySandbox disabled
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
def _before_install():
    """Pre-install hook: fake out any existing setuptools installation."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
+
+
+def _under_prefix(location):
+ if 'install' not in sys.argv:
+ return True
+ args = sys.argv[sys.argv.index('install')+1:]
+ for index, arg in enumerate(args):
+ for option in ('--root', '--prefix'):
+ if arg.startswith('%s=' % option):
+ top_dir = arg.split('root=')[-1]
+ return location.startswith(top_dir)
+ elif arg == option:
+ if len(args) > index:
+ top_dir = args[index+1]
+ return location.startswith(top_dir)
+ if arg == '--user' and USER_SITE is not None:
+ return location.startswith(USER_SITE)
+ return True
+
+
def _fake_setuptools():
    """Patch an installed plain setuptools so distribute can take over.

    Detects the installed setuptools distribution (egg or flat), replaces
    its metadata with the faked PKG-INFO, and relaunches the process.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        # replacement=False asks distribute's pkg_resources for the real
        # setuptools distribution rather than distribute's stand-in
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))

    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)

    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return

    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
+
+
def _relaunch():
    """Re-execute the current command line under the interpreter and exit."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
+ else:
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    *version* selects the distribute release to fetch; it was previously
    accepted but ignored.  *argv* is kept for interface compatibility.
    """
    # pass the requested version through (was download_setuptools())
    tarball = download_setuptools(version)
    _install(tarball)


if __name__ == '__main__':
    main(sys.argv[1:])
diff --git a/gpu.py b/gpu.py
deleted file mode 100644
index 4e2ecf3..0000000
--- a/gpu.py
+++ /dev/null
@@ -1,799 +0,0 @@
-import numpy as np
-import numpy.ma as ma
-from copy import copy
-from itertools import izip
-import os
-import sys
-import gc
-
-import pytools
-import pycuda.tools
-from pycuda.compiler import SourceModule
-from pycuda import characterize
-import pycuda.driver as cuda
-from pycuda import gpuarray as ga
-
-import chroma.src
-from chroma.tools import timeit, profile_if_possible
-from chroma.geometry import standard_wavelengths
-from chroma import event
-
# Bring up the CUDA driver API as soon as this module is imported.
cuda.init()

# standard nvcc options; append '--ptxas-options=-v' for register-usage info
cuda_options = ('--use_fast_math',)
-
@pycuda.tools.context_dependent_memoize
def get_cu_module(name, options=None, include_source_directory=True):
    """Compile the CUDA source file at src/[name] in the chroma src
    directory and return it as a pycuda.compiler.SourceModule."""
    if options is None:
        options = []
    elif isinstance(options, tuple):
        options = list(options)
    else:
        # reject lists and strings so callers cannot hand in shared state
        raise TypeError('`options` must be a tuple.')

    srcdir = os.path.dirname(os.path.abspath(chroma.src.__file__))
    if include_source_directory:
        options.append('-I' + srcdir)

    with open('%s/%s' % (srcdir, name)) as src_file:
        source = src_file.read()

    return pycuda.compiler.SourceModule(source, options=options,
                                        no_extern_c=True)
-
@pycuda.tools.memoize
def get_cu_source(name):
    """Return the text of the CUDA source file src/[name] in the chroma
    src directory."""
    srcdir = os.path.dirname(os.path.abspath(chroma.src.__file__))
    with open('%s/%s' % (srcdir, name)) as src_file:
        return src_file.read()
-
class GPUFuncs(object):
    """Look up GPU kernels lazily by attribute access and cache them."""

    def __init__(self, module):
        # compiled module the kernels are fetched from
        self.module = module
        # cache of name -> kernel, filled on first access
        self.funcs = {}

    def __getattr__(self, name):
        # __getattr__ only fires when normal lookup fails, so cached
        # entries must be served from self.funcs explicitly
        if name not in self.funcs:
            self.funcs[name] = self.module.get_function(name)
        return self.funcs[name]
-
# CUDA source (compiled on demand by get_rng_states) that seeds one curand
# XORWOW state per thread from `seed` and `offset`.
init_rng_src = """
#include <curand_kernel.h>

extern "C"
{

__global__ void init_rng(int nthreads, curandState *s, unsigned long long seed, unsigned long long offset)
{
    int id = blockIdx.x*blockDim.x + threadIdx.x;

    if (id >= nthreads)
        return;

    curand_init(seed, id, offset, &s[id]);
}

} // extern "C"
"""
-
def get_rng_states(size, seed=1):
    "Return `size` number of CUDA random number generator states."
    state_bytes = characterize.sizeof('curandStateXORWOW',
                                      '#include <curand_kernel.h>')
    rng_states = cuda.mem_alloc(size * state_bytes)

    module = pycuda.compiler.SourceModule(init_rng_src, no_extern_c=True)
    init_rng = module.get_function('init_rng')

    # one thread per state, 64 threads per block, rounding the grid up
    init_rng(np.int32(size), rng_states, np.uint64(seed), np.uint64(0),
             block=(64, 1, 1), grid=(size // 64 + 1, 1))

    return rng_states
-
def to_float3(arr):
    """Returns a pycuda.gpuarray.vec.float3 array from an (N,3) array-like.

    Accepts anything numpy can convert (previously required an ndarray with
    a `flags` attribute); a C-contiguous copy is made only when needed.
    """
    arr = np.asarray(arr, order='c')
    return arr.astype(np.float32).view(ga.vec.float3)[:,0]
-
def to_uint3(arr):
    """Returns a pycuda.gpuarray.vec.uint3 array from an (N,3) array-like.

    Accepts anything numpy can convert (previously required an ndarray with
    a `flags` attribute); a C-contiguous copy is made only when needed.
    """
    arr = np.asarray(arr, order='c')
    return arr.astype(np.uint32).view(ga.vec.uint3)[:,0]
-
def chunk_iterator(nelements, nthreads_per_block=64, max_blocks=1024):
    """Iterator that yields tuples with the values required to process
    a long array in multiple kernel passes on the GPU.

    Each yielded value is of the form,
        (first_index, elements_this_iteration, nblocks_this_iteration)

    Example:
        >>> list(chunk_iterator(300, 32, 2))
        [(0, 64, 2), (64, 64, 2), (128, 64, 2), (192, 64, 2), (256, 44, 2)]
    """
    # NOTE: the docstring example previously claimed a final chunk of
    # (256, 9, 1), which does not match the actual behavior (44 remain).
    first = 0
    while first < nelements:
        elements_left = nelements - first
        # ceiling division: a partial block still needs a whole thread block
        blocks = (elements_left + nthreads_per_block - 1) // nthreads_per_block
        blocks = min(max_blocks, blocks)
        elements_this_round = min(elements_left, blocks * nthreads_per_block)

        yield (first, elements_this_round, blocks)
        first += elements_this_round
-
class GPUPhotons(object):
    # Device-resident photon state arrays plus the CUDA kernels
    # (photon_duplicate, propagate from propagate.cu) that act on them.
    def __init__(self, photons, ncopies=1):
        """Load ``photons`` onto the GPU, replicating as requested.

        Args:
            - photons: chroma.Event.Photons
                Photon state information to load onto GPU
            - ncopies: int, *optional*
                Number of times to replicate the photons
                on the GPU. This is used if you want
                to propagate the same event many times,
                for example in a likelihood calculation.

                The amount of GPU storage will be proportionally
                larger if ncopies > 1, so be careful.
        """
        nphotons = len(photons)
        # one device slot per photon per copy, for every per-photon field
        self.pos = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
        self.dir = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
        self.pol = ga.empty(shape=nphotons*ncopies, dtype=ga.vec.float3)
        self.wavelengths = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
        self.t = ga.empty(shape=nphotons*ncopies, dtype=np.float32)
        self.last_hit_triangles = ga.empty(shape=nphotons*ncopies, dtype=np.int32)
        self.flags = ga.empty(shape=nphotons*ncopies, dtype=np.uint32)

        # Assign the provided photons to the beginning (possibly
        # the entire array if ncopies is 1)
        self.pos[:nphotons].set(to_float3(photons.pos))
        self.dir[:nphotons].set(to_float3(photons.dir))
        self.pol[:nphotons].set(to_float3(photons.pol))
        self.wavelengths[:nphotons].set(photons.wavelengths.astype(np.float32))
        self.t[:nphotons].set(photons.t.astype(np.float32))
        self.last_hit_triangles[:nphotons].set(photons.last_hit_triangles.astype(np.int32))
        self.flags[:nphotons].set(photons.flags.astype(np.uint32))

        module = get_cu_module('propagate.cu', options=cuda_options)
        self.gpu_funcs = GPUFuncs(module)

        # Replicate the photons to the rest of the slots if needed
        if ncopies > 1:
            max_blocks = 1024
            nthreads_per_block = 64
            for first_photon, photons_this_round, blocks in \
                    chunk_iterator(nphotons, nthreads_per_block, max_blocks):
                self.gpu_funcs.photon_duplicate(np.int32(first_photon), np.int32(photons_this_round),
                                                self.pos, self.dir, self.wavelengths, self.pol, self.t,
                                                self.flags, self.last_hit_triangles,
                                                np.int32(ncopies-1),
                                                np.int32(nphotons),
                                                block=(nthreads_per_block,1,1), grid=(blocks, 1))


        # Save the duplication information for the iterate_copies() method
        self.true_nphotons = nphotons
        self.ncopies = ncopies

    def get(self):
        # Copy all photon state back to the host and repackage the float3
        # fields as (N,3) float32 arrays.
        pos = self.pos.get().view(np.float32).reshape((len(self.pos),3))
        dir = self.dir.get().view(np.float32).reshape((len(self.dir),3))
        pol = self.pol.get().view(np.float32).reshape((len(self.pol),3))
        wavelengths = self.wavelengths.get()
        t = self.t.get()
        last_hit_triangles = self.last_hit_triangles.get()
        flags = self.flags.get()
        return event.Photons(pos, dir, pol, wavelengths, t, last_hit_triangles, flags)

    def iterate_copies(self):
        '''Returns an iterator that yields GPUPhotonsSlice objects
        corresponding to the event copies stored in ``self``.'''
        for i in xrange(self.ncopies):
            # each copy occupies a contiguous window of true_nphotons slots
            window = slice(self.true_nphotons*i, self.true_nphotons*(i+1))
            yield GPUPhotonsSlice(pos=self.pos[window],
                                  dir=self.dir[window],
                                  pol=self.pol[window],
                                  wavelengths=self.wavelengths[window],
                                  t=self.t[window],
                                  last_hit_triangles=self.last_hit_triangles[window],
                                  flags=self.flags[window])

    @profile_if_possible
    def propagate(self, gpu_geometry, rng_states, nthreads_per_block=64,
                  max_blocks=1024, max_steps=10):
        """Propagate photons on GPU to termination or max_steps, whichever
        comes first.

        May be called repeatedly without reloading photon information if
        single-stepping through photon history.

        ..warning::
            `rng_states` must have at least `nthreads_per_block`*`max_blocks`
            number of curandStates.
        """
        nphotons = self.pos.size
        step = 0
        # queue layout: slot 0 holds a counter, slots 1..n hold photon indices
        input_queue = np.empty(shape=nphotons+1, dtype=np.uint32)
        input_queue[0] = 0
        # Order photons initially in the queue to put the clones next to each other
        for copy in xrange(self.ncopies):
            input_queue[1+copy::self.ncopies] = np.arange(self.true_nphotons, dtype=np.uint32) + copy * self.true_nphotons
        input_queue_gpu = ga.to_gpu(input_queue)
        output_queue = np.zeros(shape=nphotons+1, dtype=np.uint32)
        output_queue[0] = 1
        output_queue_gpu = ga.to_gpu(output_queue)

        while step < max_steps:
            # Just finish the rest of the steps if the # of photons is low
            if nphotons < nthreads_per_block * 16 * 8:
                nsteps = max_steps - step
            else:
                nsteps = 1

            for first_photon, photons_this_round, blocks in \
                    chunk_iterator(nphotons, nthreads_per_block, max_blocks):
                self.gpu_funcs.propagate(np.int32(first_photon), np.int32(photons_this_round), input_queue_gpu[1:], output_queue_gpu, rng_states, self.pos, self.dir, self.wavelengths, self.pol, self.t, self.flags, self.last_hit_triangles, np.int32(nsteps), gpu_geometry.gpudata, block=(nthreads_per_block,1,1), grid=(blocks, 1))

            step += nsteps

            if step < max_steps:
                # swap queues: survivors from this pass feed the next pass
                temp = input_queue_gpu
                input_queue_gpu = output_queue_gpu
                output_queue_gpu = temp
                # reset the output counter (slot 0) for the next pass
                output_queue_gpu[:1].set(np.uint32(1))
                nphotons = input_queue_gpu[:1].get()[0] - 1

        # NOTE(review): bit 31 of the flag word appears to mark photons the
        # kernel gave up on -- confirm against propagate.cu
        if ga.max(self.flags).get() & (1 << 31):
            print >>sys.stderr, "WARNING: ABORTED PHOTONS"

    def __del__(self):
        # drop our references to the device arrays first...
        del self.pos
        del self.dir
        del self.pol
        del self.wavelengths
        del self.t
        del self.flags
        del self.last_hit_triangles
        # Free up GPU memory quickly if now available
        gc.collect()
-
class GPUPhotonsSlice(GPUPhotons):
    '''A `slice`-like view of a subrange of another GPU photons array.
    Works exactly like an instance of GPUPhotons, but the GPU storage
    is taken from another GPUPhotons instance.

    Returned by the GPUPhotons.iterate_copies() iterator.'''

    def __init__(self, pos, dir, pol, wavelengths, t, last_hit_triangles,
                 flags):
        '''Adopt slices of GPUArrays taken from a GPUPhotons instance.
        NOTE THESE ARE NOT CPU ARRAYS!'''
        # Borrow the storage directly -- no device memory is allocated here.
        self.pos = pos
        self.dir = dir
        self.pol = pol
        self.wavelengths = wavelengths
        self.t = t
        self.last_hit_triangles = last_hit_triangles
        self.flags = flags

        # Propagation kernels come from the same module GPUPhotons uses.
        module = get_cu_module('propagate.cu', options=cuda_options)
        self.gpu_funcs = GPUFuncs(module)

        # A slice is always a single un-replicated copy.
        self.true_nphotons = len(pos)
        self.ncopies = 1

    def __del__(self):
        # The storage belongs to the parent GPUPhotons; never free it here.
        pass
-
class GPUChannels(object):
    """Per-channel hit time, charge and flag arrays held on the GPU."""

    def __init__(self, t, q, flags):
        self.t = t
        self.q = q
        self.flags = flags

    def get(self):
        """Copy the channel data to the host as a chroma.event.Channels."""
        t = self.t.get()
        q = self.q.get().astype(np.float32)

        # For now, assume all channels with small
        # enough hit time were hit.
        return event.Channels(t<1e8, t, q, self.flags.get())
-
class GPURays(object):
    """The GPURays class holds arrays of ray positions and directions
    on the GPU that are used to render a geometry."""
    def __init__(self, pos, dir, max_alpha_depth=10, nblocks=64):
        # pos/dir: (N,3) host arrays, copied to the device as float3
        self.pos = ga.to_gpu(to_float3(pos))
        self.dir = ga.to_gpu(to_float3(dir))

        # maximum number of alpha-blended layers accumulated per ray
        self.max_alpha_depth = max_alpha_depth

        # CUDA block size used for every kernel launch below
        self.nblocks = nblocks

        transform_module = get_cu_module('transform.cu', options=cuda_options)
        self.transform_funcs = GPUFuncs(transform_module)

        render_module = get_cu_module('render.cu', options=cuda_options)
        self.render_funcs = GPUFuncs(render_module)

        # scratch space reused by render(): max_alpha_depth slots per ray
        self.dx = ga.empty(max_alpha_depth*self.pos.size, dtype=np.float32)
        self.color = ga.empty(self.dx.size, dtype=ga.vec.float4)
        self.dxlen = ga.zeros(self.pos.size, dtype=np.uint32)

    def rotate(self, phi, n):
        "Rotate by an angle phi around the axis `n`."
        # both positions and directions rotate about the origin
        self.transform_funcs.rotate(np.int32(self.pos.size), self.pos, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
        self.transform_funcs.rotate(np.int32(self.dir.size), self.dir, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.dir.size//self.nblocks+1,1))

    def rotate_around_point(self, phi, n, point):
        """Rotate by an angle phi around the axis `n` passing through
        the point `point`."""
        # only positions depend on the pivot; directions rotate about origin
        self.transform_funcs.rotate_around_point(np.int32(self.pos.size), self.pos, np.float32(phi), ga.vec.make_float3(*n), ga.vec.make_float3(*point), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))
        self.transform_funcs.rotate(np.int32(self.dir.size), self.dir, np.float32(phi), ga.vec.make_float3(*n), block=(self.nblocks,1,1), grid=(self.dir.size//self.nblocks+1,1))

    def translate(self, v):
        "Translate the ray positions by the vector `v`."
        self.transform_funcs.translate(np.int32(self.pos.size), self.pos, ga.vec.make_float3(*v), block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))

    def render(self, gpu_geometry, pixels, alpha_depth=10,
               keep_last_render=False):
        """Render `gpu_geometry` and fill the GPU array `pixels` with pixel
        colors."""
        if not keep_last_render:
            # discard any accumulated layers from the previous render
            self.dxlen.fill(0)

        if alpha_depth > self.max_alpha_depth:
            raise Exception('alpha_depth > max_alpha_depth')

        if not isinstance(pixels, ga.GPUArray):
            raise TypeError('`pixels` must be a %s instance.' % ga.GPUArray)

        if pixels.size != self.pos.size:
            raise ValueError('`pixels`.size != number of rays')

        self.render_funcs.render(np.int32(self.pos.size), self.pos, self.dir, gpu_geometry.gpudata, np.uint32(alpha_depth), pixels, self.dx, self.dxlen, self.color, block=(self.nblocks,1,1), grid=(self.pos.size//self.nblocks+1,1))

    def snapshot(self, gpu_geometry, alpha_depth=10):
        "Render `gpu_geometry` and return a numpy array of pixel colors."
        pixels = ga.empty(self.pos.size, dtype=np.uint32)
        self.render(gpu_geometry, pixels, alpha_depth)
        return pixels.get()
-
def make_gpu_struct(size, members):
    """Pack `members` (GPU arrays, device pointers or numpy scalars) into a
    freshly allocated `size`-byte device struct and return the allocation."""
    struct = cuda.mem_alloc(size)

    offset = 0
    for member in members:
        # a GPUArray contributes its underlying device pointer
        if isinstance(member, ga.GPUArray):
            member = member.gpudata

        if isinstance(member, cuda.DeviceAllocation):
            # device pointers are 8 bytes and must be 8-byte aligned
            if offset % 8:
                raise Exception('cannot align 64-bit pointer. '
                                'arrange struct member variables in order of '
                                'decreasing size.')

            cuda.memcpy_htod(int(struct)+offset, np.intp(int(member)))
            offset += 8
        elif np.isscalar(member):
            cuda.memcpy_htod(int(struct)+offset, member)
            offset += member.nbytes
        else:
            raise TypeError('expected a GPU device pointer or scalar type.')

    return struct
-
def format_size(size):
    """Format a count as a short human-readable string (K/M/G suffixes)."""
    for limit, divisor, suffix in ((1e3, 1, ' '),
                                   (1e6, 1e3, 'K'),
                                   (1e9, 1e6, 'M')):
        if size < limit:
            return '%.1f%s' % (size/divisor, suffix)
    return '%.1f%s' % (size/1e9, 'G')
-
def format_array(name, array):
    """One row of the device-usage table: name, element count, byte size."""
    return '%-15s %6s %6s' % (name,
                              format_size(len(array)),
                              format_size(array.nbytes))
-
-class GPUGeometry(object):
- def __init__(self, geometry, wavelengths=None, print_usage=False):
- if wavelengths is None:
- wavelengths = standard_wavelengths
-
- try:
- wavelength_step = np.unique(np.diff(wavelengths)).item()
- except ValueError:
- raise ValueError('wavelengths must be equally spaced apart.')
-
- geometry_source = get_cu_source('geometry.h')
- material_struct_size = characterize.sizeof('Material', geometry_source)
- surface_struct_size = characterize.sizeof('Surface', geometry_source)
- geometry_struct_size = characterize.sizeof('Geometry', geometry_source)
-
- self.material_data = []
- self.material_ptrs = []
-
- def interp_material_property(wavelengths, property):
- # note that it is essential that the material properties be
- # interpolated linearly. this fact is used in the propagation
- # code to guarantee that probabilities still sum to one.
- return np.interp(wavelengths, property[:,0], property[:,1]).astype(np.float32)
-
- for i in range(len(geometry.unique_materials)):
- material = geometry.unique_materials[i]
-
- if material is None:
- raise Exception('one or more triangles is missing a material.')
-
- refractive_index = interp_material_property(wavelengths, material.refractive_index)
- refractive_index_gpu = ga.to_gpu(refractive_index)
- absorption_length = interp_material_property(wavelengths, material.absorption_length)
- absorption_length_gpu = ga.to_gpu(absorption_length)
- scattering_length = interp_material_property(wavelengths, material.scattering_length)
- scattering_length_gpu = ga.to_gpu(scattering_length)
-
- self.material_data.append(refractive_index_gpu)
- self.material_data.append(absorption_length_gpu)
- self.material_data.append(scattering_length_gpu)
-
- material_gpu = \
- make_gpu_struct(material_struct_size,
- [refractive_index_gpu, absorption_length_gpu,
- scattering_length_gpu,
- np.uint32(len(wavelengths)),
- np.float32(wavelength_step),
- np.float32(wavelengths[0])])
-
- self.material_ptrs.append(material_gpu)
-
- self.material_pointer_array = \
- make_gpu_struct(8*len(self.material_ptrs), self.material_ptrs)
-
- self.surface_data = []
- self.surface_ptrs = []
-
- for i in range(len(geometry.unique_surfaces)):
- surface = geometry.unique_surfaces[i]
-
- if surface is None:
- # need something to copy to the surface array struct
- # that is the same size as a 64-bit pointer.
- # this pointer will never be used by the simulation.
- self.surface_ptrs.append(np.uint64(0))
- continue
-
- detect = interp_material_property(wavelengths, surface.detect)
- detect_gpu = ga.to_gpu(detect)
- absorb = interp_material_property(wavelengths, surface.absorb)
- absorb_gpu = ga.to_gpu(absorb)
- reflect_diffuse = interp_material_property(wavelengths, surface.reflect_diffuse)
- reflect_diffuse_gpu = ga.to_gpu(reflect_diffuse)
- reflect_specular = interp_material_property(wavelengths, surface.reflect_specular)
- reflect_specular_gpu = ga.to_gpu(reflect_specular)
-
- self.surface_data.append(detect_gpu)
- self.surface_data.append(absorb_gpu)
- self.surface_data.append(reflect_diffuse_gpu)
- self.surface_data.append(reflect_specular_gpu)
-
- surface_gpu = \
- make_gpu_struct(surface_struct_size,
- [detect_gpu, absorb_gpu,
- reflect_diffuse_gpu,
- reflect_specular_gpu,
- np.uint32(len(wavelengths)),
- np.float32(wavelength_step),
- np.float32(wavelengths[0])])
-
- self.surface_ptrs.append(surface_gpu)
-
- self.surface_pointer_array = \
- make_gpu_struct(8*len(self.surface_ptrs), self.surface_ptrs)
-
- self.vertices = ga.to_gpu(to_float3(geometry.mesh.vertices))
- self.triangles = ga.to_gpu(to_uint3(geometry.mesh.triangles))
-
- material_codes = (((geometry.material1_index & 0xff) << 24) |
- ((geometry.material2_index & 0xff) << 16) |
- ((geometry.surface_index & 0xff) << 8)).astype(np.uint32)
-
- self.material_codes = ga.to_gpu(material_codes)
-
- self.lower_bounds = ga.to_gpu(to_float3(geometry.lower_bounds))
- self.upper_bounds = ga.to_gpu(to_float3(geometry.upper_bounds))
- self.colors = ga.to_gpu(geometry.colors.astype(np.uint32))
- self.node_map = ga.to_gpu(geometry.node_map.astype(np.uint32))
- self.node_map_end = ga.to_gpu(geometry.node_map_end.astype(np.uint32))
- self.solid_id_map = ga.to_gpu(geometry.solid_id.astype(np.uint32))
-
- self.gpudata = make_gpu_struct(geometry_struct_size,
- [self.vertices, self.triangles,
- self.material_codes,
- self.colors, self.lower_bounds,
- self.upper_bounds, self.node_map,
- self.node_map_end,
- self.material_pointer_array,
- self.surface_pointer_array,
- np.uint32(geometry.start_node),
- np.uint32(geometry.first_node)])
-
- self.geometry = geometry
-
- if print_usage:
- self.print_device_usage()
-
- def print_device_usage(self):
- print 'device usage:'
- print '-'*10
- print format_array('vertices', self.vertices)
- print format_array('triangles', self.triangles)
- print format_array('lower_bounds', self.lower_bounds)
- print format_array('upper_bounds', self.upper_bounds)
- print format_array('node_map', self.node_map)
- print format_array('node_map_end', self.node_map_end)
- print '%-15s %6s %6s' % ('total', '', format_size(self.vertices.nbytes + self.triangles.nbytes + self.lower_bounds.nbytes + self.upper_bounds.nbytes + self.node_map.nbytes + self.node_map_end.nbytes))
- print '-'*10
- free, total = cuda.mem_get_info()
- print '%-15s %6s %6s' % ('device total', '', format_size(total))
- print '%-15s %6s %6s' % ('device used', '', format_size(total-free))
- print '%-15s %6s %6s' % ('device free', '', format_size(free))
- print
-
- def reset_colors(self):
- self.colors.set_async(self.geometry.colors.astype(np.uint32))
-
- def color_solids(self, solid_hit, colors, nblocks_per_thread=64,
- max_blocks=1024):
- solid_hit_gpu = ga.to_gpu(np.array(solid_hit, dtype=np.bool))
- solid_colors_gpu = ga.to_gpu(np.array(colors, dtype=np.uint32))
-
- module = get_cu_module('mesh.h', options=cuda_options)
- color_solids = module.get_function('color_solids')
-
- for first_triangle, triangles_this_round, blocks in \
- chunk_iterator(self.triangles.size, nblocks_per_thread,
- max_blocks):
- color_solids(np.int32(first_triangle),
- np.int32(triangles_this_round), self.solid_id_map,
- solid_hit_gpu, solid_colors_gpu, self.gpudata,
- block=(nblocks_per_thread,1,1),
- grid=(blocks,1))
-
-class GPUDaq(object):
- def __init__(self, gpu_geometry, max_pmt_id, pmt_rms=1.2e-9):
- self.earliest_time_gpu = ga.empty(max_pmt_id+1, dtype=np.float32)
- self.earliest_time_int_gpu = ga.empty(max_pmt_id+1, dtype=np.uint32)
- self.channel_history_gpu = ga.zeros_like(self.earliest_time_int_gpu)
- self.channel_q_gpu = ga.zeros_like(self.earliest_time_int_gpu)
- self.daq_pmt_rms = pmt_rms
- self.solid_id_map_gpu = gpu_geometry.solid_id_map
-
- self.module = get_cu_module('daq.cu', options=cuda_options,
- include_source_directory=False)
- self.gpu_funcs = GPUFuncs(self.module)
-
- def acquire(self, gpuphotons, rng_states, nthreads_per_block=64, max_blocks=1024):
- self.gpu_funcs.reset_earliest_time_int(np.float32(1e9), np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, block=(nthreads_per_block,1,1), grid=(len(self.earliest_time_int_gpu)//nthreads_per_block+1,1))
- self.channel_q_gpu.fill(0)
- self.channel_history_gpu.fill(0)
-
- n = len(gpuphotons.pos)
-
- for first_photon, photons_this_round, blocks in \
- chunk_iterator(n, nthreads_per_block, max_blocks):
- self.gpu_funcs.run_daq(rng_states, np.uint32(0x1 << 2), np.float32(self.daq_pmt_rms), np.int32(first_photon), np.int32(photons_this_round), gpuphotons.t, gpuphotons.flags, gpuphotons.last_hit_triangles, self.solid_id_map_gpu, np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, self.channel_q_gpu, self.channel_history_gpu, block=(nthreads_per_block,1,1), grid=(blocks,1))
-
- self.gpu_funcs.convert_sortable_int_to_float(np.int32(len(self.earliest_time_int_gpu)), self.earliest_time_int_gpu, self.earliest_time_gpu, block=(nthreads_per_block,1,1), grid=(len(self.earliest_time_int_gpu)//nthreads_per_block+1,1))
-
- return GPUChannels(self.earliest_time_gpu, self.channel_q_gpu, self.channel_history_gpu)
-
-class GPUPDF(object):
- def __init__(self):
- self.module = get_cu_module('daq.cu', options=cuda_options,
- include_source_directory=False)
- self.gpu_funcs = GPUFuncs(self.module)
-
- def setup_pdf(self, max_pmt_id, tbins, trange, qbins, qrange):
- """Setup GPU arrays to hold PDF information.
-
- max_pmt_id: int, largest PMT id #
- tbins: number of time bins
- trange: tuple of (min, max) time in PDF
- qbins: number of charge bins
- qrange: tuple of (min, max) charge in PDF
- """
- self.events_in_histogram = 0
- self.hitcount_gpu = ga.zeros(max_pmt_id+1, dtype=np.uint32)
- self.pdf_gpu = ga.zeros(shape=(max_pmt_id+1, tbins, qbins),
- dtype=np.uint32)
- self.tbins = tbins
- self.trange = trange
- self.qbins = qbins
- self.qrange = qrange
-
- def clear_pdf(self):
- """Rezero the PDF counters."""
- self.hitcount_gpu.fill(0)
- self.pdf_gpu.fill(0)
-
- def add_hits_to_pdf(self, gpuchannels, nthreads_per_block=64):
- self.gpu_funcs.bin_hits(np.int32(len(self.hitcount_gpu)),
- gpuchannels.q,
- gpuchannels.t,
- self.hitcount_gpu,
- np.int32(self.tbins),
- np.float32(self.trange[0]),
- np.float32(self.trange[1]),
- np.int32(self.qbins),
- np.float32(self.qrange[0]),
- np.float32(self.qrange[1]),
- self.pdf_gpu,
- block=(nthreads_per_block,1,1),
- grid=(len(gpuchannels.t)//nthreads_per_block+1,1))
-
-
- self.events_in_histogram += 1
-
- def get_pdfs(self):
- """Returns the 1D hitcount array and the 3D [channel, time, charge]
- histogram."""
- return self.hitcount_gpu.get(), self.pdf_gpu.get()
-
- def setup_pdf_eval(self, event_hit, event_time, event_charge, min_twidth,
- trange, min_qwidth, qrange, min_bin_content=10,
- time_only=True):
- """Setup GPU arrays to compute PDF values for the given event.
- The pdf_eval calculation allows the PDF to be evaluated at a
- single point for each channel as the Monte Carlo is run. The
- effective bin size will be as small as (`min_twidth`,
- `min_qwidth`) around the point of interest, but will be large
- enough to ensure that `min_bin_content` Monte Carlo events
- fall into the bin.
-
- event_hit: ndarray
- Hit or not-hit status for each channel in the detector.
- event_time: ndarray
- Hit time for each channel in the detector. If channel
- not hit, the time will be ignored.
- event_charge: ndarray
- Integrated charge for each channel in the detector.
- If channel not hit, the charge will be ignored.
-
- min_twidth: float
- Minimum bin size in the time dimension
- trange: (float, float)
- Range of time dimension in PDF
- min_qwidth: float
- Minimum bin size in charge dimension
- qrange: (float, float)
- Range of charge dimension in PDF
- min_bin_content: int
- The bin will be expanded to include at least this many events
- time_only: bool
- If True, only the time observable will be used in the PDF.
- """
- self.event_hit_gpu = ga.to_gpu(event_hit.astype(np.uint32))
- self.event_time_gpu = ga.to_gpu(event_time.astype(np.float32))
- self.event_charge_gpu = ga.to_gpu(event_charge.astype(np.float32))
-
- self.eval_hitcount_gpu = ga.zeros(len(event_hit), dtype=np.uint32)
- self.eval_bincount_gpu = ga.zeros(len(event_hit), dtype=np.uint32)
- self.nearest_mc_gpu = ga.empty(shape=len(event_hit) * min_bin_content,
- dtype=np.float32)
- self.nearest_mc_gpu.fill(1e9)
-
- self.min_twidth = min_twidth
- self.trange = trange
- self.min_qwidth = min_qwidth
- self.qrange = qrange
- self.min_bin_content = min_bin_content
- self.time_only = time_only
-
- def clear_pdf_eval(self):
- "Reset PDF evaluation counters to start accumulating new Monte Carlo."
- self.eval_hitcount_gpu.fill(0)
- self.eval_bincount_gpu.fill(0)
- self.nearest_mc_gpu.fill(1e9)
-
- def accumulate_pdf_eval(self, gpuchannels, nthreads_per_block=64):
- "Add the most recent results of run_daq() to the PDF evaluation."
- self.gpu_funcs.accumulate_pdf_eval(np.int32(self.time_only),
- np.int32(len(self.event_hit_gpu)),
- self.event_hit_gpu,
- self.event_time_gpu,
- self.event_charge_gpu,
- gpuchannels.t,
- gpuchannels.q,
- self.eval_hitcount_gpu,
- self.eval_bincount_gpu,
- np.float32(self.min_twidth),
- np.float32(self.trange[0]),
- np.float32(self.trange[1]),
- np.float32(self.min_qwidth),
- np.float32(self.qrange[0]),
- np.float32(self.qrange[1]),
- self.nearest_mc_gpu,
- np.int32(self.min_bin_content),
- block=(nthreads_per_block,1,1),
- grid=(len(gpuchannels.t)//nthreads_per_block+1,1))
-
- def get_pdf_eval(self):
- evhit = self.event_hit_gpu.get().astype(bool)
- hitcount = self.eval_hitcount_gpu.get()
- bincount = self.eval_bincount_gpu.get()
-
- pdf_value = np.zeros(len(hitcount), dtype=float)
- pdf_frac_uncert = np.zeros_like(pdf_value)
-
- # PDF value for high stats bins
- high_stats = bincount >= self.min_bin_content
- if high_stats.any():
- if self.time_only:
- pdf_value[high_stats] = bincount[high_stats].astype(float) / hitcount[high_stats] / self.min_twidth
- else:
- assert Exception('Unimplemented 2D (time,charge) mode!')
-
- pdf_frac_uncert[high_stats] = 1.0/np.sqrt(bincount[high_stats])
-
- # PDF value for low stats bins
- low_stats = ~high_stats & (hitcount > 0) & evhit
-
- nearest_mc = self.nearest_mc_gpu.get().reshape((len(hitcount), self.min_bin_content))
-
- # Deal with the case where we did not even get min_bin_content events
- # in the PDF but also clamp the lower range to ensure we don't index
- # by a negative number in 2 lines
- last_valid_entry = np.maximum(0, (nearest_mc < 1e9).astype(int).sum(axis=1) - 1)
- distance = nearest_mc[np.arange(len(last_valid_entry)),last_valid_entry]
-
- if low_stats.any():
- if self.time_only:
- pdf_value[low_stats] = (last_valid_entry[low_stats] + 1).astype(float) / hitcount[low_stats] / distance[low_stats] / 2.0
- else:
- assert Exception('Unimplemented 2D (time,charge) mode!')
-
- pdf_frac_uncert[low_stats] = 1.0/np.sqrt(last_valid_entry[low_stats] + 1)
-
- # PDFs with no stats got zero by default during array creation
-
- print 'high_stats:', high_stats.sum(), 'low_stats', low_stats.sum()
-
- return hitcount, pdf_value, pdf_value * pdf_frac_uncert
-
-def create_cuda_context(device_id=None):
- """Initialize and return a CUDA context on the specified device.
- If device_id is None, the default device is used."""
- if device_id is None:
- try:
- context = pycuda.tools.make_default_context()
- except cuda.LogicError:
- # initialize cuda
- cuda.init()
- context = pycuda.tools.make_default_context()
- else:
- try:
- device = cuda.Device(device_id)
- except cuda.LogicError:
- # initialize cuda
- cuda.init()
- device = cuda.Device(device_id)
- context = device.make_context()
-
- context.set_cache_config(cuda.func_cache.PREFER_L1)
-
- return context
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..139ba54
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,35 @@
+import distribute_setup
+distribute_setup.use_setuptools()
+from setuptools import setup, find_packages, Extension
+import subprocess
+
+geant4_cflags = subprocess.check_output(['geant4-config','--cflags']).split()
+geant4_libs = subprocess.check_output(['geant4-config','--libs']).split()
+
+setup(
+ name = 'Chroma',
+ version = '0.5',
+ packages = find_packages(),
+ include_package_data=True,
+
+ scripts = ['bin/chroma-sim', 'bin/chroma-cam'],
+ ext_modules = [
+ Extension('chroma.generator._g4chroma',
+ ['src/G4chroma.cc'],
+ include_dirs=['src'],
+ extra_compile_args=geant4_cflags,
+ extra_link_args=geant4_libs,
+ libraries=['boost_python'],
+ ),
+ Extension('chroma.generator.mute',
+ ['src/mute.cc'],
+ extra_compile_args=geant4_cflags,
+ extra_link_args=geant4_libs,
+ libraries=['boost_python']),
+ ],
+
+ install_requires = ['uncertainties','pyzmq-static','h5py','spnav', 'pycuda',
+ 'numpy>1.6', 'pygame'],
+ test_suite = 'nose.collector',
+
+)
diff --git a/generator/G4chroma.cc b/src/G4chroma.cc
index 1aba133..340cc4c 100644
--- a/generator/G4chroma.cc
+++ b/src/G4chroma.cc
@@ -224,7 +224,7 @@ void export_Chroma()
;
}
-BOOST_PYTHON_MODULE(G4chroma)
+BOOST_PYTHON_MODULE(_g4chroma)
{
export_Chroma();
}
diff --git a/generator/G4chroma.hh b/src/G4chroma.hh
index 4f085aa..4f085aa 100644
--- a/generator/G4chroma.hh
+++ b/src/G4chroma.hh
diff --git a/src/mute.cc b/src/mute.cc
new file mode 100644
index 0000000..e13814f
--- /dev/null
+++ b/src/mute.cc
@@ -0,0 +1,41 @@
+#include <geant4/G4ios.hh>
+
+class discard_streambuf : public std::streambuf {
+public:
+ discard_streambuf() { };
+
+ virtual int_type overflow(int_type c) {
+ // Do nothing with this character
+ return c;
+ };
+};
+
+discard_streambuf discard;
+std::streambuf *g4cout_orig = G4cout.rdbuf();
+std::streambuf *g4cerr_orig = G4cerr.rdbuf();
+
+void mute_g4mute() {
+ G4cout.rdbuf(&discard);
+ G4cerr.rdbuf(&discard);
+}
+
+void mute_g4unmute() {
+ G4cout.rdbuf(g4cout_orig);
+ G4cerr.rdbuf(g4cerr_orig);
+}
+
+
+#include <boost/python.hpp>
+
+using namespace boost::python;
+
+void export_mute()
+{
+ def("g4mute", mute_g4mute, default_call_policies(), "Silence all GEANT4 output");
+ def("g4unmute", mute_g4unmute, default_call_policies(), "Re-enable GEANT4 output after calling ``g4mute()``.");
+}
+
+BOOST_PYTHON_MODULE(mute)
+{
+ export_mute();
+}
diff --git a/src/__init__.py b/test/__init__.py
index e69de29..e69de29 100644
--- a/src/__init__.py
+++ b/test/__init__.py
diff --git a/tests/data/ray_intersection.npz b/test/data/ray_intersection.npz
index fba0e40..fba0e40 100644
--- a/tests/data/ray_intersection.npz
+++ b/test/data/ray_intersection.npz
Binary files differ
diff --git a/tests/linalg_test.cu b/test/linalg_test.cu
index 4e9c983..4e9c983 100644
--- a/tests/linalg_test.cu
+++ b/test/linalg_test.cu
diff --git a/tests/linalg_test.py b/test/linalg_test.py
index 31688d9..23ebfa7 100644
--- a/tests/linalg_test.py
+++ b/test/linalg_test.py
@@ -10,7 +10,7 @@ float3 = gpuarray.vec.float3
print 'device %s' % autoinit.device.name()
current_directory = os.path.split(os.path.realpath(__file__))[0]
-source_directory = current_directory + '/../src'
+from chroma.cuda import srcdir as source_directory
source = open(current_directory + '/linalg_test.cu').read()
diff --git a/tests/matrix_test.cu b/test/matrix_test.cu
index d64cb34..d64cb34 100644
--- a/tests/matrix_test.cu
+++ b/test/matrix_test.cu
diff --git a/tests/matrix_test.py b/test/matrix_test.py
index c843025..3499945 100644
--- a/tests/matrix_test.py
+++ b/test/matrix_test.py
@@ -10,7 +10,8 @@ float3 = gpuarray.vec.float3
print 'device %s' % autoinit.device.name()
current_directory = os.path.split(os.path.realpath(__file__))[0]
-source_directory = current_directory + '/../src'
+
+from chroma.cuda import srcdir as source_directory
source = open(current_directory + '/matrix_test.cu').read()
diff --git a/tests/rotate_test.cu b/test/rotate_test.cu
index 6cafc12..6cafc12 100644
--- a/tests/rotate_test.cu
+++ b/test/rotate_test.cu
diff --git a/tests/rotate_test.py b/test/rotate_test.py
index 92eff84..7ac7804 100644
--- a/tests/rotate_test.py
+++ b/test/rotate_test.py
@@ -18,7 +18,7 @@ def rotate(x, phi, n):
print 'device %s' % autoinit.device.name()
current_directory = os.path.split(os.path.realpath(__file__))[0]
-source_directory = current_directory + '/../src'
+from chroma.cuda import srcdir as source_directory
source = open(current_directory + '/rotate_test.cu').read()
diff --git a/tests/test_generator_photon.py b/test/test_generator_photon.py
index 13501fe..13501fe 100644
--- a/tests/test_generator_photon.py
+++ b/test/test_generator_photon.py
diff --git a/tests/test_generator_vertex.py b/test/test_generator_vertex.py
index cec363d..cec363d 100644
--- a/tests/test_generator_vertex.py
+++ b/test/test_generator_vertex.py
diff --git a/tests/test_fileio.py b/test/test_io.py
index 3869a9f..3553058 100644
--- a/tests/test_fileio.py
+++ b/test/test_io.py
@@ -1,9 +1,9 @@
import unittest
-from chroma.fileio import root
+from chroma.io import root
from chroma import event
import numpy as np
-class TestFileIO(unittest.TestCase):
+class TestRootIO(unittest.TestCase):
def test_file_write_and_read(self):
ev = event.Event(1, event.Vertex('e-', pos=(0,0,1), dir=(1,0,0),
ke=15.0, pol=(0,1,0)))
diff --git a/test/test_parabola.py b/test/test_parabola.py
new file mode 100644
index 0000000..7d7e7b2
--- /dev/null
+++ b/test/test_parabola.py
@@ -0,0 +1,66 @@
+import chroma.parabola as parabola
+import numpy
+from uncertainties import ufloat, unumpy
+import unittest
+from numpy.testing import assert_almost_equal
+
+class Test1D(unittest.TestCase):
+ def setUp(self):
+ self.x = numpy.array([[-1.0], [0.0], [1.0]])
+ self.y = unumpy.uarray(([2.0, 1.0, 6.0], [0.1, 0.1, 0.1]))
+ self.a = 1.0
+ self.b = numpy.array([2.0])
+ self.c = numpy.array([[3.0]])
+
+ def test_parabola_eval(self):
+ y = parabola.parabola_eval(self.x, self.a, self.b, self.c)
+ assert_almost_equal(y, unumpy.nominal_values(self.y))
+
+ def test_solve(self):
+ points = zip(self.x, self.y)
+ a, b, c, chi2, prob = parabola.parabola_fit(points)
+
+ assert_almost_equal(a.nominal_value, 1.0)
+ assert_almost_equal(b[0].nominal_value, 2.0)
+ assert_almost_equal(c[0,0].nominal_value, 3.0)
+
+ # Compare to ROOT TGraph fitting uncerts
+ assert_almost_equal(a.std_dev(), 0.1)
+ assert_almost_equal(b[0].std_dev(), 0.0707107)
+ assert_almost_equal(c[0,0].std_dev(), 0.122474, decimal=5)
+
+
+class Test2D(unittest.TestCase):
+ def setUp(self):
+ self.x = numpy.array([[-1.0,-1.0], [-1.0, 0.0], [-1.0, 1.0],
+ [ 0.0,-1.0], [ 0.0, 0.0], [ 0.0, 1.0],
+ [ 1.0,-1.0], [ 1.0, 0.0], [ 1.0, 1.0]])
+
+ self.a = 1.0
+ self.b = numpy.array([2.0, 3.0])
+ self.c = numpy.array([[3.0, 1.0],[1.0, 4.0]])
+
+ self.y = numpy.zeros(len(self.x), dtype=object)
+ for i, (x0, x1) in enumerate(self.x):
+ value = self.a \
+ + x0 * self.b[0] + x1 * self.b[1] \
+ + x0**2 * self.c[0,0] + x0 * x1 * self.c[0,1] \
+ + x1 * x0 * self.c[1,0] + x1**2 * self.c[1,1]
+ self.y[i] = ufloat((value, 0.1))
+
+ def test_parabola_eval(self):
+ y = parabola.parabola_eval(self.x, self.a, self.b, self.c)
+ assert_almost_equal(y, unumpy.nominal_values(self.y))
+
+ def test_solve(self):
+ points = zip(self.x, self.y)
+ a, b, c, chi2, prob = parabola.parabola_fit(points)
+ assert_almost_equal(chi2, 0.0)
+ assert_almost_equal(a.nominal_value, 1.0)
+ assert_almost_equal(b[0].nominal_value, 2.0)
+ assert_almost_equal(b[1].nominal_value, 3.0)
+ assert_almost_equal(c[0,0].nominal_value, 3.0)
+ assert_almost_equal(c[0,1].nominal_value, 1.0)
+ assert_almost_equal(c[1,0].nominal_value, 1.0)
+ assert_almost_equal(c[1,1].nominal_value, 4.0)
+
diff --git a/tests/test_pdf.py b/test/test_pdf.py
index 571cbd4..571cbd4 100644
--- a/tests/test_pdf.py
+++ b/test/test_pdf.py
diff --git a/tests/test_propagation.py b/test/test_propagation.py
index 43ecb9b..43ecb9b 100644
--- a/tests/test_propagation.py
+++ b/test/test_propagation.py
diff --git a/tests/test_ray_intersection.py b/test/test_ray_intersection.py
index 7d0c53c..ce364e0 100644
--- a/tests/test_ray_intersection.py
+++ b/test/test_ray_intersection.py
@@ -15,7 +15,7 @@ class TestRayIntersection(unittest.TestCase):
self.pos_gpu = ga.to_gpu(chroma.gpu.to_float3(pos))
self.dir_gpu = ga.to_gpu(chroma.gpu.to_float3(dir))
- testdir = os.path.dirname(os.path.abspath(chroma.tests.__file__))
+ testdir = os.path.dirname(os.path.abspath(__file__))
self.dx_standard = np.load(os.path.join(testdir,
'data/ray_intersection.npz'))
def test_intersection_distance(self):
diff --git a/tests/test_rayleigh.py b/test/test_rayleigh.py
index 02ccb41..02ccb41 100644
--- a/tests/test_rayleigh.py
+++ b/test/test_rayleigh.py
diff --git a/tests/test_sample_cdf.cu b/test/test_sample_cdf.cu
index 1401772..1401772 100644
--- a/tests/test_sample_cdf.cu
+++ b/test/test_sample_cdf.cu
diff --git a/test/test_sample_cdf.py b/test/test_sample_cdf.py
new file mode 100644
index 0000000..1510768
--- /dev/null
+++ b/test/test_sample_cdf.py
@@ -0,0 +1,67 @@
+import pycuda.driver as cuda
+from pycuda.compiler import SourceModule
+from pycuda import gpuarray
+import numpy as np
+import ROOT
+import os
+import unittest
+import chroma
+
+class TestSampling(unittest.TestCase):
+ def setUp(self):
+ self.context = chroma.gpu.create_cuda_context()
+ current_directory = os.path.split(os.path.realpath(__file__))[0]
+ from chroma.cuda import srcdir as source_directory
+ source = open(current_directory + '/test_sample_cdf.cu').read()
+ self.mod = SourceModule(source, options=['-I' + source_directory], no_extern_c=True, cache_dir=False)
+ self.test_sample_cdf = self.mod.get_function('test_sample_cdf')
+
+ def compare_sampling(self, hist, reps=10):
+ nbins = hist.GetNbinsX();
+ xaxis = hist.GetXaxis()
+ intg = hist.GetIntegral()
+ cdf_y = np.empty(nbins+1, dtype=float)
+ cdf_x = np.empty_like(cdf_y)
+
+ cdf_x[0] = xaxis.GetBinLowEdge(1)
+ cdf_y[0] = 0.0
+ for i in xrange(1,len(cdf_x)):
+ cdf_y[i] = intg[i]
+ cdf_x[i] = xaxis.GetBinUpEdge(i)
+
+ cdf_x_gpu = gpuarray.to_gpu(cdf_x.astype(np.float32))
+ cdf_y_gpu = gpuarray.to_gpu(cdf_y.astype(np.float32))
+ block =(128,1,1)
+ grid = (128, 1)
+ out_gpu = gpuarray.empty(shape=int(block[0]*grid[0]), dtype=np.float32)
+
+ out_h = ROOT.TH1D('out_h', '', hist.GetNbinsX(),
+ xaxis.GetXmin(),
+ xaxis.GetXmax())
+ out_h.SetLineColor(ROOT.kGreen)
+
+ for i in xrange(reps):
+ self.test_sample_cdf(np.int32(i),
+ np.int32(len(cdf_x_gpu)),
+ cdf_x_gpu, cdf_y_gpu, out_gpu, block=block, grid=grid)
+ out = out_gpu.get()
+ for v in out:
+ out_h.Fill(v)
+
+ prob = out_h.KolmogorovTest(hist)
+ return prob, out_h
+
+ def test_sampling(self):
+ '''Verify that the CDF-based sampler on the GPU reproduces a binned
+ Gaussian distribution'''
+ f = ROOT.TF1('f_gaussian', 'gaus(0)', -5, 5)
+ f.SetParameters(1.0/np.sqrt(np.pi * 2), 0.0, 1.0)
+ gaussian = ROOT.TH1D('gaussian', '', 100, -5, 5)
+ gaussian.Add(f)
+
+ prob, out_h = self.compare_sampling(gaussian, reps=50)
+
+ assert prob > 0.01
+
+ def tearDown(self):
+ self.context.pop()
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tests/__init__.py
+++ /dev/null
diff --git a/tests/test_sample_cdf.py b/tests/test_sample_cdf.py
deleted file mode 100644
index 2baa2ac..0000000
--- a/tests/test_sample_cdf.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pycuda.autoinit
-import pycuda.driver as cuda
-from pycuda.compiler import SourceModule
-from pycuda import gpuarray
-import numpy as np
-import ROOT
-import os
-
-current_directory = os.path.split(os.path.realpath(__file__))[0]
-source_directory = current_directory + '/../src'
-
-source = open(current_directory + '/test_sample_cdf.cu').read()
-
-mod = SourceModule(source, options=['-I' + source_directory], no_extern_c=True, cache_dir=False)
-
-test_sample_cdf = mod.get_function('test_sample_cdf')
-
-def compare_sampling(hist, reps=10):
- nbins = hist.GetNbinsX();
- xaxis = hist.GetXaxis()
- intg = hist.GetIntegral()
- cdf_y = np.empty(nbins+1, dtype=float)
- cdf_x = np.empty_like(cdf_y)
-
- cdf_x[0] = xaxis.GetBinLowEdge(1)
- cdf_y[0] = 0.0
- for i in xrange(1,len(cdf_x)):
- cdf_y[i] = intg[i]
- cdf_x[i] = xaxis.GetBinUpEdge(i)
-
- cdf_x_gpu = gpuarray.to_gpu(cdf_x.astype(np.float32))
- cdf_y_gpu = gpuarray.to_gpu(cdf_y.astype(np.float32))
- block =(128,1,1)
- grid = (128, 1)
- out_gpu = gpuarray.empty(shape=int(block[0]*grid[0]), dtype=np.float32)
-
- out_h = ROOT.TH1D('out_h', '', hist.GetNbinsX(),
- xaxis.GetXmin(),
- xaxis.GetXmax())
- out_h.SetLineColor(ROOT.kGreen)
-
- for i in xrange(reps):
- test_sample_cdf(np.int32(i),
- np.int32(len(cdf_x_gpu)),
- cdf_x_gpu, cdf_y_gpu, out_gpu, block=block, grid=grid)
- out = out_gpu.get()
- for v in out:
- out_h.Fill(v)
-
- prob = out_h.KolmogorovTest(hist)
- return prob, out_h
-
-def test_sampling():
- '''Verify that the CDF-based sampler on the GPU reproduces a binned
- Gaussian distribution'''
- f = ROOT.TF1('f_gaussian', 'gaus(0)', -5, 5)
- f.SetParameters(1.0/np.sqrt(np.pi * 2), 0.0, 1.0)
- gaussian = ROOT.TH1D('gaussian', '', 100, -5, 5)
- gaussian.Add(f)
-
- prob, out_h = compare_sampling(gaussian, reps=50)
-
- assert prob > 0.01
-