summaryrefslogtreecommitdiff
path: root/chroma/gpu/tools.py
diff options
context:
space:
mode:
authorStan Seibert <stan@mtrr.org>2012-01-18 23:10:21 -0500
committertlatorre <tlatorre@uchicago.edu>2021-05-09 08:42:38 -0700
commit14309ab8618a80c7f67c7d80d43bbb4779f0bb2f (patch)
tree73bf89f3910eb39f92daf0793b81161be1c90b36 /chroma/gpu/tools.py
parent4c212dec68cd154577299b825aff00c0ed765813 (diff)
downloadchroma-14309ab8618a80c7f67c7d80d43bbb4779f0bb2f.tar.gz
chroma-14309ab8618a80c7f67c7d80d43bbb4779f0bb2f.tar.bz2
chroma-14309ab8618a80c7f67c7d80d43bbb4779f0bb2f.zip
Simple BVH generator using new infrastructure
Diffstat (limited to 'chroma/gpu/tools.py')
-rw-r--r--chroma/gpu/tools.py33
1 files changed, 33 insertions, 0 deletions
diff --git a/chroma/gpu/tools.py b/chroma/gpu/tools.py
index 707a45d..b151f80 100644
--- a/chroma/gpu/tools.py
+++ b/chroma/gpu/tools.py
@@ -181,3 +181,36 @@ def format_size(size):
def format_array(name, array):
return '%-15s %6s %6s' % \
(name, format_size(len(array)), format_size(array.nbytes))
+
def Mapped(array):
    """Analog to pycuda.driver.InOut(), but indicates this array
    is memory mapped to the device space and should not be copied.
    """
    # The pagelocked allocation is held in ``array.base``; expose its
    # device-space address as a pointer-sized integer so it can be
    # passed directly as a kernel argument.
    device_ptr = array.base.get_device_pointer()
    return np.intp(device_ptr)
+
def mapped_alloc(pagelocked_alloc_func, shape, dtype, write_combined):
    """Return a pagelocked host array mapped into the CUDA device
    address space, with a gpudata field set so it just works with CUDA
    functions.

    ``pagelocked_alloc_func`` is one of the pycuda pagelocked
    allocators (e.g. ``cuda.pagelocked_empty``); ``write_combined``
    additionally requests write-combined memory.
    """
    if write_combined:
        flags = (cuda.host_alloc_flags.DEVICEMAP
                 | cuda.host_alloc_flags.WRITECOMBINED)
    else:
        flags = cuda.host_alloc_flags.DEVICEMAP
    return pagelocked_alloc_func(shape=shape, dtype=dtype, mem_flags=flags)
+
def mapped_empty(shape, dtype, write_combined=False):
    """Allocate an uninitialized device-mapped host array.

    See mapped_alloc() for details.
    """
    return mapped_alloc(cuda.pagelocked_empty, shape, dtype,
                        write_combined)
+
def mapped_empty_like(other, write_combined=False):
    """Allocate an uninitialized device-mapped host array with the
    same shape and dtype as ``other``.

    See mapped_alloc() for details.
    """
    return mapped_alloc(cuda.pagelocked_empty, other.shape,
                        other.dtype, write_combined)
+
def mapped_zeros(shape, dtype, write_combined=False):
    """Allocate a zero-initialized device-mapped host array.

    See mapped_alloc() for details.
    """
    return mapped_alloc(cuda.pagelocked_zeros, shape, dtype,
                        write_combined)
+
def mapped_zeros_like(other, write_combined=False):
    """Allocate a zero-initialized device-mapped host array with the
    same shape and dtype as ``other``.

    See mapped_alloc() for details.
    """
    return mapped_alloc(cuda.pagelocked_zeros, other.shape,
                        other.dtype, write_combined)