author     Stan Seibert <stan@mtrr.org>        2012-01-19 21:36:19 -0500
committer  tlatorre <tlatorre@uchicago.edu>    2021-05-09 08:42:38 -0700
commit     bf46d6b9f42330b42f4634ea6ac0e2b318942786 (patch)
tree       5b18635fcc5228c8326ab5cd43a35d38575c1215
parent     3aa00b69bf01f6b2a2f920642f8faa6a52bbb1c4 (diff)
Implement a new loader function that turns a string into a Geometry,
searching through files, named geometries in the cache, and geometry creation functions. The loader function is also responsible for fetching or creating a BVH to go with the geometry. This commit also removes some code that has been replaced by the new system; other pieces will come back in future commits.
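
For example, the new entry point can be used like this (a minimal sketch based on the loader added below; the STL filename and the geometry/BVH names are illustrative, not real cache entries):

    import chroma.loader

    # Build a Geometry from an STL mesh on disk; the BVH is fetched from
    # or added to the cache as the options allow.
    geometry = chroma.loader.load_geometry_from_string('detector.stl')

    # Load a named geometry from the cache together with a named BVH.
    geometry = chroma.loader.load_geometry_from_string('my_detector:fast_bvh')

    # Call a geometry creation function found on the current $PYTHONPATH.
    geometry = chroma.loader.load_geometry_from_string('@chroma.models.lionsolid')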
-rwxr-xr-x  bin/chroma-sim          |  13
-rw-r--r--  chroma/geometry.py      | 215
-rw-r--r--  chroma/gpu/geometry.py  | 257
-rw-r--r--  chroma/loader.py        | 137
4 files changed, 159 insertions(+), 463 deletions(-)
diff --git a/bin/chroma-sim b/bin/chroma-sim
index 854817a..ca1463c 100755
--- a/bin/chroma-sim
+++ b/bin/chroma-sim
@@ -19,6 +19,7 @@ def main():
from chroma.io import root
from chroma.rootimport import ROOT
from chroma.tools import enable_debug_on_crash
+ import chroma.loader
ROOT.gROOT.SetBatch()
@@ -78,17 +79,7 @@ def main():
if options.debug:
enable_debug_on_crash()
- module_name, function_name = args[0].rsplit('.', 1)
-
- try:
- module = __import__(module_name, fromlist=[function_name])
- except ImportError:
- raise
-
- detector = getattr(module, function_name)
-
- if inspect.isfunction(detector):
- detector = detector()
+ detector = chroma.loader.load_geometry_from_string(args[0])
pos = np.array([float(s) for s in options.pos.split(',')], dtype=float)
dir = np.array([float(s) for s in options.dir.split(',')], dtype=float)
diff --git a/chroma/geometry.py b/chroma/geometry.py
index 1d2175c..a7e85ee 100644
--- a/chroma/geometry.py
+++ b/chroma/geometry.py
@@ -7,7 +7,8 @@ import numpy as np
import time
from chroma.itertoolset import *
-from chroma.tools import timeit, profile_if_possible, filled_array
+from chroma.tools import timeit, profile_if_possible, filled_array, \
+ memoize_method_with_dictionary_arg
from chroma.log import logger
# all material/surface properties are interpolated at these
@@ -84,31 +85,6 @@ class Mesh(object):
checksum.update(self.triangles)
return checksum.hexdigest()
-def memoize_method_with_dictionary_arg(func):
- def lookup(*args):
- # based on function by Michele Simionato
- # http://www.phyast.pitt.edu/~micheles/python/
- # Modified to work for class method with dictionary argument
-
- assert len(args) == 2
- # create hashable arguments by replacing dictionaries with tuples of items
- dict_items = args[1].items()
- dict_items.sort()
- hashable_args = (args[0], tuple(dict_items))
- try:
- return func._memoize_dic[hashable_args]
- except AttributeError:
- # _memoize_dic doesn't exist yet.
-
- result = func(*args)
- func._memoize_dic = {hashable_args: result}
- return result
- except KeyError:
- result = func(*args)
- func._memoize_dic[hashable_args] = result
- return result
- return lookup
-
class Solid(object):
"""Solid object attaches materials, surfaces, and colors to each triangle
in a Mesh object."""
@@ -214,44 +190,6 @@ class Surface(object):
def __repr__(self):
return '<Surface %s>' % self.name
-def interleave(arr, bits):
- """
- Interleave the bits of quantized three-dimensional points in space.
-
- Example
- >>> interleave(np.identity(3, dtype=np.int))
- array([4, 2, 1], dtype=uint64)
- """
- if len(arr.shape) != 2 or arr.shape[1] != 3:
- raise Exception('shape mismatch')
-
- z = np.zeros(arr.shape[0], dtype=np.uint64)
- for i in range(bits):
- z |= (arr[:,2] & 1 << i) << (2*i) | \
- (arr[:,1] & 1 << i) << (2*i+1) | \
- (arr[:,0] & 1 << i) << (2*i+2)
- return z
-
-def morton_order(mesh, bits):
- """
- Return a list of zvalues for triangles in `mesh` by interleaving the
- bits of the quantized center coordinates of each triangle. Each coordinate
- axis is quantized into 2**bits bins.
- """
- lower_bound, upper_bound = mesh.get_bounds()
-
- if bits <= 0 or bits > 21:
- raise Exception('number of bits must be in the range (0,21].')
-
- max_value = 2**bits - 1
-
- def quantize(x):
- return np.uint64((x-lower_bound)*max_value/(upper_bound-lower_bound))
-
- mean_positions = quantize(np.mean(mesh.assemble(), axis=1))
-
- return interleave(mean_positions, bits)
-
class Geometry(object):
"Geometry object."
def __init__(self, detector_material=None):
@@ -259,6 +197,7 @@ class Geometry(object):
self.solids = []
self.solid_rotations = []
self.solid_displacements = []
+ self.bvh = None
def add_solid(self, solid, rotation=None, displacement=None):
"""
@@ -341,151 +280,3 @@ class Geometry(object):
self.surface_index[self.surface_index == surface_lookup[None]] = -1
except KeyError:
pass
-
-
-
- @profile_if_possible
- def build(self, bits=11, shift=3, use_cache=True):
- """
- Build the bounding volume hierarchy, material/surface code arrays, and
- color array for this geometry. If the bounding volume hierarchy is
- cached, load the cache instead of rebuilding, else build and cache it.
-
- Args:
- - bits: int, *optional*
- The number of bits to quantize each linear dimension with when
- morton ordering the triangle centers for building the bounding
- volume hierarchy. Defaults to 8.
- - shift: int, *optional*
- The number of bits to shift the zvalue of each node when
- building the next layer of the bounding volume hierarchy.
- Defaults to 3.
- - use_cache: bool, *optional*
- If true, the on-disk cache in ~/.chroma/ will be checked for
- a previously built version of this geometry, otherwise the
- BVH will be computed and saved to the cache. If false,
- the cache is ignored and also not updated.
- """
- self.flatten()
-
- checksum = md5(str(bits))
- checksum.update(str(shift))
- checksum.update(self.mesh.vertices)
- checksum.update(self.mesh.triangles)
-
- cache_dir = os.path.expanduser('~/.chroma')
- cache_file = checksum.hexdigest()+'.npz'
- cache_path = os.path.join(cache_dir, cache_file)
-
- if use_cache:
- try:
- npz_file = np.load(cache_path)
- except IOError:
- pass
- else:
- logger.info('Loading BVH from cache.')
- data = dict(npz_file)
-
- # take() is faster than fancy indexing by 5x!
- # tip from http://wesmckinney.com/blog/?p=215
- reorder = data.pop('reorder')
- self.mesh.triangles = self.mesh.triangles.take(reorder, axis=0)
- self.material1_index = self.material1_index.take(reorder, axis=0)
- self.material2_index = self.material2_index.take(reorder, axis=0)
- self.surface_index = self.surface_index.take(reorder, axis=0)
- self.colors = self.colors.take(reorder, axis=0)
- self.solid_id = self.solid_id.take(reorder, axis=0)
-
- for key, value in data.iteritems():
- setattr(self, key, value)
-
- logger.info(' nodes: %d' % len(self.upper_bounds))
- return
-
- logger.info('Constructing new BVH from mesh. This may take several minutes.')
-
- start_time = time.time()
-
- zvalues_mesh = morton_order(self.mesh, bits)
- reorder = np.argsort(zvalues_mesh)
- zvalues_mesh = zvalues_mesh[reorder]
-
- if (np.diff(zvalues_mesh) < 0).any():
- raise Exception('zvalues_mesh out of order.')
-
- self.mesh.triangles = self.mesh.triangles[reorder]
-
- self.material1_index = self.material1_index[reorder]
- self.material2_index = self.material2_index[reorder]
- self.surface_index = self.surface_index[reorder]
- self.colors = self.colors[reorder]
- self.solid_id = self.solid_id[reorder]
-
- unique_zvalues = np.unique(zvalues_mesh)
-
- while unique_zvalues.size > zvalues_mesh.size/np.e:
- zvalues_mesh = zvalues_mesh >> shift
- unique_zvalues = np.unique(zvalues_mesh)
-
- self.lower_bounds = np.empty((unique_zvalues.size,3), dtype=np.float32)
- self.upper_bounds = np.empty((unique_zvalues.size,3), dtype=np.float32)
-
- assembled_mesh = self.mesh.assemble(group=False)
- self.node_map = np.searchsorted(zvalues_mesh, unique_zvalues)
- self.node_map_end = np.searchsorted(zvalues_mesh, unique_zvalues, side='right')
-
- for i, (zi1, zi2) in enumerate(izip(self.node_map, self.node_map_end)):
- self.lower_bounds[i] = assembled_mesh[zi1*3:zi2*3].min(axis=0)
- self.upper_bounds[i] = assembled_mesh[zi1*3:zi2*3].max(axis=0)
-
- self.layers = np.zeros(unique_zvalues.size, dtype=np.uint32)
- self.first_node = unique_zvalues.size
-
- begin_last_layer = 0
-
- layer_offsets = [begin_last_layer]
-
- for layer in count(1):
- bit_shifted_zvalues = unique_zvalues >> shift
- unique_zvalues = np.unique(bit_shifted_zvalues)
-
- i0 = begin_last_layer + bit_shifted_zvalues.size
- layer_offsets.append(i0)
- self.node_map.resize(self.node_map.size+unique_zvalues.size)
- self.node_map[i0:] = np.searchsorted(bit_shifted_zvalues, unique_zvalues) + begin_last_layer
- self.node_map_end.resize(self.node_map_end.size+unique_zvalues.size)
- self.node_map_end[i0:] = np.searchsorted(bit_shifted_zvalues, unique_zvalues, side='right') + begin_last_layer
-
- self.layers.resize(self.layers.size+unique_zvalues.size)
- self.layers[i0:] = layer
-
- self.lower_bounds.resize((self.lower_bounds.shape[0]+unique_zvalues.size,3))
- self.upper_bounds.resize((self.upper_bounds.shape[0]+unique_zvalues.size,3))
-
- for i, zi1, zi2 in izip(count(i0), self.node_map[i0:], self.node_map_end[i0:]):
- self.lower_bounds[i] = self.lower_bounds[zi1:zi2].min(axis=0)
- self.upper_bounds[i] = self.upper_bounds[zi1:zi2].max(axis=0)
-
- begin_last_layer += bit_shifted_zvalues.size
-
- if unique_zvalues.size == 1:
- break
-
- self.layer_offsets = layer_offsets
- self.start_node = self.node_map.size - 1
-
- logger.info('BVH construction completed in %1.1f seconds.' % (time.time() - start_time))
- logger.info(' nodes: %d' % len(self.upper_bounds))
-
- if use_cache:
- logger.info('Writing BVH to ~/.chroma cache directory...')
- sys.stdout.flush()
-
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
-
- data = {}
- for key in ['lower_bounds', 'upper_bounds', 'node_map', 'node_map_end', 'layers', 'first_node', 'start_node']:
- data[key] = getattr(self, key)
- data['reorder'] = reorder
- np.savez_compressed(cache_path, **data)
diff --git a/chroma/gpu/geometry.py b/chroma/gpu/geometry.py
index 6cb991c..77d33b2 100644
--- a/chroma/gpu/geometry.py
+++ b/chroma/gpu/geometry.py
@@ -6,228 +6,12 @@ from pycuda import characterize
from chroma.geometry import standard_wavelengths
from chroma.gpu.tools import get_cu_module, get_cu_source, cuda_options, \
chunk_iterator, format_array, format_size, to_uint3, to_float3, \
- make_gpu_struct, GPUFuncs
+ make_gpu_struct, GPUFuncs, mapped_empty, Mapped
from chroma.log import logger
-def round_up_to_multiple(x, multiple):
- remainder = x % multiple
- if remainder == 0:
- return x
- else:
- return x + multiple - remainder
-
-def compute_layer_configuration(n, branch_degree):
- if n == 1:
- # Special case for root
- return [ (1, 1) ]
- else:
- layer_conf = [ (n, round_up_to_multiple(n, branch_degree)) ]
-
- while layer_conf[0][1] > 1:
- nparent = int(np.ceil( float(layer_conf[0][1]) / branch_degree ))
- if nparent == 1:
- layer_conf = [ (1, 1) ] + layer_conf
- else:
- layer_conf = [ (nparent, round_up_to_multiple(nparent, branch_degree)) ] + layer_conf
-
- return layer_conf
-
-def optimize_bvh_layer(layer, bvh_funcs):
- n = len(layer)
- areas = ga.empty(shape=n, dtype=np.uint32)
- union_areas = ga.empty(shape=n, dtype=np.uint32)
- nthreads_per_block = 128
- min_areas = ga.empty(shape=int(np.ceil(n/float(nthreads_per_block))), dtype=np.uint32)
- min_index = ga.empty_like(min_areas)
-
- update = 50000
-
- skip_size = 1
- flag = cuda.pagelocked_empty(shape=skip_size, dtype=np.uint32, mem_flags=cuda.host_alloc_flags.DEVICEMAP)
- flag_gpu = np.intp(flag.base.get_device_pointer())
- print 'starting optimization'
-
- i = 0
- skips = 0
- while i < (n/2 - 1):
- # How are we doing?
- if i % update == 0:
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(n-1, nthreads_per_block, max_blocks=10000):
-
- bvh_funcs.distance_to_prev(np.uint32(first_index + 1),
- np.uint32(elements_this_iter),
- layer,
- union_areas,
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter,1))
-
- union_areas_host = union_areas.get()[1::2]
- print 'Area of parent layer: %1.12e' % union_areas_host.astype(float).sum()
- print 'Area of parent layer so far (%d): %1.12e' % (i*2, union_areas_host.astype(float)[:i].\
-sum())
- print 'Skips:', skips
-
- test_index = i * 2
-
- blocks = 0
- look_forward = min(8192*400, n - test_index - 2)
- skip_this_round = min(skip_size, n - test_index - 1)
- flag[:] = 0
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(look_forward, nthreads_per_block, max_blocks=10000):
- bvh_funcs.min_distance_to(np.uint32(first_index + test_index + 2),
- np.uint32(elements_this_iter),
- np.uint32(test_index),
- layer,
- np.uint32(blocks),
- min_areas,
- min_index,
- flag_gpu,
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter, skip_this_round))
- blocks += nblocks_this_iter
- cuda.Context.get_current().synchronize()
-
- if flag[0] == 0:
- flag_nonzero = flag.nonzero()[0]
- if len(flag_nonzero) == 0:
- no_swap_required = skip_size
- else:
- no_swap_required = flag_nonzero[0]
- i += no_swap_required
- skips += no_swap_required
- continue
-
- areas_host = min_areas[:blocks].get()
- min_index_host = min_index[:blocks].get()
- best_block = areas_host.argmin()
- better_i = min_index_host[best_block]
-
- if i % update == 0:
- print 'swapping %d and %d' % (test_index + 1, better_i)
-
- bvh_funcs.swap(np.uint32(test_index+1), np.uint32(better_i),
- layer, block=(1,1,1), grid=(1,1))
- i += 1
-
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(n-1, nthreads_per_block, max_blocks=10000):
-
- bvh_funcs.distance_to_prev(np.uint32(first_index + 1),
- np.uint32(elements_this_iter),
- layer,
- union_areas,
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter,1))
-
- union_areas_host = union_areas.get()[1::2]
- print 'Final area of parent layer: %1.12e' % union_areas_host.sum()
- print 'Skips:', skips
-
-def make_bvh(vertices, gpu_vertices, ntriangles, gpu_triangles, branch_degree):
- assert branch_degree > 1
- bvh_module = get_cu_module('bvh.cu', options=cuda_options,
- include_source_directory=True)
- bvh_funcs = GPUFuncs(bvh_module)
-
- world_min = vertices.min(axis=0)
- # Full scale at 2**16 - 2 in order to ensure there is dynamic range to round
- # up by one count after quantization
- world_scale = np.max((vertices.max(axis=0) - world_min)) / (2**16 - 2)
-
- world_origin = ga.vec.make_float3(*world_min)
- world_scale = np.float32(world_scale)
-
- layer_conf = compute_layer_configuration(ntriangles, branch_degree)
- layer_offsets = list(np.cumsum([npad for n, npad in layer_conf]))
-
- # Last entry is number of nodes, trim off and add zero to get offset of each layer
- n_nodes = int(layer_offsets[-1])
- layer_offsets = [0] + layer_offsets[:-1]
-
- leaf_nodes = ga.empty(shape=ntriangles, dtype=ga.vec.uint4)
- morton_codes = ga.empty(shape=ntriangles, dtype=np.uint64)
-
- # Step 1: Make leaves
- nthreads_per_block=256
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(ntriangles, nthreads_per_block, max_blocks=10000):
- bvh_funcs.make_leaves(np.uint32(first_index),
- np.uint32(elements_this_iter),
- gpu_triangles, gpu_vertices,
- world_origin, world_scale,
- leaf_nodes, morton_codes,
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter,1))
-
- # argsort on the CPU because I'm too lazy to do it on the GPU
- argsort = morton_codes.get().argsort().astype(np.uint32)
- del morton_codes
- local_leaf_nodes = leaf_nodes.get()[argsort]
- del leaf_nodes
- #del remap_order
- #
- #remap_order = ga.to_gpu(argsort)
- #m = morton_codes.get()
- #m.sort()
- #print m
- #assert False
- # Step 2: sort leaf nodes into full node list
- #print cuda.mem_get_info(), leaf_nodes.nbytes
- nodes = ga.zeros(shape=n_nodes, dtype=ga.vec.uint4)
- areas = ga.zeros(shape=n_nodes, dtype=np.uint32)
- cuda.memcpy_htod(int(nodes.gpudata)+int(layer_offsets[-1]), local_leaf_nodes)
-
- #for first_index, elements_this_iter, nblocks_this_iter in \
- # chunk_iterator(ntriangles, nthreads_per_block, max_blocks=10000):
- # bvh_funcs.reorder_leaves(np.uint32(first_index),
- # np.uint32(elements_this_iter),
- # leaf_nodes, nodes[layer_offsets[-1]:], remap_order,
- # block=(nthreads_per_block,1,1),
- # grid=(nblocks_this_iter,1))
-
-
- # Step 3: Create parent layers in reverse order
- layer_parameters = zip(layer_offsets[:-1], layer_offsets[1:], layer_conf)
- layer_parameters.reverse()
-
- i = len(layer_parameters)
- for parent_offset, child_offset, (nparent, nparent_pad) in layer_parameters:
- #if i < 30:
- # optimize_bvh_layer(nodes[child_offset:child_offset+nparent*branch_degree],
- # bvh_funcs)
-
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(nparent * branch_degree, nthreads_per_block,
- max_blocks=10000):
- bvh_funcs.node_area(np.uint32(first_index+child_offset),
- np.uint32(elements_this_iter),
- nodes,
- areas,
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter,1))
-
- print 'area', i, nparent * branch_degree, '%e' % areas[child_offset:child_offset+nparent*branch_degree].get().astype(float).sum()
-
- for first_index, elements_this_iter, nblocks_this_iter in \
- chunk_iterator(nparent, nthreads_per_block, max_blocks=10000):
- bvh_funcs.build_layer(np.uint32(first_index),
- np.uint32(elements_this_iter),
- np.uint32(branch_degree),
- nodes,
- np.uint32(parent_offset),
- np.uint32(child_offset),
- block=(nthreads_per_block,1,1),
- grid=(nblocks_this_iter,1))
-
- i -= 1
-
- return world_origin, world_scale, nodes
-
class GPUGeometry(object):
- def __init__(self, geometry, wavelengths=None, print_usage=False, branch_degree=2):
+ def __init__(self, geometry, wavelengths=None, print_usage=False):
if wavelengths is None:
wavelengths = standard_wavelengths
@@ -321,26 +105,18 @@ class GPUGeometry(object):
self.surface_pointer_array = \
make_gpu_struct(8*len(self.surface_ptrs), self.surface_ptrs)
- self.pagelocked_vertices = cuda.pagelocked_empty(shape=len(geometry.mesh.vertices),
- dtype=ga.vec.float3,
- mem_flags=cuda.host_alloc_flags.DEVICEMAP | cuda.host_alloc_flags.WRITECOMBINED)
- self.pagelocked_triangles = cuda.pagelocked_empty(shape=len(geometry.mesh.triangles),
- dtype=ga.vec.uint3,
- mem_flags=cuda.host_alloc_flags.DEVICEMAP | cuda.host_alloc_flags.WRITECOMBINED)
- self.pagelocked_vertices[:] = to_float3(geometry.mesh.vertices)
- self.pagelocked_triangles[:] = to_uint3(geometry.mesh.triangles)
- self.vertices = np.intp(self.pagelocked_vertices.base.get_device_pointer())
- self.triangles = np.intp(self.pagelocked_triangles.base.get_device_pointer())
-
-
- self.branch_degree = branch_degree
- print 'bvh', cuda.mem_get_info()
- self.world_origin, self.world_scale, self.nodes = make_bvh(geometry.mesh.vertices,
- self.vertices,
- len(geometry.mesh.triangles),
- self.triangles,
- self.branch_degree)
- print 'bvh after', cuda.mem_get_info()
+ self.vertices = mapped_empty(shape=len(geometry.mesh.vertices),
+ dtype=ga.vec.float3,
+ write_combined=True)
+ self.triangles = mapped_empty(shape=len(geometry.mesh.triangles),
+ dtype=ga.vec.uint3,
+ write_combined=True)
+ self.vertices[:] = to_float3(geometry.mesh.vertices)
+ self.triangles[:] = to_uint3(geometry.mesh.triangles)
+
+ self.nodes = ga.to_gpu(geometry.bvh.nodes)
+ self.world_origin = ga.vec.make_float3(*geometry.bvh.world_coords.world_origin)
+ self.world_scale = np.float32(geometry.bvh.world_coords.world_scale)
material_codes = (((geometry.material1_index & 0xff) << 24) |
((geometry.material2_index & 0xff) << 16) |
@@ -351,14 +127,15 @@ class GPUGeometry(object):
self.solid_id_map = ga.to_gpu(geometry.solid_id.astype(np.uint32))
self.gpudata = make_gpu_struct(geometry_struct_size,
- [self.vertices, self.triangles,
+ [Mapped(self.vertices),
+ Mapped(self.triangles),
self.material_codes,
self.colors, self.nodes,
self.material_pointer_array,
self.surface_pointer_array,
self.world_origin,
self.world_scale,
- np.uint32(self.branch_degree)])
+ np.uint32(geometry.bvh.degree)])
self.geometry = geometry
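
The mapped_empty/Mapped helpers imported above are presumably thin wrappers around the page-locked, device-mapped allocation pattern removed in this file; a rough sketch of equivalent behavior is below (the actual implementations live in chroma.gpu.tools and may differ):

    import numpy as np
    import pycuda.driver as cuda

    def mapped_empty(shape, dtype, write_combined=False):
        # Allocate page-locked host memory mapped into the device address
        # space (zero-copy), optionally write-combined for faster host writes.
        flags = cuda.host_alloc_flags.DEVICEMAP
        if write_combined:
            flags |= cuda.host_alloc_flags.WRITECOMBINED
        return cuda.pagelocked_empty(shape=shape, dtype=dtype, mem_flags=flags)

    def Mapped(array):
        # Device pointer of a mapped host array, suitable for packing into
        # a GPU struct in place of a device allocation.
        return np.intp(array.base.get_device_pointer())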
diff --git a/chroma/loader.py b/chroma/loader.py
new file mode 100644
index 0000000..fc98728
--- /dev/null
+++ b/chroma/loader.py
@@ -0,0 +1,137 @@
+import sys
+import os
+import time
+
+from chroma.log import logger
+from chroma.cache import Cache
+from chroma.bvh import make_simple_bvh
+from chroma.geometry import Geometry, Solid
+from chroma.stl import mesh_from_stl
+from chroma.gpu import create_cuda_context
+
+def load_geometry_from_string(geometry_str,
+ auto_build_bvh=True, read_bvh_cache=True,
+ update_bvh_cache=True, cache_dir=None,
+ cuda_device=None):
+ '''Create or load a geometry and optionally load/build a BVH for it.
+
+ This is a convenience interface to the geometry and BVH construction code,
+ as well as the Chroma caching layer. Most applications should use
+ this function rather than manually building a Geometry and BVH.
+
+ The geometry string passed to this function has several forms:
+
+ "" (empty string) - Load the default geometry from the cache and
+ the default BVH for that geometry.
+
+ "filename.stl" or "filename.stl.bz2" - Create a geometry from a
+ 3D mesh on disk. This model will not be cached, but the
+ BVH can be, depending on whether update_bvh_cache is True.
+
+ "geometry_name" - Load a geometry from the cache with this name
+ and the default BVH for that geometry.
+
+ "geometry_name:bvh_name" - Load a geometry from the cache and
+ the requested BVH by name.
+
+ "@chroma.models.lionsolid" - Run this function inside a Python
+ module, found in the current $PYTHONPATH, to create the
+ geometry, and load the default BVH. For convenience, the
+ current directory is also added to the $PYTHONPATH.
+
+ "@chroma.models.lionsolid:bvh_name" - Run this function to
+ create the Geometry and load a BVH by name.
+
+ By default, the Chroma cache in the user's home directory is
+ consulted for both the geometry and the BVH. A different cache
+ directory can be selected by passing the path in via the
+ ``cache_dir`` parameter.
+
+ If ``read_bvh_cache`` is set to False, then the BVH cache will not
+ be inspected for BVH objects.
+
+ If the requested BVH (default, or named) does not exist for this
+ geometry (checked by MD5 hashing the geometry mesh) and
+ ``auto_build_bvh`` is true, then a BVH will be automatically
+ generated using the "simple" BVH algorithm. The simple algorithm
+ is very fast, but produces a poor quality BVH.
+
+ Any newly created BVH will be saved in the Chroma cache if the
+ ``update_bvh_cache`` parameter is True.
+
+ BVH construction requires a GPU, so the CUDA device number can be
+ specified with the ``cuda_device`` parameter.
+
+ Returns: a Geometry object (or subclass) with the ``bvh`` property
+ set if the options allow.
+ '''
+ # Find BVH id if given
+ bvh_name = 'default'
+ if ':' in geometry_str:
+ geometry_id, bvh_name = geometry_str.split(':')
+ else:
+ geometry_id = geometry_str
+
+ if cache_dir is None:
+ cache = Cache()
+ else:
+ cache = Cache(cache_dir)
+
+ # Where is the geometry coming from?
+ if os.path.exists(geometry_id) and \
+ geometry_id.lower().endswith(('.stl', '.bz2')):
+ # Load from file
+ mesh = mesh_from_stl(geometry_id)
+ geometry = Geometry()
+ geometry.add_solid(Solid(mesh, vacuum, vacuum, color=0x33ffffff))
+ geometry.flatten()
+
+ elif geometry_id.startswith('@'):
+ # Load from function
+ function_path = geometry_id[1:]
+
+ module_name, function_name = function_path.rsplit('.', 1)
+ orig_sys_path = list(sys.path)
+ try:
+ sys.path.append('.')
+ module = __import__(module_name, fromlist=[function_name])
+ sys.path = orig_sys_path
+ except ImportError:
+ sys.path = orig_sys_path
+ raise
+
+ function = getattr(module, function_name)
+ geometry = function()
+ geometry.flatten()
+
+ else:
+ # Load from cache
+ if geometry_id == '':
+ geometry = cache.load_default_geometry()
+ else:
+ geometry = cache.load_geometry(geometry_id)
+ # Cached geometries are flattened already
+
+ # Figure out the BVH situation
+ mesh_hash = geometry.mesh.md5()
+ bvh = None
+ if read_bvh_cache and cache.exist_bvh(mesh_hash, bvh_name):
+ logger.info('Loading BVH for geometry from cache.')
+ bvh = cache.load_bvh(mesh_hash, bvh_name)
+ elif auto_build_bvh:
+ logger.info('Building new BVH using simple algorithm.')
+
+ start = time.time()
+
+ context = create_cuda_context(cuda_device)
+ bvh = make_simple_bvh(geometry.mesh, degree=3)
+ context.pop()
+
+ logger.info('BVH generated in %1.1f seconds.' % (time.time() - start))
+
+ if update_bvh_cache:
+ logger.info('Saving BVH (%s:%s) to cache.' % (mesh_hash, bvh_name))
+ cache.save_bvh(bvh, mesh_hash, bvh_name)
+
+ geometry.bvh = bvh
+ return geometry
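
Putting the pieces together, a geometry loaded this way (with ``geometry.bvh`` populated) can be handed directly to the GPU wrapper above; a hedged sketch, with the model function purely illustrative:

    from chroma.loader import load_geometry_from_string
    from chroma.gpu import create_cuda_context
    from chroma.gpu.geometry import GPUGeometry

    geometry = load_geometry_from_string('@chroma.models.lionsolid')

    cuda_device = None  # default CUDA device, as in loader.py
    context = create_cuda_context(cuda_device)
    gpu_geometry = GPUGeometry(geometry)  # reads geometry.bvh.nodes and world_coords
    # ... run photon propagation here ...
    context.pop()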