// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "../common/default.h"
#include "../common/alloc.h"
#include "../common/accel.h"
#include "../common/device.h"
#include "../common/scene.h"
#include "../geometry/primitive.h"
#include "../common/ray.h"

namespace embree
{
  /* BVH node reference with bounds */
  template<typename NodeRef>
  struct BVHNodeRecord
  {
    __forceinline BVHNodeRecord() {}
    __forceinline BVHNodeRecord(NodeRef ref, const BBox3fa& bounds) : ref(ref), bounds((BBox3fx)bounds) {}
    __forceinline BVHNodeRecord(NodeRef ref, const BBox3fx& bounds) : ref(ref), bounds(bounds) {}

    NodeRef ref;
    BBox3fx bounds;
  };

  /* BVH node reference with linear (motion blur) bounds */
  template<typename NodeRef>
  struct BVHNodeRecordMB
  {
    __forceinline BVHNodeRecordMB() {}
    __forceinline BVHNodeRecordMB(NodeRef ref, const LBBox3fa& lbounds) : ref(ref), lbounds(lbounds) {}

    NodeRef ref;
    LBBox3fa lbounds;
  };

  /* BVH node reference with linear bounds and time range */
  template<typename NodeRef>
  struct BVHNodeRecordMB4D
  {
    __forceinline BVHNodeRecordMB4D() {}
    __forceinline BVHNodeRecordMB4D(NodeRef ref, const LBBox3fa& lbounds, const BBox1f& dt) : ref(ref), lbounds(lbounds), dt(dt) {}

    NodeRef ref;
    LBBox3fa lbounds;
    BBox1f dt;
  };
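
  /* Illustrative usage note (not part of the original header): these records bundle a child
     reference with the bounds the builder computed for it, e.g. for a time-bounded child

       BVHNodeRecordMB4D<NodeRef> rec(child, lbounds, BBox1f(0.2f, 0.8f));

     where 'child', 'lbounds' and the time range are placeholders; rec.ref, rec.lbounds and
     rec.dt are then available to fill the parent node. */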

  /* forward declarations of all node types */
  template<typename NodeRef, int N> struct BaseNode_t;
  template<typename NodeRef, int N> struct AABBNode_t;
  template<typename NodeRef, int N> struct AABBNodeMB_t;
  template<typename NodeRef, int N> struct AABBNodeMB4D_t;
  template<typename NodeRef, int N> struct OBBNode_t;
  template<typename NodeRef, int N> struct OBBNodeMB_t;
  template<typename NodeRef, int N> struct QuantizedNode_t;
  template<typename NodeRef, int N> struct QuantizedNodeMB_t;

  /*! Tagged pointer that references either an internal node or a list of primitives (leaf) */
  template<int N>
  struct NodeRefPtr
  {
    //template<int NN> friend class BVHN;

    /*! Number of bytes the nodes and primitives are minimally aligned to. */
    static const size_t byteAlignment = 16;
    static const size_t byteNodeAlignment = 4*N;

    /*! highest address bit is used as barrier for some algorithms */
    static const size_t barrier_mask = (1LL << (8*sizeof(size_t)-1));

    /*! Masks the low alignment bits, which store the node type and, for leaves, the number of items. */
    static const size_t align_mask = byteAlignment-1;
    static const size_t items_mask = byteAlignment-1;

    /*! different supported node types */
    static const size_t tyAABBNode = 0;
    static const size_t tyAABBNodeMB = 1;
    static const size_t tyAABBNodeMB4D = 6;
    static const size_t tyOBBNode = 2;
    static const size_t tyOBBNodeMB = 3;
    static const size_t tyQuantizedNode = 5;
    static const size_t tyLeaf = 8;

    /*! Empty node */
    static const size_t emptyNode = tyLeaf;

    /*! Invalid node, used as marker in traversal */
    static const size_t invalidNode = (((size_t)-1) & (~items_mask)) | (tyLeaf+0);
    static const size_t popRay      = (((size_t)-1) & (~items_mask)) | (tyLeaf+1);

    /*! Maximum number of primitive blocks in a leaf. */
    static const size_t maxLeafBlocks = items_mask-tyLeaf;
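
    /* Illustrative note (derived from the constants above, not part of the original header):
       with byteAlignment = 16 the low 4 bits of every node address are free. Inner nodes store
       their type there; leaves store tyLeaf plus the number of primitive blocks, e.g.

         size_t leaf = (size_t)prims | (tyLeaf + 3);      // leaf with 3 blocks ('prims' is 16-byte aligned)
         size_t num  = (leaf & items_mask) - tyLeaf;      // num == 3

       hence maxLeafBlocks = items_mask - tyLeaf = 15 - 8 = 7. */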

    /*! Default constructor */
    __forceinline NodeRefPtr () {}

    /*! Construction from integer */
    __forceinline NodeRefPtr (size_t ptr) : ptr(ptr) {}

    /*! Cast to size_t */
    __forceinline operator size_t() const { return ptr; }

    /*! Sets the barrier bit. */
    __forceinline void setBarrier() {
#if defined(__64BIT__)
      assert(!isBarrier());
      ptr |= barrier_mask;
#else
      assert(false);
#endif
    }

    /*! Clears the barrier bit. */
    __forceinline void clearBarrier() {
#if defined(__64BIT__)
      ptr &= ~barrier_mask;
#else
      assert(false);
#endif
    }

    /*! Checks if this is a barrier. A barrier tells the top-level tree rotations how deep to enter the tree. */
    __forceinline bool isBarrier() const { return (ptr & barrier_mask) != 0; }
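
    /* Sketch of the intended use (hedged, inferred from the comment above): a builder can mark a
       node with setBarrier(), top-level tree rotations stop descending when isBarrier() returns
       true, and clearBarrier() removes the mark afterwards. */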

    /*! checks if this is a leaf */
    __forceinline size_t isLeaf() const { return ptr & tyLeaf; }

    /*! returns node type */
    __forceinline int type() const { return ptr & (size_t)align_mask; }

    /*! checks if this is a node */
    __forceinline int isAABBNode() const { return (ptr & (size_t)align_mask) == tyAABBNode; }

    /*! checks if this is a motion blur node */
    __forceinline int isAABBNodeMB() const { return (ptr & (size_t)align_mask) == tyAABBNodeMB; }

    /*! checks if this is a 4D motion blur node */
    __forceinline int isAABBNodeMB4D() const { return (ptr & (size_t)align_mask) == tyAABBNodeMB4D; }

    /*! checks if this is a node with unaligned bounding boxes */
    __forceinline int isOBBNode() const { return (ptr & (size_t)align_mask) == tyOBBNode; }

    /*! checks if this is a motion blur node with unaligned bounding boxes */
    __forceinline int isOBBNodeMB() const { return (ptr & (size_t)align_mask) == tyOBBNodeMB; }

    /*! checks if this is a quantized node */
    __forceinline int isQuantizedNode() const { return (ptr & (size_t)align_mask) == tyQuantizedNode; }
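
    /* Usage sketch (illustrative only): traversal code typically dispatches on these predicates,
       e.g.

         if      (ref.isLeaf())       { size_t num; char* prims = ref.leaf(num); ... }
         else if (ref.isAABBNode())   { ... ref.getAABBNode() ... }
         else if (ref.isAABBNodeMB()) { ... ref.getAABBNodeMB() ... }

       where 'ref' is a NodeRefPtr<N> taken from the traversal stack. */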

    /*! Encodes a node */
    static __forceinline NodeRefPtr encodeNode(AABBNode_t<NodeRefPtr,N>* node) {
      assert(!((size_t)node & align_mask));
      return NodeRefPtr((size_t) node);
    }

    static __forceinline NodeRefPtr encodeNode(AABBNodeMB_t<NodeRefPtr,N>* node) {
      assert(!((size_t)node & align_mask));
      return NodeRefPtr((size_t) node | tyAABBNodeMB);
    }

    static __forceinline NodeRefPtr encodeNode(AABBNodeMB4D_t<NodeRefPtr,N>* node) {
      assert(!((size_t)node & align_mask));
      return NodeRefPtr((size_t) node | tyAABBNodeMB4D);
    }

    /*! Encodes an unaligned node */
    static __forceinline NodeRefPtr encodeNode(OBBNode_t<NodeRefPtr,N>* node) {
      return NodeRefPtr((size_t) node | tyOBBNode);
    }

    /*! Encodes an unaligned motion blur node */
    static __forceinline NodeRefPtr encodeNode(OBBNodeMB_t<NodeRefPtr,N>* node) {
      return NodeRefPtr((size_t) node | tyOBBNodeMB);
    }

    /*! Encodes a leaf */
    static __forceinline NodeRefPtr encodeLeaf(void* tri, size_t num) {
      assert(!((size_t)tri & align_mask));
      assert(num <= maxLeafBlocks);
      return NodeRefPtr((size_t)tri | (tyLeaf+min(num,(size_t)maxLeafBlocks)));
    }

    /*! Encodes a typed leaf */
    static __forceinline NodeRefPtr encodeTypedLeaf(void* ptr, size_t ty) {
      assert(!((size_t)ptr & align_mask));
      return NodeRefPtr((size_t)ptr | (tyLeaf+ty));
    }

    /*! returns base node pointer */
    __forceinline BaseNode_t<NodeRefPtr,N>* baseNode()
    {
      assert(!isLeaf());
      return (BaseNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask);
    }
    __forceinline const BaseNode_t<NodeRefPtr,N>* baseNode() const
    {
      assert(!isLeaf());
      return (const BaseNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask);
    }

    /*! returns node pointer */
    __forceinline       AABBNode_t<NodeRefPtr,N>* getAABBNode()       { assert(isAABBNode()); return (      AABBNode_t<NodeRefPtr,N>*)ptr; }
    __forceinline const AABBNode_t<NodeRefPtr,N>* getAABBNode() const { assert(isAABBNode()); return (const AABBNode_t<NodeRefPtr,N>*)ptr; }

    /*! returns motion blur node pointer */
    __forceinline       AABBNodeMB_t<NodeRefPtr,N>* getAABBNodeMB()       { assert(isAABBNodeMB() || isAABBNodeMB4D()); return (      AABBNodeMB_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }
    __forceinline const AABBNodeMB_t<NodeRefPtr,N>* getAABBNodeMB() const { assert(isAABBNodeMB() || isAABBNodeMB4D()); return (const AABBNodeMB_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }

    /*! returns 4D motion blur node pointer */
    __forceinline       AABBNodeMB4D_t<NodeRefPtr,N>* getAABBNodeMB4D()       { assert(isAABBNodeMB4D()); return (      AABBNodeMB4D_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }
    __forceinline const AABBNodeMB4D_t<NodeRefPtr,N>* getAABBNodeMB4D() const { assert(isAABBNodeMB4D()); return (const AABBNodeMB4D_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }

    /*! returns unaligned node pointer */
    __forceinline       OBBNode_t<NodeRefPtr,N>* ungetAABBNode()       { assert(isOBBNode()); return (      OBBNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }
    __forceinline const OBBNode_t<NodeRefPtr,N>* ungetAABBNode() const { assert(isOBBNode()); return (const OBBNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }

    /*! returns unaligned motion blur node pointer */
    __forceinline       OBBNodeMB_t<NodeRefPtr,N>* ungetAABBNodeMB()       { assert(isOBBNodeMB()); return (      OBBNodeMB_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }
    __forceinline const OBBNodeMB_t<NodeRefPtr,N>* ungetAABBNodeMB() const { assert(isOBBNodeMB()); return (const OBBNodeMB_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }

    /*! returns quantized node pointer */
    __forceinline       QuantizedNode_t<NodeRefPtr,N>* quantizedNode()       { assert(isQuantizedNode()); return (      QuantizedNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }
    __forceinline const QuantizedNode_t<NodeRefPtr,N>* quantizedNode() const { assert(isQuantizedNode()); return (const QuantizedNode_t<NodeRefPtr,N>*)(ptr & ~(size_t)align_mask); }

    /*! returns leaf pointer and number of primitive blocks */
    __forceinline char* leaf(size_t& num) const {
      assert(isLeaf());
      num = (ptr & (size_t)items_mask)-tyLeaf;
      return (char*)(ptr & ~(size_t)align_mask);
    }
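
    /* Usage sketch (illustrative, assuming 'prims' points to a byteAlignment-aligned allocation
       holding 'num' primitive blocks):

         NodeRefPtr ref = NodeRefPtr::encodeLeaf(prims, num);
         size_t n; char* base = ref.leaf(n);   // base == prims, n == num

       Inner nodes round-trip analogously through encodeNode() and getAABBNode() etc. */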

    /*! clear all bit flags */
    __forceinline void clearFlags() {
      ptr &= ~(size_t)align_mask;
    }

    /*! returns the branching width N of the node */
    __forceinline size_t getN() const { return N; }

  public:
    size_t ptr;
  };
}