public:
BVHHandle item_add(T *p_userdata, bool p_active, const BOUNDS &p_aabb, int32_t p_subindex, uint32_t p_tree_id, uint32_t p_tree_collision_mask, bool p_invisible = false) {
#ifdef BVH_VERBOSE_TREE
	VERBOSE_PRINT("\nitem_add BEFORE");
	_debug_recursive_print_tree(p_tree_id);
	VERBOSE_PRINT("\n");
#endif

	BVHABB_CLASS abb;
	abb.from(p_aabb);

	// NOTE that we do not expand the AABB for the first create even if
	// leaf expansion is switched on. This is for two reasons:
	// (1) We don't know whether this object will move in the future; if it
	// stays static, a non-expanded bound is better.
	// (2) We don't yet know how many objects will be paired, which is used to modify
	// the expansion margin.

	// handle to be filled with the new item ref
	BVHHandle handle;

	// ref id easier to pass around than handle
	uint32_t ref_id;

	// this should never fail
	ItemRef *ref = _refs.request(ref_id);

	// the extra data should be a parallel list to the references
	uint32_t extra_id;
	ItemExtra *extra = _extra.request(extra_id);
	BVH_ASSERT(extra_id == ref_id);

	// pairs info
	if (USE_PAIRS) {
		uint32_t pairs_id;
		ItemPairs *pairs = _pairs.request(pairs_id);
		pairs->clear();
		BVH_ASSERT(pairs_id == ref_id);
	}

	extra->subindex = p_subindex;
	extra->userdata = p_userdata;
	extra->last_updated_tick = 0;

	// add an active reference to the list for slow incremental optimize
	// this list must be kept in sync with the references as they are added or removed.
	extra->active_ref_id = _active_refs.size();
	_active_refs.push_back(ref_id);

	extra->tree_id = p_tree_id;
	extra->tree_collision_mask = p_tree_collision_mask;

	// assign to handle to return
	handle.set_id(ref_id);

	create_root_node(p_tree_id);

	// we must choose where to add to tree
	if (p_active) {
		ref->tnode_id = _logic_choose_item_add_node(_root_node_id[p_tree_id], abb);

		bool refit = _node_add_item(ref->tnode_id, ref_id, abb);

		if (refit) {
			// only need to refit from the parent
			const TNode &add_node = _nodes[ref->tnode_id];
			if (add_node.parent_id != BVHCommon::INVALID) {
				refit_upward_and_balance(add_node.parent_id, p_tree_id);
			}
		}
	} else {
		ref->set_inactive();
	}

#ifdef BVH_VERBOSE
	// memory use
	int mem = _refs.estimate_memory_use();
	mem += _nodes.estimate_memory_use();

	String sz = _debug_aabb_to_string(abb);
	VERBOSE_PRINT("\titem_add [" + itos(ref_id) + "] " + itos(_refs.used_size()) + " refs,\t" + itos(_nodes.used_size()) + " nodes " + sz);
	VERBOSE_PRINT("mem use : " + itos(mem) + ", num nodes reserved : " + itos(_nodes.reserved_size()));
#endif

	return handle;
}
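
// A minimal usage sketch for item_add() (illustrative only; "bvh",
// "my_object" and the mask value are hypothetical client-side names):
//
//     T my_object;
//     BOUNDS aabb; // client supplies the object's bound
//     BVHHandle handle = bvh.item_add(&my_object, true, aabb,
//             0 /* subindex */, 0 /* tree_id */,
//             0xFFFFFFFF /* collide with all trees */);
//
// The returned handle is the key used by all the item_* calls below.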

void _debug_print_refs() {
#ifdef BVH_VERBOSE_TREE
	print_line("refs.....");
	for (int n = 0; n < _refs.size(); n++) {
		const ItemRef &ref = _refs[n];
		print_line("tnode_id " + itos(ref.tnode_id) + ", item_id " + itos(ref.item_id));
	}
#endif
}

// returns false if noop
bool item_move(BVHHandle p_handle, const BOUNDS &p_aabb) {
	uint32_t ref_id = p_handle.id();

	// get the reference
	ItemRef &ref = _refs[ref_id];
	if (!ref.is_active()) {
		return false;
	}

	BVHABB_CLASS abb;
	abb.from(p_aabb);

#ifdef BVH_EXPAND_LEAF_AABBS
	if (USE_PAIRS) {
		// scale the pairing expansion by the number of pairs.
		abb.expand(_pairs[ref_id].scale_expansion_margin(_pairing_expansion));
	} else {
		abb.expand(_pairing_expansion);
	}
#endif

	BVH_ASSERT(ref.tnode_id != BVHCommon::INVALID);
	TNode &tnode = _nodes[ref.tnode_id];

	// does it fit within the current leaf aabb?
	if (tnode.aabb.is_other_within(abb)) {
		// do nothing .. fast path .. not moved enough to need refit

		// however we WILL update the exact aabb in the leaf, as this will be needed
		// for accurate collision detection
		TLeaf &leaf = _node_get_leaf(tnode);

		BVHABB_CLASS &leaf_abb = leaf.get_aabb(ref.item_id);

		// no change?
#ifdef BVH_EXPAND_LEAF_AABBS
		BOUNDS leaf_aabb;
		leaf_abb.to(leaf_aabb);

		// This test should pass in a lot of cases, and by returning false we can avoid
		// collision pairing checks later, which greatly reduces processing.
		if (expanded_aabb_encloses_not_shrink(leaf_aabb, p_aabb)) {
			return false;
		}
#else
		if (leaf_abb == abb) {
			return false;
		}
#endif

#ifdef BVH_VERBOSE_MOVES
		print_line("item_move " + itos(p_handle.id()) + " (within tnode aabb) : " + _debug_aabb_to_string(abb));
#endif

		leaf_abb = abb;
		_integrity_check_all();

		return true;
	}

#ifdef BVH_VERBOSE_MOVES
	print_line("item_move " + itos(p_handle.id()) + " (outside tnode aabb) : " + _debug_aabb_to_string(abb));
#endif

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// remove and reinsert
	node_remove_item(ref_id, tree_id);

	// we must choose where to add to tree
	ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);

	// add to the tree
	bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);

	// only need to refit from the parent
	if (needs_refit) {
		const TNode &add_node = _nodes[ref.tnode_id];
		if (add_node.parent_id != BVHCommon::INVALID) {
			// not sure we need to rebalance all the time, this can be done less often
			refit_upward(add_node.parent_id);
		}
		//refit_upward_and_balance(add_node.parent_id);
	}

	return true;
}
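
// Typical per-frame move, as a sketch ("bvh" and "new_aabb" are hypothetical
// client-side names):
//
//     if (bvh.item_move(handle, new_aabb)) {
//         // the stored bound changed; pairing may need rechecking
//     }
//
// A false return means the call was a no-op: the item is inactive, or the
// new bound did not change the stored leaf bound, so collision pairing
// checks can be skipped entirely.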

void item_remove(BVHHandle p_handle) {
	uint32_t ref_id = p_handle.id();

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	VERBOSE_PRINT("item_remove [" + itos(ref_id) + "] ");

	////////////////////////////////////////
	// remove the active reference from the list for slow incremental optimize
	// this list must be kept in sync with the references as they are added or removed.
	uint32_t active_ref_id = _extra[ref_id].active_ref_id;
	uint32_t ref_id_moved_back = _active_refs[_active_refs.size() - 1];

	// swap back and decrement for fast unordered remove
	_active_refs[active_ref_id] = ref_id_moved_back;
	_active_refs.resize(_active_refs.size() - 1);

	// keep the moved active reference up to date
	_extra[ref_id_moved_back].active_ref_id = active_ref_id;
	////////////////////////////////////////

	// remove the item from the node (only if active)
	if (_refs[ref_id].is_active()) {
		node_remove_item(ref_id, tree_id);
	}

	// remove the item reference
	_refs.free(ref_id);
	_extra.free(ref_id);
	if (USE_PAIRS) {
		_pairs.free(ref_id);
	}

	// don't think refit_all is necessary?
	//refit_all(_tree_id);

#ifdef BVH_VERBOSE_TREE
	_debug_recursive_print_tree(tree_id);
#endif
}
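
// The active reference bookkeeping above uses the classic unordered
// "swap with last, then pop" removal, sketched generically here:
//
//     list[index_to_remove] = list[list.size() - 1];
//     list.resize(list.size() - 1);
//
// This is O(1), but it changes the index of the element that was moved,
// which is why _extra[ref_id_moved_back].active_ref_id is patched above.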

// returns success
bool item_activate(BVHHandle p_handle, const BOUNDS &p_aabb) {
	uint32_t ref_id = p_handle.id();
	ItemRef &ref = _refs[ref_id];
	if (ref.is_active()) {
		// noop
		return false;
	}

	// add to tree
	BVHABB_CLASS abb;
	abb.from(p_aabb);

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// we must choose where to add to tree
	ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);
	_node_add_item(ref.tnode_id, ref_id, abb);

	refit_upward_and_balance(ref.tnode_id, tree_id);

	return true;
}

// returns success
bool item_deactivate(BVHHandle p_handle) {
	uint32_t ref_id = p_handle.id();
	ItemRef &ref = _refs[ref_id];
	if (!ref.is_active()) {
		// noop
		return false;
	}

	uint32_t tree_id = _handle_get_tree_id(p_handle);

	// remove from tree
	BVHABB_CLASS abb;
	node_remove_item(ref_id, tree_id, &abb);

	// mark as inactive
	ref.set_inactive();
	return true;
}
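
// Sketch of the activate/deactivate pair in use ("bvh" is a hypothetical
// instance):
//
//     bvh.item_deactivate(handle);     // removed from the tree, handle kept
//     // ... later ...
//     bvh.item_activate(handle, aabb); // reinserted at the new bound
//
// A deactivated item keeps its handle and bookkeeping, but takes no part in
// tree traversal or pairing until it is reactivated.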

bool item_get_active(BVHHandle p_handle) const {
	uint32_t ref_id = p_handle.id();
	const ItemRef &ref = _refs[ref_id];
	return ref.is_active();
}

// During collision testing, fill the cull params with the mask and tester
// object of the item we are testing from.
void item_fill_cullparams(BVHHandle p_handle, CullParams &r_params) const {
	uint32_t ref_id = p_handle.id();
	const ItemExtra &extra = _extra[ref_id];

	// which trees does this item want to collision detect against?
	r_params.tree_collision_mask = extra.tree_collision_mask;

	// The testing user defined object is passed to the user defined cull check function
	// for masks etc. This is usually a dummy object of type T with masks set.
	// However, if not using the cull_check callback (i.e. returning true), you can pass
	// a nullptr instead of a dummy object, as it will not be used.
	r_params.tester = extra.userdata;
}
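
// Sketch of preparing cull params for a query (illustrative; only the two
// members set above are relied upon):
//
//     CullParams params;
//     bvh.item_fill_cullparams(handle, params);
//     // params.tree_collision_mask selects which trees to test against,
//     // and params.tester is handed to the user cull check callback.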

bool item_is_pairable(const BVHHandle &p_handle) {
	uint32_t ref_id = p_handle.id();
	const ItemExtra &extra = _extra[ref_id];
	return extra.pairable != 0;
}

void item_get_ABB(const BVHHandle &p_handle, BVHABB_CLASS &r_abb) {
	uint32_t ref_id = p_handle.id();
	const ItemRef &ref = _refs[ref_id];

	TNode &tnode = _nodes[ref.tnode_id];
	TLeaf &leaf = _node_get_leaf(tnode);

	r_abb = leaf.get_aabb(ref.item_id);
}

bool item_set_tree(const BVHHandle &p_handle, uint32_t p_tree_id, uint32_t p_tree_collision_mask) {
	// change tree?
	uint32_t ref_id = p_handle.id();

	ItemExtra &ex = _extra[ref_id];
	ItemRef &ref = _refs[ref_id];

	bool active = ref.is_active();
	bool tree_changed = ex.tree_id != p_tree_id;
	bool mask_changed = ex.tree_collision_mask != p_tree_collision_mask;
	bool state_changed = tree_changed | mask_changed;

	// Keep an eye out for bugs here where changes to objects go unnoticed,
	// especially changes to client user masks, which the BVH cannot detect
	// as a change. You may need to force a collision check in that case with recheck_pairs().

	if (active && (tree_changed | mask_changed)) {
		// record abb
		TNode &tnode = _nodes[ref.tnode_id];
		TLeaf &leaf = _node_get_leaf(tnode);
		BVHABB_CLASS abb = leaf.get_aabb(ref.item_id);

		// make sure current tree is correct prior to changing
		uint32_t tree_id = _handle_get_tree_id(p_handle);

		// remove from old tree
		node_remove_item(ref_id, tree_id);

		// we must set the pairable AFTER getting the current tree
		// because the pairable status determines which tree
		ex.tree_id = p_tree_id;
		ex.tree_collision_mask = p_tree_collision_mask;

		// add to new tree
		tree_id = _handle_get_tree_id(p_handle);
		create_root_node(tree_id);

		// we must choose where to add to tree
		ref.tnode_id = _logic_choose_item_add_node(_root_node_id[tree_id], abb);
		bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);

		// only need to refit from the parent
		if (needs_refit) {
			const TNode &add_node = _nodes[ref.tnode_id];
			if (add_node.parent_id != BVHCommon::INVALID) {
				refit_upward_and_balance(add_node.parent_id, tree_id);
			}
		}
	} else {
		// always keep this up to date
		ex.tree_id = p_tree_id;
		ex.tree_collision_mask = p_tree_collision_mask;
	}

	return state_changed;
}
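
// Sketch: moving an item to another tree (the ids and mask are arbitrary
// example values):
//
//     bool changed = bvh.item_set_tree(handle, 1, 0x1);
//     if (!changed) {
//         // tree and mask already matched; nothing was reinserted
//     }
//
// As the warning above notes, client-side mask changes the BVH cannot see
// may still require a manual recheck_pairs().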

void incremental_optimize() {
	// First update all aabbs as a one-off step.
	// This is cheaper than doing it on each move, as each leaf may get touched
	// multiple times in a frame.
	for (int n = 0; n < NUM_TREES; n++) {
		if (_root_node_id[n] != BVHCommon::INVALID) {
			refit_branch(_root_node_id[n]);
		}
	}

	// Now do a small amount of reinserting to get things moving gradually,
	// and keep items in the right leaves.
	if (_current_active_ref >= _active_refs.size()) {
		_current_active_ref = 0;
	}

	// special case
	if (!_active_refs.size()) {
		return;
	}

	uint32_t ref_id = _active_refs[_current_active_ref++];

	_logic_item_remove_and_reinsert(ref_id);

#ifdef BVH_VERBOSE
	/*
	// memory use
	int mem_refs = _refs.estimate_memory_use();
	int mem_nodes = _nodes.estimate_memory_use();
	int mem_leaves = _leaves.estimate_memory_use();

	String sz;
	sz += "mem_refs : " + itos(mem_refs) + " ";
	sz += "mem_nodes : " + itos(mem_nodes) + " ";
	sz += "mem_leaves : " + itos(mem_leaves) + " ";
	sz += ", num nodes : " + itos(_nodes.size());
	print_line(sz);
	*/
#endif
}

void update() {
	incremental_optimize();

	// keep the expansion values up to date with the world bound
//#define BVH_ALLOW_AUTO_EXPANSION
#ifdef BVH_ALLOW_AUTO_EXPANSION
	if (_auto_node_expansion || _auto_pairing_expansion) {
		BVHABB_CLASS world_bound;
		world_bound.set_to_max_opposite_extents();

		bool bound_valid = false;

		for (int n = 0; n < NUM_TREES; n++) {
			uint32_t node_id = _root_node_id[n];
			if (node_id != BVHCommon::INVALID) {
				world_bound.merge(_nodes[node_id].aabb);
				bound_valid = true;
			}
		}

		// if there are no nodes, do nothing, but if there are...
		if (bound_valid) {
			BOUNDS bb;
			world_bound.to(bb);
			real_t size = bb.get_longest_axis_size();

			// automatic AI decision for best parameters.
			// These can be overridden in project settings.

			// these magic numbers are determined by experiment
			if (_auto_node_expansion) {
				_node_expansion = size * 0.025;
			}
			if (_auto_pairing_expansion) {
				_pairing_expansion = size * 0.009;
			}
		}
	}
#endif
}
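
// update() is designed to be called once per frame or physics tick, e.g.
// (sketch; "physics_step" is a hypothetical caller):
//
//     void physics_step() {
//         bvh.update(); // refits branches, reinserts ONE active reference
//     }
//
// Because incremental_optimize() reinserts only a single active reference
// per call, the cost of optimization is amortized over many frames.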

void params_set_pairing_expansion(real_t p_value) {
	if (p_value < 0.0) {
#ifdef BVH_ALLOW_AUTO_EXPANSION
		_auto_pairing_expansion = true;
#endif
		return;
	}
#ifdef BVH_ALLOW_AUTO_EXPANSION
	_auto_pairing_expansion = false;
#endif

	_pairing_expansion = p_value;

	// calculate shrinking threshold
	const real_t fudge_factor = 1.1;
	_aabb_shrinkage_threshold = _pairing_expansion * POINT::AXIS_COUNT * 2.0 * fudge_factor;
}
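
// Worked example of the threshold calculation (assuming a 3-axis POINT and
// a pairing expansion of 0.1):
//
//     _aabb_shrinkage_threshold = 0.1 * 3 * 2.0 * 1.1 = 0.66
//
// i.e. the summed dimensions of an item's AABB must shrink by more than
// 0.66 units before expanded_aabb_encloses_not_shrink() reports a change.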

// This routine is not just an enclose check; it also checks for the special case of shrinkage.
bool expanded_aabb_encloses_not_shrink(const BOUNDS &p_expanded_aabb, const BOUNDS &p_aabb) const {
	if (!p_expanded_aabb.encloses(p_aabb)) {
		return false;
	}

	// Check for special case of shrinkage. If the aabb has shrunk
	// significantly we want to create a new expanded bound, because
	// the previous expanded bound will have diverged significantly.
	const POINT &exp_size = p_expanded_aabb.size;
	const POINT &new_size = p_aabb.size;

	real_t exp_l = 0.0;
	real_t new_l = 0.0;

	for (int i = 0; i < POINT::AXIS_COUNT; ++i) {
		exp_l += exp_size[i];
		new_l += new_size[i];
	}

	// is the difference above the shrinkage threshold?
	real_t diff = exp_l - new_l;
	if (diff < _aabb_shrinkage_threshold) {
		return true;
	}

	return false;
}
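
// Worked example (3-axis POINT, _aabb_shrinkage_threshold of 0.66 as above):
// an expanded AABB of size (2, 2, 2) gives exp_l = 6. A new AABB of size
// (1.9, 1.9, 1.9) gives new_l = 5.7 and diff = 0.3 < 0.66, so the function
// returns true (enclosed, no significant shrink). A new size of
// (1.5, 1.5, 1.5) gives diff = 1.5 >= 0.66, so it returns false and the
// caller will build a fresh expanded bound.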