Skip to content

Commit

Permalink
Add option to use handles to RID
Browse files Browse the repository at this point in the history
Adds an option to compile an alternative implementation for RIDs, which allows checks for erroneous usage patterns as well as providing leak tests.
  • Loading branch information
lawnjelly committed Dec 6, 2021
1 parent 4c8cc2a commit 3d981b8
Show file tree
Hide file tree
Showing 82 changed files with 876 additions and 247 deletions.
17 changes: 17 additions & 0 deletions SConstruct
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,14 @@ opts.Add(BoolVariable("disable_advanced_gui", "Disable advanced GUI nodes and be
opts.Add(BoolVariable("no_editor_splash", "Don't use the custom splash screen for the editor", True))
opts.Add("system_certs_path", "Use this path as SSL certificates default for editor (for package maintainers)", "")
opts.Add(BoolVariable("use_precise_math_checks", "Math checks use very precise epsilon (debug option)", False))
# Selectable RID implementation: classic pointers (default), or handle-based
# variants that add erroneous-usage checks and optional leak tracking.
rid_choices = ("pointers", "handles", "tracked_handles")
opts.Add(
    EnumVariable(
        "rids",
        "Server object management technique (debug option)",
        "pointers",
        rid_choices,
    )
)

# Thirdparty libraries
opts.Add(BoolVariable("builtin_bullet", "Use the built-in Bullet library", True))
Expand Down Expand Up @@ -327,6 +335,15 @@ if env_base["no_editor_splash"]:
if not env_base["deprecated"]:
env_base.Append(CPPDEFINES=["DISABLE_DEPRECATED"])

# Map the "rids" build option onto preprocessor defines.
rids_mode = env_base["rids"]
if rids_mode in ("handles", "tracked_handles"):
    # Both handle modes share the base define; tracking adds a second one.
    env_base.Append(CPPDEFINES=["RID_HANDLES_ENABLED"])
    if rids_mode == "tracked_handles":
        env_base.Append(CPPDEFINES=["RID_HANDLE_ALLOCATION_TRACKING_ENABLED"])
        print("WARNING: Building with RIDs as tracked handles.")
    else:
        print("WARNING: Building with RIDs as handles.")

if selected_platform in platform_list:
tmppath = "./platform/" + selected_platform
sys.path.insert(0, tmppath)
Expand Down
8 changes: 4 additions & 4 deletions core/math/bvh_structs.inc
Original file line number Diff line number Diff line change
Expand Up @@ -127,13 +127,13 @@ struct TNode {

// instead of using linked list we maintain
// item references (for quick lookup)
PooledList<ItemRef, true> _refs;
PooledList<ItemExtra, true> _extra;
PooledList<ItemRef, uint32_t, true> _refs;
PooledList<ItemExtra, uint32_t, true> _extra;
PooledList<ItemPairs> _pairs;

// these 2 are not in sync .. nodes != leaves!
PooledList<TNode, true> _nodes;
PooledList<TLeaf, true> _leaves;
PooledList<TNode, uint32_t, true> _nodes;
PooledList<TLeaf, uint32_t, true> _leaves;

// we can maintain an un-ordered list of which references are active,
// in order to do a slow incremental optimize of the tree over each frame.
Expand Down
104 changes: 72 additions & 32 deletions core/pooled_list.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,93 +43,133 @@
// that does call constructors / destructors on request / free, this should probably be
// a separate template.

// The zero_on_first_request feature is optional and is useful for e.g. pools of handles,
// which may use a ref count which we want to be initialized to zero the first time a handle is created,
// but left alone on subsequent allocations (as will typically be incremented).

// Note that there is no function to compact the pool - this would
// invalidate any existing pool IDs held externally.
// Compaction can be done but would rely on a more complex method
// of preferentially giving out lower IDs in the freelist first.

#include "core/local_vector.h"

template <class T, bool force_trivial = false>
template <class T, class U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
class PooledList {
LocalVector<T, uint32_t, force_trivial> list;
LocalVector<uint32_t, uint32_t, true> freelist;
LocalVector<T, U, force_trivial> list;
LocalVector<U, U, true> freelist;

// not all list members are necessarily used
int _used_size;
U _used_size;

public:
PooledList() {
_used_size = 0;
}

int estimate_memory_use() const {
return (list.size() * sizeof(T)) + (freelist.size() * sizeof(uint32_t));
// Use with care, in most cases you should make sure to
// free all elements first (i.e. _used_size would be zero),
// although it could also be used without this as an optimization
// in some cases.
void clear() {
list.clear();
freelist.clear();
_used_size = 0;
}

uint64_t estimate_memory_use() const {
return ((uint64_t)list.size() * sizeof(T)) + ((uint64_t)freelist.size() * sizeof(U));
}

const T &operator[](uint32_t p_index) const {
const T &operator[](U p_index) const {
return list[p_index];
}
T &operator[](uint32_t p_index) {
T &operator[](U p_index) {
return list[p_index];
}

int size() const { return _used_size; }
// To be explicit in a pool there is a distinction
// between the number of elements that are currently
// in use, and the number of elements that have been reserved.
// Using size() would be vague.
U used_size() const { return _used_size; }
U reserved_size() const { return list.size(); }

T *request(uint32_t &r_id) {
T *request(U &r_id) {
_used_size++;

if (freelist.size()) {
// pop from freelist
int new_size = freelist.size() - 1;
r_id = freelist[new_size];
freelist.resize(new_size);

return &list[r_id];
}

r_id = list.size();
list.resize(r_id + 1);

static_assert((!zero_on_first_request) || (__is_pod(T)), "zero_on_first_request requires trivial type");
if (zero_on_first_request && __is_pod(T)) {
list[r_id] = {};
}

return &list[r_id];
}
void free(const uint32_t &p_id) {
void free(const U &p_id) {
// should not be on free list already
CRASH_COND(p_id >= list.size());
ERR_FAIL_UNSIGNED_INDEX(p_id, list.size());
freelist.push_back(p_id);
ERR_FAIL_COND_MSG(!_used_size, "_used_size has become out of sync, have you double freed an item?");
_used_size--;
}
};

// a pooled list which automatically keeps a list of the active members
template <class T, bool force_trivial = false>
template <class T, class U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
class TrackedPooledList {
public:
int pool_size() const { return _pool.size(); }
int active_size() const { return _active_list.size(); }
U pool_used_size() const { return _pool.used_size(); }
U pool_reserved_size() const { return _pool.reserved_size(); }
U active_size() const { return _active_list.size(); }

// use with care, see the earlier notes in the PooledList clear()
void clear() {
_pool.clear();
_active_list.clear();
_active_map.clear();
}

uint32_t get_active_id(uint32_t p_index) const {
U get_active_id(U p_index) const {
return _active_list[p_index];
}

const T &get_active(uint32_t p_index) const {
const T &get_active(U p_index) const {
return _pool[get_active_id(p_index)];
}

T &get_active(uint32_t p_index) {
T &get_active(U p_index) {
return _pool[get_active_id(p_index)];
}

const T &operator[](uint32_t p_index) const {
const T &operator[](U p_index) const {
return _pool[p_index];
}
T &operator[](uint32_t p_index) {
T &operator[](U p_index) {
return _pool[p_index];
}

T *request(uint32_t &r_id) {
T *request(U &r_id) {
T *item = _pool.request(r_id);

// add to the active list
uint32_t active_list_id = _active_list.size();
U active_list_id = _active_list.size();
_active_list.push_back(r_id);

// expand the active map (this should be in sync with the pool list
if (_pool.size() > (int)_active_map.size()) {
_active_map.resize(_pool.size());
if (_pool.used_size() > _active_map.size()) {
_active_map.resize(_pool.used_size());
}

// store in the active map
Expand All @@ -138,31 +178,31 @@ class TrackedPooledList {
return item;
}

void free(const uint32_t &p_id) {
void free(const U &p_id) {
_pool.free(p_id);

// remove from the active list.
uint32_t list_id = _active_map[p_id];
U list_id = _active_map[p_id];

// zero the _active map to detect bugs (only in debug?)
_active_map[p_id] = -1;

_active_list.remove_unordered(list_id);

// keep the replacement in sync with the correct list Id
if (list_id < (uint32_t)_active_list.size()) {
if (list_id < _active_list.size()) {
// which pool id has been replaced in the active list
uint32_t replacement_id = _active_list[list_id];
U replacement_id = _active_list[list_id];

// keep that replacements map up to date with the new position
_active_map[replacement_id] = list_id;
}
}

const LocalVector<uint32_t, uint32_t> &get_active_list() const { return _active_list; }
const LocalVector<U, U> &get_active_list() const { return _active_list; }

private:
PooledList<T, force_trivial> _pool;
LocalVector<uint32_t, uint32_t> _active_map;
LocalVector<uint32_t, uint32_t> _active_list;
PooledList<T, U, force_trivial, zero_on_first_request> _pool;
LocalVector<U, U> _active_map;
LocalVector<U, U> _active_list;
};
2 changes: 2 additions & 0 deletions core/rid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@

#include "rid.h"

#ifndef RID_HANDLES_ENABLED
RID_Data::~RID_Data() {
}

Expand All @@ -38,3 +39,4 @@ SafeRefCount RID_OwnerBase::refcount;
void RID_OwnerBase::init_rid() {
refcount.init();
}
#endif // not RID_HANDLES_ENABLED
5 changes: 5 additions & 0 deletions core/rid.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,13 @@

#include "core/list.h"
#include "core/os/memory.h"
#include "core/rid_handle.h"
#include "core/safe_refcount.h"
#include "core/set.h"
#include "core/typedefs.h"

#ifndef RID_HANDLES_ENABLED

class RID_OwnerBase;

class RID_Data {
Expand Down Expand Up @@ -187,4 +190,6 @@ class RID_Owner : public RID_OwnerBase {
}
};

#endif // not handles

#endif
Loading

0 comments on commit 3d981b8

Please sign in to comment.