Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[c++] Add static initialization guards for Cortex-M #346

Merged
merged 5 commits into from
Mar 13, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions examples/stm32f469_discovery/threadsafe_statics/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
/*
* Copyright (c) 2020, Niklas Hauser
*
* This file is part of the modm project.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
// ----------------------------------------------------------------------------

#include <modm/board.hpp>

// Forward declaration so Dummy's constructor can call back into the
// function that is currently constructing its static instance.
void constructDummy();

/// Demo class whose constructor re-enters the static initialization that is
/// constructing it. This deliberately exercises the recursion detection of
/// the static-initialization guards added by this PR.
class Dummy
{
public:
	Dummy()
	{
		MODM_LOG_INFO << "Dummy class constructed" << modm::endl;
		constructDummy(); // recursive initialization
	}
};

// Constructs a function-local static Dummy. The first call re-enters itself
// via Dummy's constructor while the guard is still held — this is a test
// fixture for the guard implementation, not a pattern to copy.
void
constructDummy()
{
	static Dummy dummy;
	MODM_LOG_INFO << "constructDummy() called" << modm::endl;
}

/// Blinks the green LED, logs an incrementing loop counter, and repeatedly
/// calls constructDummy() to exercise the static-initialization guards.
int
main()
{
	Board::initialize();

	for (uint32_t loops = 0; true; loops++)
	{
		Board::LedGreen::toggle();
		// Blink faster while the user button is held down.
		if (Board::Button::read()) {
			modm::delayMilliseconds(125);
		} else {
			modm::delayMilliseconds(500);
		}

		MODM_LOG_INFO << "loop: " << loops << modm::endl;
		constructDummy();
	}

	return 0;
}
10 changes: 10 additions & 0 deletions examples/stm32f469_discovery/threadsafe_statics/project.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
<!-- lbuild project for the thread-safe statics example on the DISCO-F469NI board. -->
<library>
  <extends>modm:disco-f469ni</extends>
  <options>
    <option name="modm:build:build.path">../../../build/stm32f469_discovery/threadsafe_statics</option>
  </options>
  <modules>
    <module>modm:platform:gpio</module>
    <module>modm:build:scons</module>
  </modules>
</library>
2 changes: 1 addition & 1 deletion ext/dlr/scons-build-tools
2 changes: 1 addition & 1 deletion ext/gcc/assert.cpp.in
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#include <bits/functexcept.h>
#include <modm/math/utils/bit_constants.hpp>

%% if options["use_modm_assert"]
%% if options["assert_on_exception"]
#include <modm/architecture/interface/assert.hpp>

#define __modm_stdcpp_failure(failure) modm_assert(false, "stdc++", "stdc++", failure);__builtin_abort();
Expand Down
150 changes: 150 additions & 0 deletions ext/gcc/atomics_c11_cortex.cpp.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
/*
* Copyright (c) 2020, Niklas Hauser
*
* This file is part of the modm project.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
// ----------------------------------------------------------------------------


#include <cstring>
#include <modm/platform/core/atomic_lock.hpp>

/* Cortex-M0 does not have hardware support for true atomics, like STREX/LDREX.
* The toolchain does not implement the intrinsics, instead linking to them, so
* that an external library can implement them as they wish.
* Here we wrap all operations into an atomic lock, which globally disables
* interrupts. This isn't high performance, but we have no other choice here.
*
* See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
*/

// Fixed-width operand types used by the generated __atomic_*_{1,2,4,8}
// entry points below.
using _a8 = uint8_t;
using _a16 = uint16_t;
// NOTE(review): spelled `unsigned int` (not uint32_t) — presumably to match
// the GCC builtin signatures exactly; same width on Cortex-M. Confirm.
using _a32 = unsigned int;
using _a64 = uint64_t;

// =========================== atomics for >64 bits ===========================
%% macro atomic_load(len)
// Emits __atomic_load_{{len//8}}: loads a {{len}}-bit value while interrupts
// are globally disabled. The requested memory order is ignored, since the
// global lock is strictly stronger than any ordering request.
extern "C" _a{{len}}
__atomic_load_{{len//8}}(const volatile void *ptr, int /*memorder*/)
{
	modm::atomic::Lock _;
	return *reinterpret_cast<const volatile _a{{len}}*>(ptr);
}
%% endmacro

extern "C" void
__atomic_load_c(size_t size, const void *src, void *dest, int /*memorder*/)
{
modm::atomic::Lock _;
std::memcpy(dest, src, size);
}


%% macro atomic_store(len)
// Emits __atomic_store_{{len//8}}: stores a {{len}}-bit value under the
// global interrupt lock; the requested memory order is ignored.
extern "C" void
__atomic_store_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
{
	modm::atomic::Lock _;
	*reinterpret_cast<volatile _a{{len}}*>(ptr) = value;
}
%% endmacro

extern "C" void
__atomic_store_c(size_t size, void *dest, const void *src, int /*memorder*/)
{
modm::atomic::Lock _;
std::memcpy(dest, src, size);
}


%% macro atomic_exchange(len)
// Emits __atomic_exchange_{{len//8}}: atomically replaces the {{len}}-bit
// value at `ptr` with `desired` and returns the previous value.
extern "C" _a{{len}}
__atomic_exchange_{{len//8}}(volatile void *ptr, _a{{len}} desired, int /*memorder*/)
{
	modm::atomic::Lock _;
	const _a{{len}} previous = *reinterpret_cast<const volatile _a{{len}}*>(ptr);
	*reinterpret_cast<volatile _a{{len}}*>(ptr) = desired;
	return previous;
}
%% endmacro

extern "C" void
__atomic_exchange_c(size_t size, void *ptr, void *val, void *ret, int /*memorder*/)
{
modm::atomic::Lock _;
std::memcpy(ret, ptr, size);
std::memcpy(ptr, val, size);
}


%% macro atomic_compare_exchange(len)
// Emits __atomic_compare_exchange_{{len//8}}: strong CAS on a {{len}}-bit
// value. On mismatch the current value is written back into `expected`, as
// the GCC builtin contract requires. Both memory orders are ignored (the
// global lock is stronger) and the weak flag is irrelevant, since a locked
// compare cannot fail spuriously.
extern "C" bool
__atomic_compare_exchange_{{len//8}}(volatile void *ptr, void *expected, _a{{len}} desired,
		bool /*weak*/, int /*success_memorder*/, int /*failure_memorder*/)
{
	modm::atomic::Lock _;
	const _a{{len}} current = *reinterpret_cast<const volatile _a{{len}}*>(ptr);
	if (current != *reinterpret_cast<_a{{len}}*>(expected))
	{
		*reinterpret_cast<_a{{len}}*>(expected) = current;
		return false;
	}
	*reinterpret_cast<volatile _a{{len}}*>(ptr) = desired;
	return true;
}
%% endmacro

extern "C" bool
__atomic_compare_exchange_c(size_t len, void *ptr, void *expected, void *desired,
bool /*weak*/, int /*success_memorder*/, int /*failure_memorder*/)
{
modm::atomic::Lock _;
if (std::memcmp(ptr, expected, len) == 0)
{
std::memcpy(ptr, desired, len);
return true;
}
std::memcpy(expected, ptr, len);
return false;
}


%% macro atomic_fetch(len)
%% for name, op in [("add", "+"), ("sub", "-"), ("and", "&"), ("or", "|"), ("xor", "^"), ("nand", "&")]
%% set prefix = "~" if name == "nand" else ""
// Emits __atomic_fetch_{{name}}_{{len//8}}: computes `{{prefix}}(old {{op}} value)`
// under the global interrupt lock and returns the OLD value. NAND is
// generated as ~(old & value) via the `prefix` inversion above.
extern "C" _a{{len}}
__atomic_fetch_{{name}}_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
{
	modm::atomic::Lock _;
	const _a{{len}} previous = *reinterpret_cast<const volatile _a{{len}}*>(ptr);
	*reinterpret_cast<volatile _a{{len}}*>(ptr) = {{prefix}}(previous {{op}} value);
	return previous;
}
// Emits __atomic_{{name}}_fetch_{{len//8}}: same operation, but returns the
// NEW value.
extern "C" _a{{len}}
__atomic_{{name}}_fetch_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
{
	modm::atomic::Lock _;
	const _a{{len}} current = {{prefix}}(*reinterpret_cast<const volatile _a{{len}}*>(ptr) {{op}} value);
	*reinterpret_cast<volatile _a{{len}}*>(ptr) = current;
	return current;
}
%% endfor
%% endmacro

%% for length in [8, 16, 32, 64]
// ============================ atomics for {{length}} bits ============================
// Instantiate the full set of lock-based intrinsics for this width.
{{ atomic_load(length) }}

{{ atomic_store(length) }}

{{ atomic_exchange(length) }}

{{ atomic_compare_exchange(length) }}

{{ atomic_fetch(length) }}
%% endfor
7 changes: 2 additions & 5 deletions ext/gcc/libcabi_cortex.cpp → ext/gcc/cabi_cortex.c
Original file line number Diff line number Diff line change
Expand Up @@ -9,16 +9,13 @@
*/
// ----------------------------------------------------------------------------

#include <modm/architecture/interface/assert.hpp>
#include <modm/architecture/interface/assert.h>

// ------------------------------------------------------------------------
extern "C"
{

extern void _exit(int);
void _exit(int status)
{
modm_assert(false, "libc", "libc", "exit", status);
}

__builtin_trap();
}
59 changes: 46 additions & 13 deletions ext/gcc/cxxabi.cpp.in
Original file line number Diff line number Diff line change
Expand Up @@ -17,19 +17,13 @@
extern "C"
{

/**
* \brief Pure-virtual workaround.
*
* The libc does not support a default implementation for handling
* possible pure-virtual calls. This is a short and empty workaround for this.
*/
void
__cxa_pure_virtual()
{
modm_assert_debug(0, "cxa", "virtual", "pure");
}
// Called on a pure-virtual dispatch (e.g. through a partially-constructed
// object): assert in debug builds, then trap — there is no way to continue.
void __cxa_pure_virtual()
{ modm_assert_debug(0, "cxa", "virtual", "pure"); __builtin_trap(); }
// Called when a deleted virtual function is invoked; same terminal handling.
void __cxa_deleted_virtual()
{ modm_assert_debug(0, "cxa", "virtual", "deleted"); __builtin_trap(); }

%% if target.platform in ["avr"]
void* __dso_handle = (void*) &__dso_handle;
%% if core.startswith("avr")
int __cxa_atexit(void (*)(void *), void *, void *)
%% else
// ARM EABI specifies __aeabi_atexit instead of __cxa_atexit
Expand All @@ -38,7 +32,46 @@ int __aeabi_atexit(void (*)(void *), void *, void *)
{
return 0;
}
}

void* __dso_handle = (void*) &__dso_handle;
%% if with_threadsafe_statics
#include <atomic>
/* One-time construction API, see ARM IHI0041D section 3.2.3.
* The ARM C++ ABI mandates the guard to be 32-bit aligned, 32-bit values.
*/
// Guard states per ARM IHI0041D section 3.2.3. The compiler-generated fast
// path only checks `(guard & 1)`, so INITIALIZING deliberately keeps the low
// bit clear while still being distinct from UNINITIALIZED.
enum
{
	UNINITIALIZED = 0,     // static not yet constructed
	INITIALIZED   = 1,     // construction completed (low bit set)
	INITIALIZING  = 0x100, // construction currently in progress
};

// Only invoked by compiler-generated code when `(guard & 1) != 1`,
// i.e. the static has not finished construction yet.
extern "C" int __cxa_guard_acquire(int *guard)
{
	auto *const atomic_guard = reinterpret_cast<std::atomic_int *>(guard);
	// Claim the guard. Finding it already claimed means the initializer
	// re-entered its own construction, which can never make progress.
	const int previous = atomic_guard->exchange(INITIALIZING);
	if (previous != INITIALIZING)
		return 1; // caller must run the initializer now
	modm_assert_debug(0, "cxa", "guard", "recursion", guard);
	return 0; // tell the caller to skip initialization
}

// Marks the static as fully constructed. After this returns, the
// compiler-generated fast path expects `(guard & 1) == 1`.
extern "C" void __cxa_guard_release(int *guard) noexcept
{
	reinterpret_cast<std::atomic_int *>(guard)->store(INITIALIZED);
}

// Called if the initialization terminates by throwing an exception.
// After this function the compiler expects `(guard & 3) == 0`!
extern "C" void __cxa_guard_abort([[maybe_unused]] int *guard) noexcept
{
%% if with_exceptions
	// Reset the guard so a later call may retry the construction.
	std::atomic_int *atomic_guard = reinterpret_cast<std::atomic_int *>(guard);
	atomic_guard->store(UNINITIALIZED);
%% endif
}
%% endif
Loading