Properly support <atomic> on AVR and Cortex-M #1164

Merged
merged 6 commits into from
May 20, 2024
14 changes: 14 additions & 0 deletions ext/gcc/atomic
@@ -0,0 +1,14 @@
/*
* Copyright (c) 2024, Niklas Hauser
*
* This file is part of the modm project.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
// ----------------------------------------------------------------------------

#include_next <atomic>

#include <modm_atomic.hpp>
100 changes: 100 additions & 0 deletions ext/gcc/atomic.cpp.in
@@ -0,0 +1,100 @@
/*
* Copyright (c) 2020, 2024, Niklas Hauser
*
* This file is part of the modm project.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
// ----------------------------------------------------------------------------

#include <modm_atomic.hpp>

/* We are implementing the library interface described here:
 * See https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary
 *
 * We ignore the memory order, since the runtime switching takes longer than
 * the DMB instruction.
 */

// ============================ atomics for arrays ============================
// These functions cannot be inlined, since the compiler builtins are named the
// same. Terrible design really.
extern "C" void
__atomic_load(unsigned int size, const volatile void *src, void *dest, int /*memorder*/)
{
__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
{
modm::atomic::Lock _;
__builtin_memcpy(dest, (const void*)src, size);
}
__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
}

extern "C" void
__atomic_store(unsigned int size, volatile void *dest, void *src, int /*memorder*/)
{
__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
{
modm::atomic::Lock _;
__builtin_memcpy((void*)dest, src, size);
}
__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
}

extern "C" void
__atomic_exchange(unsigned int size, volatile void *ptr, void *val, void *ret, int /*memorder*/)
{
__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
{
modm::atomic::Lock _;
__builtin_memcpy(ret, (void*)ptr, size);
__builtin_memcpy((void*)ptr, val, size);
}
__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
}

extern "C" bool
__atomic_compare_exchange(unsigned int len, volatile void *ptr, void *expected, void *desired,
int /*success_memorder*/, int /*failure_memorder*/)
{
bool retval{false};
__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
{
modm::atomic::Lock _;
if (__builtin_memcmp((void*)ptr, expected, len) == 0) [[likely]]
{
__builtin_memcpy((void*)ptr, desired, len);
retval = true;
}
else __builtin_memcpy(expected, (void*)ptr, len);
}
__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
return retval;
}

%% macro atomic_fetch(len)
%% for name, op in [("add", "+"), ("sub", "-")]
extern "C" {{len|u}}
__atomic_fetch_{{name}}_{{len//8}}(volatile void *ptr, {{len|u}} value, int /*memorder*/)
{
    {{len|u}} previous{};
    __modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
    {
        modm::atomic::Lock _;
        previous = *reinterpret_cast<volatile {{len|u}}*>(ptr);
        *reinterpret_cast<volatile {{len|u}}*>(ptr) = (previous {{op}} value);
    }
    __modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
    return previous;
}
%% endfor
%% endmacro

%% for length in bit_lengths
// ========================= atomics for {{length}} bit integers =========================
// These functions cannot be inlined since the compiler refuses to find these
// functions even if they are declared right at the call site. Unclear why.
{{ atomic_fetch(length) }}
%% endfor
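
For context on when these library calls are emitted: GCC lowers atomic operations that it cannot implement with native instructions into calls to this generic interface. The following is a minimal caller-side sketch, assuming a target such as AVR or Cortex-M0+ without the corresponding lock-free instructions; the type and function names (Data, counter, read_data, bump) are illustrative only, and the exact lowering depends on the target and GCC version.

#include <atomic>
#include <cstdint>

struct Data { uint32_t a, b, c; };      // 12 bytes: larger than any native atomic width

std::atomic<Data>     shared_data{};    // not lock-free -> generic "array" functions above
std::atomic<uint64_t> counter{};        // 64-bit is not lock-free on AVR/Cortex-M0+ either

Data read_data()
{
    // roughly lowers to: __atomic_load(sizeof(Data), &shared_data, &result, __ATOMIC_SEQ_CST)
    return shared_data.load();
}

uint64_t bump()
{
    // roughly lowers to: __atomic_fetch_add_8(&counter, 1, __ATOMIC_SEQ_CST)
    return counter.fetch_add(1);
}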
150 changes: 0 additions & 150 deletions ext/gcc/atomics_c11_cortex.cpp.in

This file was deleted.

62 changes: 45 additions & 17 deletions ext/gcc/cxxabi.cpp.in
@@ -2,7 +2,7 @@
* Copyright (c) 2009-2011, Fabian Greif
* Copyright (c) 2010, Martin Rosekeit
* Copyright (c) 2012, Sascha Schade
* Copyright (c) 2012-2014, 2020, Niklas Hauser
* Copyright (c) 2012-2014, 2020, 2024, Niklas Hauser
*
* This file is part of the modm project.
*
@@ -27,44 +27,72 @@ void __cxa_deleted_virtual()

%% if with_threadsafe_statics
#include <atomic>
/* One-time construction API, see ARM IHI0041D section 3.2.3.
* The ARM C++ ABI mandates the guard to be 32-bit aligned, 32-bit values.
*/
%% if is_avr
%#
// Even though the actual guard size is uint64_t on AVR, we only need to access
// the first uint8_t and thus can significantly reduce the code size of the
// atomic access implementation.
using guard_type = uint8_t;
%% elif is_cortex_m
#include <modm/platform/device.hpp>

// One-time construction API, see ARM IHI0041D section 3.2.3.
// The ARM C++ ABI mandates the guard to be a 32-bit aligned, 32-bit value.
using guard_type = uint32_t;
%% else
%#
using guard_type = uint64_t;
%% endif
%#
enum
{
UNINITIALIZED = 0,
INITIALIZED = 1,
INITIALIZING = 0x100,
INITIALIZING = 0x10,
};

// This function returns 1 only if the object needs to be initialized
extern "C" int __cxa_guard_acquire(int *guard)
extern "C" int
__cxa_guard_acquire(guard_type *guard)
{
auto atomic_guard = std::atomic_ref(*guard);
if (atomic_guard.load() == INITIALIZED)
return 0;
if (atomic_guard.exchange(INITIALIZING) == INITIALIZING)
guard_type value = atomic_guard.load(std::memory_order_relaxed);
do
{
modm_assert(0, "stat.rec",
"Recursive initialization of a function static!", guard);
if (value == INITIALIZED) return 0;
if (value == INITIALIZING)
{
%% if is_cortex_m
const bool is_in_irq = __get_IPSR();
%% else
// The hardware cannot tell us, so we must assume it to be true
constexpr bool is_in_irq = true;
%% endif
// We got called from inside an interrupt, but we cannot yield back
modm_assert(not is_in_irq, "stat.rec",
"Recursive initialization of a function static!", guard);
}
value = UNINITIALIZED;
}
while(not atomic_guard.compare_exchange_weak(value, INITIALIZING,
std::memory_order_acquire, std::memory_order_relaxed));
return 1;
}

// After this function the compiler expects `(guard & 1) == 1`!
extern "C" void __cxa_guard_release(int *guard) noexcept
extern "C" void
__cxa_guard_release(guard_type *guard)
{
auto atomic_guard = std::atomic_ref(*guard);
atomic_guard.store(INITIALIZED);
atomic_guard.store(INITIALIZED, std::memory_order_release);
}

// Called if the initialization terminates by throwing an exception.
// After this function the compiler expects `(guard & 3) == 0`!
extern "C" void __cxa_guard_abort([[maybe_unused]] int *guard) noexcept
extern "C" void
__cxa_guard_abort([[maybe_unused]] guard_type *guard)
{
%% if with_exceptions
auto atomic_guard = std::atomic_ref(*guard);
atomic_guard.store(UNINITIALIZED);
%% endif
atomic_guard.store(UNINITIALIZED, std::memory_order_release);
}
%% endif
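
For reference, the one-time construction API above is what the compiler calls when it initializes a function-local static. The sketch below is a hand-written rough equivalent of the generated code, not the actual compiler output; the names Driver, instance, and storage are illustrative, and real codegen additionally checks the guard inline before calling acquire and (with exceptions enabled) calls __cxa_guard_abort if the constructor throws.

#include <new>   // placement new

Driver& instance()
{
    static Driver driver{};   // thread-safe / interrupt-safe one-time construction
    return driver;
}

// ...is roughly equivalent to:
Driver& instance()
{
    static guard_type guard;          // zero-initialized, i.e. UNINITIALIZED
    alignas(Driver) static char storage[sizeof(Driver)];
    if (__cxa_guard_acquire(&guard))  // returns 1 only if we must construct
    {
        new (storage) Driver{};       // run the constructor exactly once
        __cxa_guard_release(&guard);  // marks the guard INITIALIZED
    }
    return *reinterpret_cast<Driver*>(storage);
}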