Commit 224264ad authored by Kuba Brecka

[libcxx] Fix a data race in call_once

call_once uses a relaxed atomic load to perform double-checked locking, which contains a data race: a thread that takes the fast path because it sees the flag already set is not synchronized with the writes made by the thread that ran the initializer. The fast-path load has to be an acquire atomic load.

Differential Revision: https://reviews.llvm.org/D24028

llvm-svn: 280621
parent ccd44939
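
To see the race the commit message describes, here is a minimal standalone model of the double-checked locking pattern, written with std::atomic rather than libc++'s internal helpers; the names (done, result, get) are illustrative, not libc++'s:

    #include <atomic>
    #include <mutex>

    // Illustrative double-checked "once" guard; not libc++'s actual code.
    std::atomic<bool> done{false};
    std::mutex m;
    int result;  // payload written once by the initializer, read by every caller

    int get() {
        // Fast path: must be an acquire load so it synchronizes-with the
        // release store below. With a relaxed load, a caller can observe
        // done == true and still read `result` without a happens-before
        // edge, which is a data race.
        if (!done.load(std::memory_order_acquire)) {
            std::lock_guard<std::mutex> lock(m);
            if (!done.load(std::memory_order_relaxed)) {  // re-check under the mutex
                result = 42;  // stands in for the user's callable
                done.store(true, std::memory_order_release);
            }
        }
        return result;
    }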
@@ -663,6 +663,18 @@ _ValueType __libcpp_relaxed_load(_ValueType const* __value) {
 #endif
 }
 
+template <class _ValueType>
+inline _LIBCPP_ALWAYS_INLINE
+_ValueType __libcpp_acquire_load(_ValueType const* __value) {
+#if !defined(_LIBCPP_HAS_NO_THREADS) && \
+    defined(__ATOMIC_ACQUIRE) && \
+    (__has_builtin(__atomic_load_n) || _GNUC_VER >= 407)
+    return __atomic_load_n(__value, __ATOMIC_ACQUIRE);
+#else
+    return *__value;
+#endif
+}
+
 // addressof moved to <__functional_base>
 
 template <class _Tp> class allocator;
...
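
For context (not part of the commit): the new __libcpp_acquire_load mirrors the existing __libcpp_relaxed_load, using the __atomic_load_n builtin when the compiler provides it (Clang via __has_builtin, GCC 4.7+ via _GNUC_VER) and falling back to a plain load for builds without threads. A rough portable equivalent in terms of std::atomic, for illustration only:

    #include <atomic>

    // Rough std::atomic equivalent of the new helper, for illustration;
    // the real helper loads through a plain pointer with a compiler
    // builtin so once_flag's state need not be declared std::atomic.
    template <class T>
    T acquire_load(const std::atomic<T>* value) {
        return value->load(std::memory_order_acquire);
    }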
@@ -574,7 +574,7 @@ inline _LIBCPP_INLINE_VISIBILITY
 void
 call_once(once_flag& __flag, _Callable&& __func, _Args&&... __args)
 {
-    if (__libcpp_relaxed_load(&__flag.__state_) != ~0ul)
+    if (__libcpp_acquire_load(&__flag.__state_) != ~0ul)
     {
         typedef tuple<_Callable&&, _Args&&...> _Gp;
         _Gp __f(_VSTD::forward<_Callable>(__func), _VSTD::forward<_Args>(__args)...);
@@ -590,7 +590,7 @@ inline _LIBCPP_INLINE_VISIBILITY
 void
 call_once(once_flag& __flag, _Callable& __func)
 {
-    if (__libcpp_relaxed_load(&__flag.__state_) != ~0ul)
+    if (__libcpp_acquire_load(&__flag.__state_) != ~0ul)
     {
         __call_once_param<_Callable> __p(__func);
         __call_once(__flag.__state_, &__p, &__call_once_proxy<_Callable>);
...
@@ -199,9 +199,6 @@ static __libcpp_mutex_t mut = _LIBCPP_MUTEX_INITIALIZER;
 static __libcpp_condvar_t cv = _LIBCPP_CONDVAR_INITIALIZER;
 #endif
 
-/// NOTE: Changes to flag are done via relaxed atomic stores
-///       even though the accesses are protected by a mutex because threads
-///       just entering 'call_once` concurrently read from flag.
 void
 __call_once(volatile unsigned long& flag, void* arg, void(*func)(void*))
 {
@@ -238,7 +235,7 @@ __call_once(volatile unsigned long& flag, void* arg, void(*func)(void*))
         __libcpp_mutex_unlock(&mut);
         func(arg);
         __libcpp_mutex_lock(&mut);
-        __libcpp_relaxed_store(&flag, ~0ul);
+        __libcpp_atomic_store(&flag, ~0ul, _AO_Release);
         __libcpp_mutex_unlock(&mut);
         __libcpp_condvar_broadcast(&cv);
 #ifndef _LIBCPP_NO_EXCEPTIONS
...
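
The store side changes for the same reason: an acquire load only establishes happens-before when it reads a value written by a release (or stronger) store, so the completion store in __call_once must be at least release to pair with the new fast-path load. A hedged sketch of that pairing, again using std::atomic instead of libc++'s helpers (the threads and the side_effect variable are invented for illustration):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<unsigned long> state{0};  // ~0ul is the "done" value call_once uses
    int side_effect = 0;

    void initializer() {
        side_effect = 1;                               // work done by the callable
        state.store(~0ul, std::memory_order_release);  // pairs with the acquire load
    }

    void fast_path_reader() {
        // If this acquire load observes ~0ul, the write to side_effect
        // is guaranteed to be visible.
        if (state.load(std::memory_order_acquire) == ~0ul)
            std::printf("%d\n", side_effect);  // prints 1, never 0
    }

    int main() {
        std::thread t1(initializer), t2(fast_path_reader);
        t1.join();
        t2.join();
    }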