I recently discovered the C++20 atomic wait/notify mechanism and used it to write this readers-writer lock in the style of the Linux kernel's seqlock. Writes must be bracketed by lock()/unlock(), while reads are optimistic (they retry if a write intervened). It is very fast, but I am wondering whether it is wrong in some way — in particular, could it deadlock for reasons beyond ordinary timing effects (e.g., reads taking much longer than writes)?
#include <atomic>
#include <cstdint>
class RwSeqLock
{
// *** NB this can indeed lock up and is dangerous. see stackexchange replies ***
std::atomic<uint64_t> m_count = { 0 };
std::atomic<uint64_t> m_waiting = { 0 };
public:
void lock()
{
uint64_t count = m_count.load();
if (!(count & 1) && m_count.compare_exchange_weak(count, count + 1))
return;
count = m_count.load();
m_waiting.fetch_add(1);
while (1)
{
if (!(count & 1))
{
if (m_count.compare_exchange_weak(count, count + 1))
{
m_waiting.fetch_sub(1);
return;
}
}
else
{
m_count.wait(count);
count = m_count.load();
}
}
}
void unlock()
{
m_count.fetch_add(1);
if (m_waiting.load())
m_count.notify_one();
}
template<class Func>
auto read(const Func& func)
{
uint64_t count = m_count.load();
if (!(count & 1))
{
auto val = func();
if (m_count.load() == count)
return val;
}
count = m_count.load();
m_waiting.fetch_add(1);
while (1)
{
if (!(count & 1))
{
auto val = func();
uint64_t count_after = m_count.load();
if (count_after == count)
{
m_waiting.fetch_sub(1);
return val;
}
else
count = count_after;
}
else
{
m_count.wait(count);
count = m_count.load();
}
}
}
// stats
uint64_t count() const { return m_count.load(); }
uint64_t waiting() const { return m_waiting.load(); }
};