Browse Source

include/qemu/atomic: Drop qatomic_{read,set}_[iu]64

Replace all uses with the normal qatomic_{read,set}.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
pull/316/head
Richard Henderson 3 months ago
parent
commit
71adccb6f7
  1. 4
      accel/qtest/qtest.c
  2. 25
      accel/tcg/icount-common.c
  3. 22
      include/qemu/atomic.h
  4. 2
      system/dirtylimit.c
  5. 17
      tests/unit/test-rcu-list.c
  6. 85
      util/atomic64.c
  7. 2
      util/cacheflush.c
  8. 3
      util/meson.build
  9. 8
      util/qsp.c

4
accel/qtest/qtest.c

@@ -31,12 +31,12 @@ static int64_t qtest_clock_counter;
static int64_t qtest_get_virtual_clock(void)
{
return qatomic_read_i64(&qtest_clock_counter);
return qatomic_read(&qtest_clock_counter);
}
static void qtest_set_virtual_clock(int64_t count)
{
qatomic_set_i64(&qtest_clock_counter, count);
qatomic_set(&qtest_clock_counter, count);
}
static int qtest_init_accel(AccelState *as, MachineState *ms)

25
accel/tcg/icount-common.c

@@ -86,8 +86,8 @@ static void icount_update_locked(CPUState *cpu)
int64_t executed = icount_get_executed(cpu);
cpu->icount_budget -= executed;
qatomic_set_i64(&timers_state.qemu_icount,
timers_state.qemu_icount + executed);
qatomic_set(&timers_state.qemu_icount,
timers_state.qemu_icount + executed);
}
/*
@@ -116,15 +116,14 @@ static int64_t icount_get_raw_locked(void)
/* Take into account what has run */
icount_update_locked(cpu);
}
/* The read is protected by the seqlock, but needs atomic64 to avoid UB */
return qatomic_read_i64(&timers_state.qemu_icount);
/* The read is protected by the seqlock, but needs atomic to avoid UB */
return qatomic_read(&timers_state.qemu_icount);
}
static int64_t icount_get_locked(void)
{
int64_t icount = icount_get_raw_locked();
return qatomic_read_i64(&timers_state.qemu_icount_bias) +
icount_to_ns(icount);
return qatomic_read(&timers_state.qemu_icount_bias) + icount_to_ns(icount);
}
int64_t icount_get_raw(void)
@@ -201,9 +200,9 @@ static void icount_adjust(void)
timers_state.icount_time_shift + 1);
}
timers_state.last_delta = delta;
qatomic_set_i64(&timers_state.qemu_icount_bias,
cur_icount - (timers_state.qemu_icount
<< timers_state.icount_time_shift));
qatomic_set(&timers_state.qemu_icount_bias,
cur_icount - (timers_state.qemu_icount
<< timers_state.icount_time_shift));
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
}
@@ -269,8 +268,8 @@ static void icount_warp_rt(void)
}
warp_delta = MIN(warp_delta, delta);
}
qatomic_set_i64(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + warp_delta);
qatomic_set(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + warp_delta);
}
timers_state.vm_clock_warp_start = -1;
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
@@ -361,8 +360,8 @@ void icount_start_warp_timer(void)
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
qatomic_set_i64(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + deadline);
qatomic_set(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + deadline);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);

22
include/qemu/atomic.h

@@ -247,26 +247,4 @@
typedef int64_t aligned_int64_t __attribute__((aligned(8)));
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
#ifdef CONFIG_ATOMIC64
/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
#define qatomic_read_i64(P) \
_Generic(*(P), int64_t: qatomic_read__nocheck(P))
#define qatomic_read_u64(P) \
_Generic(*(P), uint64_t: qatomic_read__nocheck(P))
#define qatomic_set_i64(P, V) \
_Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
#define qatomic_set_u64(P, V) \
_Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))
static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
int64_t qatomic_read_i64(const int64_t *ptr);
uint64_t qatomic_read_u64(const uint64_t *ptr);
void qatomic_set_i64(int64_t *ptr, int64_t val);
void qatomic_set_u64(uint64_t *ptr, uint64_t val);
void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
#endif /* QEMU_ATOMIC_H */

2
system/dirtylimit.c

@@ -123,7 +123,7 @@ static void *vcpu_dirty_rate_stat_thread(void *opaque)
int64_t vcpu_dirty_rate_get(int cpu_index)
{
DirtyRateVcpu *rates = vcpu_dirty_rate_stat->stat.rates;
return qatomic_read_i64(&rates[cpu_index].dirty_rate);
return qatomic_read(&rates[cpu_index].dirty_rate);
}
void vcpu_dirty_rate_stat_start(void)

17
tests/unit/test-rcu-list.c

@@ -105,7 +105,7 @@ static void reclaim_list_el(struct rcu_head *prcu)
struct list_element *el = container_of(prcu, struct list_element, rcu);
g_free(el);
/* Accessed only from call_rcu thread. */
qatomic_set_i64(&n_reclaims, n_reclaims + 1);
qatomic_set(&n_reclaims, n_reclaims + 1);
}
#if TEST_LIST_TYPE == 1
@@ -247,7 +247,7 @@ static void *rcu_q_updater(void *arg)
qemu_mutex_lock(&counts_mutex);
n_nodes += n_nodes_local;
n_updates += n_updates_local;
qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
qatomic_set(&n_nodes_removed, n_nodes_removed + n_removed_local);
qemu_mutex_unlock(&counts_mutex);
return NULL;
}
@@ -301,23 +301,22 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
n_removed_local++;
}
qemu_mutex_lock(&counts_mutex);
qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local);
qatomic_set(&n_nodes_removed, n_nodes_removed + n_removed_local);
qemu_mutex_unlock(&counts_mutex);
synchronize_rcu();
while (qatomic_read_i64(&n_nodes_removed) >
qatomic_read_i64(&n_reclaims)) {
while (qatomic_read(&n_nodes_removed) > qatomic_read(&n_reclaims)) {
g_usleep(100);
synchronize_rcu();
}
if (g_test_in_charge) {
g_assert_cmpint(qatomic_read_i64(&n_nodes_removed), ==,
qatomic_read_i64(&n_reclaims));
g_assert_cmpint(qatomic_read(&n_nodes_removed), ==,
qatomic_read(&n_reclaims));
} else {
printf("%s: %d readers; 1 updater; nodes read: " \
"%lld, nodes removed: %"PRIi64"; nodes reclaimed: %"PRIi64"\n",
test, nthreadsrunning - 1, n_reads,
qatomic_read_i64(&n_nodes_removed),
qatomic_read_i64(&n_reclaims));
qatomic_read(&n_nodes_removed),
qatomic_read(&n_reclaims));
exit(0);
}
}

85
util/atomic64.c

@@ -1,85 +0,0 @@
/*
* Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
*
* License: GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/cacheinfo.h"
#include "qemu/memalign.h"
#ifdef CONFIG_ATOMIC64
#error This file must only be compiled if !CONFIG_ATOMIC64
#endif
/*
* When !CONFIG_ATOMIC64, we serialize both reads and writes with spinlocks.
* We use an array of spinlocks, with padding computed at run-time based on
* the host's dcache line size.
* We point to the array with a void * to simplify the padding's computation.
* Each spinlock is located every lock_size bytes.
*/
static void *lock_array;
static size_t lock_size;
/*
* Systems without CONFIG_ATOMIC64 are unlikely to have many cores, so we use a
* small array of locks.
*/
#define NR_LOCKS 16
static QemuSpin *addr_to_lock(const void *addr)
{
uintptr_t a = (uintptr_t)addr;
uintptr_t idx;
idx = a >> qemu_dcache_linesize_log;
idx ^= (idx >> 8) ^ (idx >> 16);
idx &= NR_LOCKS - 1;
return lock_array + idx * lock_size;
}
#define GEN_READ(name, type) \
type name(const type *ptr) \
{ \
QemuSpin *lock = addr_to_lock(ptr); \
type ret; \
\
qemu_spin_lock(lock); \
ret = *ptr; \
qemu_spin_unlock(lock); \
return ret; \
}
GEN_READ(qatomic_read_i64, int64_t)
GEN_READ(qatomic_read_u64, uint64_t)
#undef GEN_READ
#define GEN_SET(name, type) \
void name(type *ptr, type val) \
{ \
QemuSpin *lock = addr_to_lock(ptr); \
\
qemu_spin_lock(lock); \
*ptr = val; \
qemu_spin_unlock(lock); \
}
GEN_SET(qatomic_set_i64, int64_t)
GEN_SET(qatomic_set_u64, uint64_t)
#undef GEN_SET
void qatomic64_init(void)
{
int i;
lock_size = ROUND_UP(sizeof(QemuSpin), qemu_dcache_linesize);
lock_array = qemu_memalign(qemu_dcache_linesize, lock_size * NR_LOCKS);
for (i = 0; i < NR_LOCKS; i++) {
QemuSpin *lock = lock_array + i * lock_size;
qemu_spin_init(lock);
}
}

2
util/cacheflush.c

@@ -216,8 +216,6 @@ static void __attribute__((constructor)) init_cache_info(void)
qemu_icache_linesize_log = ctz32(isize);
qemu_dcache_linesize = dsize;
qemu_dcache_linesize_log = ctz32(dsize);
qatomic64_init();
}

3
util/meson.build

@@ -1,8 +1,5 @@
util_ss.add(files('osdep.c', 'cutils.c', 'unicode.c', 'qemu-timer-common.c'))
util_ss.add(files('thread-context.c'), numa)
if not config_host_data.get('CONFIG_ATOMIC64')
util_ss.add(files('atomic64.c'))
endif
if host_os != 'windows'
util_ss.add(files('aio-posix.c'))
util_ss.add(files('fdmon-poll.c'))

8
util/qsp.c

@@ -346,9 +346,9 @@ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line,
*/
static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq)
{
qatomic_set_u64(&e->ns, e->ns + delta);
qatomic_set(&e->ns, e->ns + delta);
if (acq) {
qatomic_set_u64(&e->n_acqs, e->n_acqs + 1);
qatomic_set(&e->n_acqs, e->n_acqs + 1);
}
}
@@ -538,8 +538,8 @@ static void qsp_aggregate(void *p, uint32_t h, void *up)
* The entry is in the global hash table; read from it atomically (as in
* "read once").
*/
agg->ns += qatomic_read_u64(&e->ns);
agg->n_acqs += qatomic_read_u64(&e->n_acqs);
agg->ns += qatomic_read(&e->ns);
agg->n_acqs += qatomic_read(&e->n_acqs);
}
static void qsp_iter_diff(void *p, uint32_t hash, void *htp)

Loading…
Cancel
Save