Browse Source

system/physmem: Drop 'cpu_' prefix in Physical Memory API

The functions related to the Physical Memory API declared
in "system/ram_addr.h" do not operate on vCPU. Remove the
'cpu_' prefix.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Message-Id: <20251001175448.18933-18-philmd@linaro.org>
pull/305/head
Philippe Mathieu-Daudé 6 months ago
parent
commit
aa60bdb700
  1. 2
      accel/kvm/kvm-all.c
  2. 12
      accel/tcg/cputlb.c
  3. 8
      hw/vfio/container-legacy.c
  4. 4
      hw/vfio/container.c
  5. 24
      include/system/ram_addr.h
  6. 4
      migration/ram.c
  7. 8
      system/memory.c
  8. 2
      system/memory_ldst.c.inc
  9. 48
      system/physmem.c
  10. 2
      target/arm/tcg/mte_helper.c
  11. 4
      tests/tsan/ignore.tsan

2
accel/kvm/kvm-all.c

@@ -758,7 +758,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
static void kvm_slot_reset_dirty_pages(KVMSlot *slot)

12
accel/tcg/cputlb.c

@@ -858,7 +858,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
@@ -867,7 +867,7 @@ void tlb_protect_code(ram_addr_t ram_addr)
tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
@@ -1085,7 +1085,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
if (prot & PAGE_WRITE) {
if (section->readonly) {
write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) {
} else if (physical_memory_is_clean(iotlb)) {
write_flags |= TLB_NOTDIRTY;
}
}
@@ -1341,7 +1341,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
if (!physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
@@ -1349,10 +1349,10 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
/* We remove the notdirty callback only if the code has been flushed. */
if (!cpu_physical_memory_is_clean(ram_addr)) {
if (!physical_memory_is_clean(ram_addr)) {
trace_memory_notdirty_set_dirty(mem_vaddr);
tlb_set_dirty(cpu, mem_vaddr);
}

8
hw/vfio/container-legacy.c

@@ -92,7 +92,7 @@ static int vfio_dma_unmap_bitmap(const VFIOLegacyContainer *container,
bitmap = (struct vfio_bitmap *)&unmap->data;
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* to qemu_real_host_page_size.
*/
@@ -108,7 +108,7 @@ static int vfio_dma_unmap_bitmap(const VFIOLegacyContainer *container,
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
physical_memory_set_dirty_lebitmap(vbmap.bitmap,
iotlb->translated_addr, vbmap.pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
@@ -266,7 +266,7 @@ static int vfio_legacy_query_dirty_bitmap(const VFIOContainer *bcontainer,
range->size = size;
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
* to qemu_real_host_page_size.
*/
@@ -485,7 +485,7 @@ static void vfio_get_iommu_info_migration(VFIOLegacyContainer *container,
header);
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty.
*/
if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {

4
hw/vfio/container.c

@@ -255,7 +255,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
int ret;
if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
cpu_physical_memory_set_dirty_range(translated_addr, size,
physical_memory_set_dirty_range(translated_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
return 0;
@@ -280,7 +280,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
goto out;
}
dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
dirty_pages = physical_memory_set_dirty_lebitmap(vbmap.bitmap,
translated_addr,
vbmap.pages);

24
include/system/ram_addr.h

@@ -136,39 +136,39 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
bool cpu_physical_memory_is_clean(ram_addr_t addr);
bool physical_memory_is_clean(ram_addr_t addr);
uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask);
void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client);
void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client);
void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask);
/*
* Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
* Contrary to physical_memory_sync_dirty_bitmap() this function returns
* the number of dirty pages in @bitmap passed as argument. On the other hand,
* cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
* physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
* weren't set in the global migration bitmap.
*/
uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages);
void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length);
void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length);
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length);

4
migration/ram.c

@@ -977,7 +977,7 @@ static uint64_t physical_memory_sync_dirty_bitmap(RAMBlock *rb,
}
}
if (num_dirty) {
cpu_physical_memory_dirty_bits_cleared(start, length);
physical_memory_dirty_bits_cleared(start, length);
}
if (rb->clear_bmap) {
@@ -996,7 +996,7 @@ static uint64_t physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t offset = rb->offset;
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
if (physical_memory_test_and_clear_dirty(
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {

8
system/memory.c

@@ -2271,7 +2271,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size)
{
assert(mr->ram_block);
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
size,
memory_region_get_dirty_log_mask(mr));
}
@@ -2375,7 +2375,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr, false);
snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
snapshot = physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync();
return snapshot;
}
@@ -2384,7 +2384,7 @@ bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *sna
hwaddr addr, hwaddr size)
{
assert(mr->ram_block);
return cpu_physical_memory_snapshot_get_dirty(snap,
return physical_memory_snapshot_get_dirty(snap,
memory_region_get_ram_addr(mr) + addr, size);
}
@@ -2422,7 +2422,7 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client)
{
assert(mr->ram_block);
cpu_physical_memory_test_and_clear_dirty(
physical_memory_test_and_clear_dirty(
memory_region_get_ram_addr(mr) + addr, size, client);
}

2
system/memory_ldst.c.inc

@@ -287,7 +287,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
4, dirty_log_mask);
r = MEMTX_OK;
}

48
system/physmem.c

@@ -899,7 +899,7 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
}
}
void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length)
void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length)
{
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
@@ -945,17 +945,17 @@ static bool physical_memory_get_dirty(ram_addr_t start, ram_addr_t length,
return dirty;
}
bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client)
bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client)
{
return physical_memory_get_dirty(addr, 1, client);
}
bool cpu_physical_memory_is_clean(ram_addr_t addr)
bool physical_memory_is_clean(ram_addr_t addr)
{
bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
bool vga = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
bool code = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
bool migration =
cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
return !(vga && code && migration);
}
@@ -998,7 +998,7 @@ static bool physical_memory_all_dirty(ram_addr_t start, ram_addr_t length,
return dirty;
}
uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
@@ -1019,7 +1019,7 @@ uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
return ret;
}
void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
{
unsigned long page, idx, offset;
DirtyMemoryBlocks *blocks;
@@ -1037,7 +1037,7 @@ void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
set_bit_atomic(offset, blocks->blocks[idx]);
}
void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask)
{
DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
@@ -1089,7 +1089,7 @@ void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
}
/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
@@ -1131,7 +1131,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
}
if (dirty) {
cpu_physical_memory_dirty_bits_cleared(start, length);
physical_memory_dirty_bits_cleared(start, length);
}
return dirty;
@@ -1139,12 +1139,12 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
static void physical_memory_clear_dirty_range(ram_addr_t addr, ram_addr_t length)
{
cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION);
cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA);
cpu_physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE);
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION);
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA);
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE);
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
DirtyMemoryBlocks *blocks;
@@ -1191,14 +1191,14 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
}
}
cpu_physical_memory_dirty_bits_cleared(start, length);
physical_memory_dirty_bits_cleared(start, length);
memory_region_clear_dirty_bitmap(mr, offset, length);
return snap;
}
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length)
{
@@ -1219,7 +1219,7 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
return false;
}
uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages)
{
@@ -1312,7 +1312,7 @@ uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
page_number = (i * HOST_LONG_BITS + j) * hpratio;
addr = page_number * TARGET_PAGE_SIZE;
ram_addr = start + addr;
cpu_physical_memory_set_dirty_range(ram_addr,
physical_memory_set_dirty_range(ram_addr,
TARGET_PAGE_SIZE * hpratio, clients);
} while (c != 0);
}
@@ -2080,7 +2080,7 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, unaligned_size);
if (block->resized) {
@@ -2285,7 +2285,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
ram_list.version++;
qemu_mutex_unlock_ramlist();
cpu_physical_memory_set_dirty_range(new_block->offset,
physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
@@ -3134,19 +3134,19 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
addr += ramaddr;
/* No early return if dirty_log_mask is or becomes 0, because
* cpu_physical_memory_set_dirty_range will still call
* physical_memory_set_dirty_range will still call
* xen_modified_memory.
*/
if (dirty_log_mask) {
dirty_log_mask =
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
tb_invalidate_phys_range(NULL, addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)

2
target/arm/tcg/mte_helper.c

@@ -189,7 +189,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
*/
if (tag_access == MMU_DATA_STORE) {
ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
}
return memory_region_get_ram_ptr(mr) + xlat;

4
tests/tsan/ignore.tsan

@@ -4,7 +4,7 @@
# The eventual goal would be to fix these warnings.
# TSan is not happy about setting/getting of dirty bits,
# for example, cpu_physical_memory_set_dirty_range,
# and cpu_physical_memory_get_dirty.
# for example, physical_memory_set_dirty_range,
# and physical_memory_get_dirty.
src:bitops.c
src:bitmap.c

Loading…
Cancel
Save