@ -1742,18 +1742,22 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
# endif
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
/*
 * Probe for an atomic operation.  Do not allow unaligned operations,
 * or io operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void * atomic_mmu_lookup ( CPUArchState * env , target_ulong addr ,
TCGMemOpIdx oi , uintptr_t retaddr )
TCGMemOpIdx oi , int size , int prot ,
uintptr_t retaddr )
{
size_t mmu_idx = get_mmuidx ( oi ) ;
uintptr_t index = tlb_index ( env , mmu_idx , addr ) ;
CPUTLBEntry * tlbe = tlb_entry ( env , mmu_idx , addr ) ;
target_ulong tlb_addr = tlb_addr_write ( tlbe ) ;
MemOp mop = get_memop ( oi ) ;
int a_bits = get_alignment_bits ( mop ) ;
int s_bits = mop & MO_SIZE ;
uintptr_t index ;
CPUTLBEntry * tlbe ;
target_ulong tlb_addr ;
void * hostaddr ;
/* Adjust the given return address. */
@ -1767,7 +1771,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/* Enforce qemu required alignment. */
if ( unlikely ( addr & ( ( 1 < < s_bits ) - 1 ) ) ) {
if ( unlikely ( addr & ( size - 1 ) ) ) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
@ -1775,15 +1779,45 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world ;
}
index = tlb_index ( env , mmu_idx , addr ) ;
tlbe = tlb_entry ( env , mmu_idx , addr ) ;
/* Check TLB entry and enforce page permissions. */
if ( ! tlb_hit ( tlb_addr , addr ) ) {
if ( ! VICTIM_TLB_HIT ( addr_write , addr ) ) {
tlb_fill ( env_cpu ( env ) , addr , 1 < < s_bits , MMU_DATA_STORE ,
mmu_idx , retaddr ) ;
index = tlb_index ( env , mmu_idx , addr ) ;
tlbe = tlb_entry ( env , mmu_idx , addr ) ;
if ( prot & PAGE_WRITE ) {
tlb_addr = tlb_addr_write ( tlbe ) ;
if ( ! tlb_hit ( tlb_addr , addr ) ) {
if ( ! VICTIM_TLB_HIT ( addr_write , addr ) ) {
tlb_fill ( env_cpu ( env ) , addr , size ,
MMU_DATA_STORE , mmu_idx , retaddr ) ;
index = tlb_index ( env , mmu_idx , addr ) ;
tlbe = tlb_entry ( env , mmu_idx , addr ) ;
}
tlb_addr = tlb_addr_write ( tlbe ) & ~ TLB_INVALID_MASK ;
}
/* Let the guest notice RMW on a write-only page. */
if ( ( prot & PAGE_READ ) & &
unlikely ( tlbe - > addr_read ! = ( tlb_addr & ~ TLB_NOTDIRTY ) ) ) {
tlb_fill ( env_cpu ( env ) , addr , size ,
MMU_DATA_LOAD , mmu_idx , retaddr ) ;
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
goto stop_the_world ;
}
} else /* if (prot & PAGE_READ) */ {
tlb_addr = tlbe - > addr_read ;
if ( ! tlb_hit ( tlb_addr , addr ) ) {
if ( ! VICTIM_TLB_HIT ( addr_write , addr ) ) {
tlb_fill ( env_cpu ( env ) , addr , size ,
MMU_DATA_LOAD , mmu_idx , retaddr ) ;
index = tlb_index ( env , mmu_idx , addr ) ;
tlbe = tlb_entry ( env , mmu_idx , addr ) ;
}
tlb_addr = tlbe - > addr_read & ~ TLB_INVALID_MASK ;
}
tlb_addr = tlb_addr_write ( tlbe ) & ~ TLB_INVALID_MASK ;
}
/* Notice an IO access or a needs-MMU-lookup access */
@ -1793,20 +1827,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
goto stop_the_world ;
}
/* Let the guest notice RMW on a write-only page. */
if ( unlikely ( tlbe - > addr_read ! = ( tlb_addr & ~ TLB_NOTDIRTY ) ) ) {
tlb_fill ( env_cpu ( env ) , addr , 1 < < s_bits , MMU_DATA_LOAD ,
mmu_idx , retaddr ) ;
    /* Since we don't support reads and writes to different addresses,
       and we do have the proper page loaded for write, this shouldn't
       ever return.  But just in case, handle via stop-the-world.  */
goto stop_the_world ;
}
hostaddr = ( void * ) ( ( uintptr_t ) addr + tlbe - > addend ) ;
if ( unlikely ( tlb_addr & TLB_NOTDIRTY ) ) {
notdirty_write ( env_cpu ( env ) , addr , 1 < < s_bits ,
notdirty_write ( env_cpu ( env ) , addr , size ,
& env_tlb ( env ) - > d [ mmu_idx ] . iotlb [ index ] , retaddr ) ;
}
@ -2669,7 +2693,12 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
# define ATOMIC_NAME(X) \
HELPER ( glue ( glue ( glue ( atomic_ # # X , SUFFIX ) , END ) , _mmu ) )
# define ATOMIC_MMU_DECLS
# define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
# define ATOMIC_MMU_LOOKUP_RW \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_READ | PAGE_WRITE , retaddr )
# define ATOMIC_MMU_LOOKUP_R \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_READ , retaddr )
# define ATOMIC_MMU_LOOKUP_W \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_WRITE , retaddr )
# define ATOMIC_MMU_CLEANUP
# define ATOMIC_MMU_IDX get_mmuidx(oi)
@ -2698,10 +2727,18 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
# undef EXTRA_ARGS
# undef ATOMIC_NAME
# undef ATOMIC_MMU_LOOKUP
# undef ATOMIC_MMU_LOOKUP_RW
# undef ATOMIC_MMU_LOOKUP_R
# undef ATOMIC_MMU_LOOKUP_W
# define EXTRA_ARGS , TCGMemOpIdx oi
# define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
# define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
# define ATOMIC_MMU_LOOKUP_RW \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_READ | PAGE_WRITE , GETPC ( ) )
# define ATOMIC_MMU_LOOKUP_R \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_READ , GETPC ( ) )
# define ATOMIC_MMU_LOOKUP_W \
atomic_mmu_lookup ( env , addr , oi , DATA_SIZE , PAGE_WRITE , GETPC ( ) )
# define DATA_SIZE 1
# include "atomic_template.h"