@@ -301,7 +301,7 @@ void hvf_arm_init_debug(void)
# define TMR_CTL_IMASK (1 << 1)
# define TMR_CTL_ISTATUS (1 << 2)
/*
 * Forward declaration: WFI trap handler. Returns 0 to resume the vCPU
 * or EXCP_HLT to ask the generic loop to halt it.
 * (The old `static void` prototype conflicted with the int-returning
 * definition and the `ret = hvf_wfi(cpu)` call site — removed.)
 */
static int hvf_wfi(CPUState *cpu);
static uint32_t chosen_ipa_bit_size ;
@@ -1703,81 +1703,17 @@ static uint64_t hvf_vtimer_val_raw(void)
return mach_absolute_time ( ) - hvf_state - > vtimer_offset ;
}
static uint64_t hvf_vtimer_val ( void )
static int hvf_wfi ( CPUState * cpu )
{
if ( ! runstate_is_running ( ) ) {
/* VM is paused, the vtimer value is in vtimer.vtimer_val */
return vtimer . vtimer_val ;
}
return hvf_vtimer_val_raw ( ) ;
}
/*
 * Sleep until another thread kicks this vCPU.
 *
 * @ts: maximum time to sleep, or NULL to wait indefinitely.
 *
 * Drops the BQL for the duration of the wait; pselect() atomically
 * unmasks the IPI signal (cpu->accel->unblock_ipi_mask) so a kick sent
 * while we sleep reliably wakes us.
 */
static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_set_mb(&cpu->thread_kicked, false);
    bql_unlock();
    /* No fds to watch — NULL fd sets, nfds == 0; we only want the timeout. */
    pselect(0, NULL, NULL, NULL, ts, &cpu->accel->unblock_ipi_mask);
    bql_lock();
}
/*
 * Handle a WFI trap.
 *
 * Returns 0 to resume the vCPU immediately (there is already work
 * pending, so entering a low-power wait would wake up at once), or
 * EXCP_HLT to tell the generic vCPU loop to halt this thread until it
 * has work again.
 *
 * NOTE(review): this resolves an unmerged diff — the old void body slept
 * here itself via pselect()/hvf_wait_for_ipi(), while the new version
 * (per the -81/+17 hunk and the trailing `return EXCP_HLT;`) delegates
 * halting to the common loop. Confirm the generic halt path arms the
 * vtimer deadline so a pending CNTV compare still wakes the vCPU.
 */
static int hvf_wfi(CPUState *cpu)
{
    if (cpu_has_work(cpu)) {
        /*
         * Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return 0;
    }

    return EXCP_HLT;
}
/* Must be called by the owning thread */
@@ -1967,7 +1903,7 @@ static int hvf_handle_exception(CPUState *cpu, hv_vcpu_exit_exception_t *excp)
case EC_WFX_TRAP :
advance_pc = true ;
if ( ! ( syndrome & WFX_IS_WFE ) ) {
hvf_wfi ( cpu ) ;
ret = hvf_wfi ( cpu ) ;
}
break ;
case EC_AA64_HVC :