@@ -222,6 +222,76 @@ static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
}
}
/*
 * Decide how pending breakpoints affect execution at @pc.
 *
 * Returns true (with cpu->exception_index set to EXCP_DEBUG) when a
 * breakpoint fires exactly at @pc.  Returns false otherwise; if some
 * breakpoint lives elsewhere on the same guest page, *@cflags is
 * adjusted to force single-instruction TBs with no goto_tb chaining,
 * so each insn returns through helper_lookup_tb_ptr for re-checking.
 */
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                  uint32_t *cflags)
{
    bool page_hit = false;
    CPUBreakpoint *bp;

    /* Fast path: no breakpoints installed on this vCPU. */
    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
        return false;
    }

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (pc != bp->pc) {
            /* Not an exact hit; remember breakpoints sharing this page. */
            if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
                page_hit = true;
            }
            continue;
        }

        /* Exact pc match: decide whether this breakpoint triggers. */
        bool triggered = false;
        if (bp->flags & BP_GDB) {
            triggered = true;
        } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
            /* Architectural breakpoints do not exist in user-only mode. */
            g_assert_not_reached();
#else
            CPUClass *cc = CPU_GET_CLASS(cpu);
            assert(cc->tcg_ops->debug_check_breakpoint);
            triggered = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
        }
        if (triggered) {
            cpu->exception_index = EXCP_DEBUG;
            return true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (page_hit) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}
/**
* helper_lookup_tb_ptr : quick check for next tb
* @ env : current cpu state
@@ -235,11 +305,16 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
CPUState * cpu = env_cpu ( env ) ;
TranslationBlock * tb ;
target_ulong cs_base , pc ;
uint32_t flags ;
uint32_t flags , cflags ;
cpu_get_tb_cpu_state ( env , & pc , & cs_base , & flags ) ;
tb = tb_lookup ( cpu , pc , cs_base , flags , curr_cflags ( cpu ) ) ;
cflags = curr_cflags ( cpu ) ;
if ( check_for_breakpoints ( cpu , pc , & cflags ) ) {
cpu_loop_exit ( cpu ) ;
}
tb = tb_lookup ( cpu , pc , cs_base , flags , cflags ) ;
if ( tb = = NULL ) {
return tcg_code_gen_epilogue ;
}
@@ -346,6 +421,12 @@ void cpu_exec_step_atomic(CPUState *cpu)
cflags & = ~ CF_PARALLEL ;
/* After 1 insn, return and release the exclusive lock. */
cflags | = CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1 ;
/*
* No need to check_for_breakpoints here .
* We only arrive in cpu_exec_step_atomic after beginning execution
* of an insn that includes an atomic operation we can ' t handle .
* Any breakpoint for this insn will have been recognized earlier .
*/
tb = tb_lookup ( cpu , pc , cs_base , flags , cflags ) ;
if ( tb = = NULL ) {
@@ -837,6 +918,8 @@ int cpu_exec(CPUState *cpu)
target_ulong cs_base , pc ;
uint32_t flags , cflags ;
cpu_get_tb_cpu_state ( cpu - > env_ptr , & pc , & cs_base , & flags ) ;
/*
* When requested , use an exact setting for cflags for the next
* execution . This is used for icount , precise smc , and stop -
@@ -851,7 +934,9 @@ int cpu_exec(CPUState *cpu)
cpu - > cflags_next_tb = - 1 ;
}
cpu_get_tb_cpu_state ( cpu - > env_ptr , & pc , & cs_base , & flags ) ;
if ( check_for_breakpoints ( cpu , pc , & cflags ) ) {
break ;
}
tb = tb_lookup ( cpu , pc , cs_base , flags , cflags ) ;
if ( tb = = NULL ) {