@ -38,73 +38,80 @@ void mb_cpu_do_interrupt(CPUState *cs)
env - > regs [ 14 ] = env - > sregs [ SR_PC ] ;
}
int mb_cpu_handle_mmu_fault ( CPUState * cs , vaddr address , int size , int rw ,
int mmu_idx )
bool mb_cpu_tlb_fill ( CPUState * cs , vaddr address , int size ,
MMUAccessType access_type , int mmu_idx ,
bool probe , uintptr_t retaddr )
{
cs - > exception_index = 0xaa ;
cpu_dump_state ( cs , stderr , 0 ) ;
return 1 ;
cpu_loop_exit_restore ( cs , retaddr ) ;
}
# else /* !CONFIG_USER_ONLY */
int mb_cpu_handle_mmu_fault ( CPUState * cs , vaddr address , int size , int rw ,
int mmu_idx )
bool mb_cpu_tlb_fill ( CPUState * cs , vaddr address , int size ,
MMUAccessType access_type , int mmu_idx ,
bool probe , uintptr_t retaddr )
{
MicroBlazeCPU * cpu = MICROBLAZE_CPU ( cs ) ;
CPUMBState * env = & cpu - > env ;
struct microblaze_mmu_lookup lu ;
unsigned int hit ;
int r = 1 ;
int prot ;
/* Translate if the MMU is available and enabled. */
if ( mmu_idx ! = MMU_NOMMU_IDX ) {
uint32_t vaddr , paddr ;
struct microblaze_mmu_lookup lu ;
hit = mmu_translate ( & env - > mmu , & lu , address , rw , mmu_idx ) ;
if ( hit ) {
vaddr = address & TARGET_PAGE_MASK ;
paddr = lu . paddr + vaddr - lu . vaddr ;
qemu_log_mask ( CPU_LOG_MMU , " MMU map mmu=%d v=%x p=%x prot=%x \n " ,
mmu_idx , vaddr , paddr , lu . prot ) ;
tlb_set_page ( cs , vaddr , paddr , lu . prot , mmu_idx , TARGET_PAGE_SIZE ) ;
r = 0 ;
} else {
env - > sregs [ SR_EAR ] = address ;
qemu_log_mask ( CPU_LOG_MMU , " mmu=%d miss v=% " VADDR_PRIx " \n " ,
mmu_idx , address ) ;
switch ( lu . err ) {
case ERR_PROT :
env - > sregs [ SR_ESR ] = rw = = 2 ? 17 : 16 ;
env - > sregs [ SR_ESR ] | = ( rw = = 1 ) < < 10 ;
break ;
case ERR_MISS :
env - > sregs [ SR_ESR ] = rw = = 2 ? 19 : 18 ;
env - > sregs [ SR_ESR ] | = ( rw = = 1 ) < < 10 ;
break ;
default :
abort ( ) ;
break ;
}
if ( cs - > exception_index = = EXCP_MMU ) {
cpu_abort ( cs , " recursive faults \n " ) ;
}
/* TLB miss. */
cs - > exception_index = EXCP_MMU ;
}
} else {
if ( mmu_idx = = MMU_NOMMU_IDX ) {
/* MMU disabled or not available. */
address & = TARGET_PAGE_MASK ;
prot = PAGE_BITS ;
tlb_set_page ( cs , address , address , prot , mmu_idx , TARGET_PAGE_SIZE ) ;
r = 0 ;
return true ;
}
return r ;
hit = mmu_translate ( & env - > mmu , & lu , address , access_type , mmu_idx ) ;
if ( likely ( hit ) ) {
uint32_t vaddr = address & TARGET_PAGE_MASK ;
uint32_t paddr = lu . paddr + vaddr - lu . vaddr ;
qemu_log_mask ( CPU_LOG_MMU , " MMU map mmu=%d v=%x p=%x prot=%x \n " ,
mmu_idx , vaddr , paddr , lu . prot ) ;
tlb_set_page ( cs , vaddr , paddr , lu . prot , mmu_idx , TARGET_PAGE_SIZE ) ;
return true ;
}
/* TLB miss. */
if ( probe ) {
return false ;
}
qemu_log_mask ( CPU_LOG_MMU , " mmu=%d miss v=% " VADDR_PRIx " \n " ,
mmu_idx , address ) ;
env - > sregs [ SR_EAR ] = address ;
switch ( lu . err ) {
case ERR_PROT :
env - > sregs [ SR_ESR ] = access_type = = MMU_INST_FETCH ? 17 : 16 ;
env - > sregs [ SR_ESR ] | = ( access_type = = MMU_DATA_STORE ) < < 10 ;
break ;
case ERR_MISS :
env - > sregs [ SR_ESR ] = access_type = = MMU_INST_FETCH ? 19 : 18 ;
env - > sregs [ SR_ESR ] | = ( access_type = = MMU_DATA_STORE ) < < 10 ;
break ;
default :
abort ( ) ;
}
if ( cs - > exception_index = = EXCP_MMU ) {
cpu_abort ( cs , " recursive faults \n " ) ;
}
/* TLB miss. */
cs - > exception_index = EXCP_MMU ;
cpu_loop_exit_restore ( cs , retaddr ) ;
}
/*
 * Legacy softmmu entry point: delegate to mb_cpu_tlb_fill() as a
 * non-probing fill, so a miss raises the guest exception directly.
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    mb_cpu_tlb_fill(cs, addr, size, access_type, mmu_idx, false, retaddr);
}
void mb_cpu_do_interrupt ( CPUState * cs )