@@ -409,11 +409,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
for ( i = 0 ; i < 6 ; i + + ) {
load_seg_vm ( env , i , new_segs [ i ] ) ;
}
/* in vm86, CPL is always 3 */
cpu_x86_set_cpl ( env , 3 ) ;
} else {
/* CPL is set the RPL of CS */
cpu_x86_set_cpl ( env , new_segs [ R_CS ] & 3 ) ;
/* first just selectors as the rest may trigger exceptions */
for ( i = 0 ; i < 6 ; i + + ) {
cpu_x86_load_seg_cache ( env , i , new_segs [ i ] , 0 , 0 , 0 ) ;
@@ -763,7 +759,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
get_seg_base ( e1 , e2 ) ,
get_seg_limit ( e1 , e2 ) ,
e2 ) ;
cpu_x86_set_cpl ( env , dpl ) ;
env - > eip = offset ;
}
@@ -928,7 +923,6 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
get_seg_base ( e1 , e2 ) ,
get_seg_limit ( e1 , e2 ) ,
e2 ) ;
cpu_x86_set_cpl ( env , dpl ) ;
env - > eip = offset ;
}
# endif
@@ -962,7 +956,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
env - > eflags & = ~ env - > fmask ;
cpu_load_eflags ( env , env - > eflags , 0 ) ;
cpu_x86_set_cpl ( env , 0 ) ;
cpu_x86_load_seg_cache ( env , R_CS , selector & 0xfffc ,
0 , 0xffffffff ,
DESC_G_MASK | DESC_P_MASK |
@@ -983,7 +976,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
env - > regs [ R_ECX ] = ( uint32_t ) ( env - > eip + next_eip_addend ) ;
env - > eflags & = ~ ( IF_MASK | RF_MASK | VM_MASK ) ;
cpu_x86_set_cpl ( env , 0 ) ;
cpu_x86_load_seg_cache ( env , R_CS , selector & 0xfffc ,
0 , 0xffffffff ,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1038,7 +1030,6 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | ( 3 < < DESC_DPL_SHIFT ) |
DESC_W_MASK | DESC_A_MASK ) ;
cpu_x86_set_cpl ( env , 3 ) ;
} else {
env - > eflags | = IF_MASK ;
cpu_x86_load_seg_cache ( env , R_CS , selector | 3 ,
@@ -1052,7 +1043,6 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | ( 3 < < DESC_DPL_SHIFT ) |
DESC_W_MASK | DESC_A_MASK ) ;
cpu_x86_set_cpl ( env , 3 ) ;
}
}
# endif
@@ -1905,7 +1895,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
get_seg_base ( e1 , e2 ) ,
get_seg_limit ( e1 , e2 ) ,
e2 ) ;
cpu_x86_set_cpl ( env , dpl ) ;
SET_ESP ( sp , sp_mask ) ;
env - > eip = offset ;
}
@@ -2134,7 +2123,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
get_seg_base ( e1 , e2 ) ,
get_seg_limit ( e1 , e2 ) ,
e2 ) ;
cpu_x86_set_cpl ( env , rpl ) ;
sp = new_esp ;
# ifdef TARGET_X86_64
if ( env - > hflags & HF_CS64_MASK ) {
@@ -2185,7 +2173,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
VIP_MASK ) ;
load_seg_vm ( env , R_CS , new_cs & 0xffff ) ;
cpu_x86_set_cpl ( env , 3 ) ;
load_seg_vm ( env , R_SS , new_ss & 0xffff ) ;
load_seg_vm ( env , R_ES , new_es & 0xffff ) ;
load_seg_vm ( env , R_DS , new_ds & 0xffff ) ;
@@ -2238,7 +2225,6 @@ void helper_sysenter(CPUX86State *env)
raise_exception_err ( env , EXCP0D_GPF , 0 ) ;
}
env - > eflags & = ~ ( VM_MASK | IF_MASK | RF_MASK ) ;
cpu_x86_set_cpl ( env , 0 ) ;
# ifdef TARGET_X86_64
if ( env - > hflags & HF_LMA_MASK ) {
@@ -2274,7 +2260,6 @@ void helper_sysexit(CPUX86State *env, int dflag)
if ( env - > sysenter_cs = = 0 | | cpl ! = 0 ) {
raise_exception_err ( env , EXCP0D_GPF , 0 ) ;
}
cpu_x86_set_cpl ( env , 3 ) ;
# ifdef TARGET_X86_64
if ( dflag = = 2 ) {
cpu_x86_load_seg_cache ( env , R_CS , ( ( env - > sysenter_cs + 32 ) & 0xfffc ) |