Fixed SYSENTER, SYSEXIT, SYSCALL, and SYSRET;

SYSCALL and SYSRET are now perfectly in line with AMD's specification;
Improved the API call that gracefully stops the emulator.
This commit is contained in:
OBattler
2020-11-16 17:47:05 +01:00
parent e1a19a308e
commit 1d4988221f
6 changed files with 59 additions and 34 deletions

View File

@@ -41,6 +41,9 @@ uint32_t ropMOVD_d_r(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t
{
int src_reg = (fetchdat >> 3) & 7;
if (cpu_iscyrix && in_smm)
return 0;
uop_MMX_ENTER(ir);
codegen_mark_code_present(block, cs+op_pc, 1);
if ((fetchdat & 0xc0) == 0xc0)

View File

@@ -1019,7 +1019,7 @@ enter_smm(int in_hlt)
uint32_t saved_state[SMM_SAVE_STATE_MAP_SIZE], n;
uint32_t smram_state = smbase + 0x10000;
/* If it's a CPU on which SMM is not supporter, do nothing. */
/* If it's a CPU on which SMM is not supported, do nothing. */
if (!is_am486 && !is_pentium && !is_k5 && !is_k6 && !is_p6 && !is_cx6x86)
return;
@@ -1622,10 +1622,8 @@ sysenter(uint32_t fetchdat)
#endif
/* Set VM, RF, and IF to 0. */
flags_rebuild();
cpu_state.eflags &= ~(RF_FLAG | VM_FLAG);
cpu_state.flags &= ~I_FLAG;
cpu_cur_status &= ~CPU_STATUS_V86;
#ifndef USE_NEW_DYNAREC
oldcs = CS;
@@ -1643,6 +1641,7 @@ sysenter(uint32_t fetchdat)
cpu_state.seg_cs.access = 0x9b;
cpu_state.seg_cs.ar_high = 0xcf;
cpu_state.seg_cs.checked = 1;
oldcpl = 0;
cpu_state.seg_ss.seg = ((cs_msr + 8) & 0xfffc);
cpu_state.seg_ss.base = 0;
@@ -1653,9 +1652,12 @@ sysenter(uint32_t fetchdat)
cpu_state.seg_ss.access = 0x93;
cpu_state.seg_ss.ar_high = 0xcf;
cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
codegen_flat_ss = 0;
#endif
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32/* | CPU_STATUS_PMODE*/);
set_use32(1);
set_stack32(1);
@@ -1737,10 +1739,13 @@ sysexit(uint32_t fetchdat)
cpu_state.seg_ss.limit_raw = 0x000fffff;
cpu_state.seg_ss.limit_high = 0xffffffff;
cpu_state.seg_ss.access = 0xf3;
cpu_state.seg_cs.ar_high = 0xcf;
cpu_state.seg_ss.ar_high = 0xcf;
cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
codegen_flat_ss = 0;
#endif
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS/* | CPU_STATUS_V86*/);
cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
flushmmucache_cr3();
set_use32(1);
@@ -1767,23 +1772,16 @@ syscall(uint32_t fetchdat)
x386_common_log("SYSCALL called\n");
#endif
if (!(cr0 & 1)) {
x86gpf("SYSCALL: CPU not in protected mode", 0);
return cpu_state.abrt;
}
if (!AMD_SYSCALL_SB) {
x86gpf("SYSCALL: AMD SYSCALL SB MSR is zero", 0);
return cpu_state.abrt;
}
/* Let's do this by the AMD spec. */
ECX = cpu_state.pc;
/* Set VM and IF to 0. */
cpu_state.eflags &= ~VM_FLAG;
cpu_state.flags &= ~I_FLAG;
flags_rebuild();
cpu_state.eflags &= ~0x0002;
cpu_state.flags &= ~0x0200;
cpu_cur_status &= ~CPU_STATUS_V86;
#ifndef USE_NEW_DYNAREC
oldcs = CS;
#endif
cpu_state.oldpc = cpu_state.pc;
ECX = cpu_state.pc;
/* CS */
CS = AMD_SYSCALL_SB & 0xfffc;
@@ -1795,6 +1793,7 @@ syscall(uint32_t fetchdat)
cpu_state.seg_cs.access = 0x9b;
cpu_state.seg_cs.ar_high = 0xcf;
cpu_state.seg_cs.checked = 1;
oldcpl = 0;
/* SS */
SS = (AMD_SYSCALL_SB + 8) & 0xfffc;
@@ -1806,6 +1805,9 @@ syscall(uint32_t fetchdat)
cpu_state.seg_ss.access = 0x93;
cpu_state.seg_ss.ar_high = 0xcf;
cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
codegen_flat_ss = 0;
#endif
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
@@ -1825,20 +1827,25 @@ sysret(uint32_t fetchdat)
x386_common_log("SYSRET called\n");
#endif
if (!AMD_SYSRET_SB) {
x86gpf("SYSRET: CS MSR is zero", 0);
if (CPL) {
#ifdef ENABLE_386_COMMON_LOG
x386_common_log("SYSRET: CPL not 0");
#endif
x86gpf("SYSRET: CPL not 0", 0);
return cpu_state.abrt;
}
if (!(cr0 & 1)) {
x86gpf("SYSRET: CPU not in protected mode", 0);
return cpu_state.abrt;
}
cpu_state.flags |= I_FLAG;
/* First instruction after SYSRET will always execute, regardless of whether
there is a pending interrupt, following the STI logic */
cpu_end_block_after_ins = 2;
#ifndef USE_NEW_DYNAREC
oldcs = CS;
#endif
cpu_state.oldpc = cpu_state.pc;
cpu_state.pc = ECX;
cpu_state.eflags |= (1 << 1);
/* CS */
CS = (AMD_SYSRET_SB & 0xfffc) | 3;
cpu_state.seg_cs.base = 0;
@@ -1861,8 +1868,11 @@ sysret(uint32_t fetchdat)
cpu_state.seg_ss.access = 0xf3;
cpu_state.seg_cs.ar_high = 0xcf;
cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
codegen_flat_ss = 0;
#endif
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS/* | CPU_STATUS_V86*/);
cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
flushmmucache_cr3();
set_use32(1);

View File

@@ -270,7 +270,8 @@ static void prefetch_flush()
#define CACHE_ON() (!(cr0 & (1 << 30)) && !(cpu_state.flags & T_FLAG))
#ifdef USE_DYNAREC
static int cycles_main = 0, cycles_old = 0;
int cycles_main = 0;
static int cycles_old = 0;
static uint64_t tsc_old = 0;
#ifdef USE_ACYCS

View File

@@ -437,7 +437,7 @@ extern uint32_t esp_msr;
extern uint32_t eip_msr;
/* For the AMD K6. */
extern uint64_t star;
extern uint64_t amd_efer, star;
#define FPU_CW_Reserved_Bits (0xe0c0)
@@ -489,6 +489,7 @@ extern int timing_jmp_rm, timing_jmp_pm, timing_jmp_pm_gate;
extern int timing_misaligned;
extern int in_sys, unmask_a20_in_smm;
extern int cycles_main;
extern uint32_t old_rammask;
#ifdef USE_ACYCS

View File

@@ -16,7 +16,11 @@
static int
opSYSCALL(uint32_t fetchdat)
{
int ret = syscall(fetchdat);
int ret;
ILLEGAL_ON(!(amd_efer & 0x0000000000000001));
ret = syscall(fetchdat);
if (ret <= 1) {
CLOCK_CYCLES(20);
@@ -32,7 +36,11 @@ opSYSCALL(uint32_t fetchdat)
static int
opSYSRET(uint32_t fetchdat)
{
int ret = sysret(fetchdat);
int ret;
ILLEGAL_ON(!(amd_efer & 0x0000000000000001));
ret = sysret(fetchdat);
if (ret <= 1) {
CLOCK_CYCLES(20);

View File

@@ -835,6 +835,8 @@ pc_reset_hard_init(void)
atfullspeed = 0;
pc_full_speed();
cycles = cycles_main = 0;
}