Diffstat (limited to 'target-i386/cpu.h')
-rw-r--r--  target-i386/cpu.h  75
1 file changed, 62 insertions(+), 13 deletions(-)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 3c57d8b6b..bc60cdb06 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -123,8 +123,8 @@
/* hidden flags - used internally by qemu to represent additional cpu
states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
- position to ease oring with eflags. */
+ redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
+ bit positions to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
@@ -147,10 +147,12 @@
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -168,10 +170,12 @@
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
/* hflags2 */
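
Note: HF_AC_SHIFT/HF_AC_MASK deliberately reuse the EFLAGS.AC bit position (bit 18), as the "must be same as eflags" comment says, so the AC state can be ORed straight from eflags into the TB flags (see the cpu_get_tb_cpu_state() hunk at the end of this patch). A minimal compile-time guard for that assumption could look like this (illustrative only, not part of the patch):

/* Hypothetical guard: fails to compile if HF_AC_MASK ever stops matching
   the EFLAGS.AC bit, which the direct OR with eflags relies on. */
typedef char hf_ac_mask_matches_eflags_ac[(HF_AC_MASK == AC_MASK) ? 1 : -1];
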
@@ -210,6 +214,13 @@
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
+#define CR4_VMXE_MASK (1 << 13)
+#define CR4_SMXE_MASK (1 << 14)
+#define CR4_FSGSBASE_MASK (1 << 16)
+#define CR4_PCIDE_MASK (1 << 17)
+#define CR4_OSXSAVE_MASK (1 << 18)
+#define CR4_SMEP_MASK (1 << 20)
+#define CR4_SMAP_MASK (1 << 21)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
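
Note: CR4_SMAP_MASK above is the architectural CR4.SMAP enable bit; the patch caches its state in the hidden flags as HF_SMAP_MASK so the per-access fast path never has to read CR4. The mirroring itself is done by the CR4 update code outside this header; a minimal sketch of that step, assuming a hypothetical helper name and only the masks defined in this header:

/* Hypothetical sketch (not in this diff): mirror CR4.SMAP into hflags so
   cpu_mmu_index() can test HF_SMAP_MASK cheaply. */
static inline void smap_update_hflags(CPUX86State *env, uint32_t new_cr4)
{
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    } else {
        env->hflags &= ~HF_SMAP_MASK;
    }
}
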
@@ -382,6 +393,7 @@
#define CPUID_PBE (1 << 31)
#define CPUID_EXT_SSE3 (1 << 0)
+#define CPUID_EXT_PCLMULQDQ (1 << 1)
#define CPUID_EXT_DTES64 (1 << 2)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_DSCPL (1 << 4)
@@ -401,14 +413,34 @@
#define CPUID_EXT_MOVBE (1 << 22)
#define CPUID_EXT_POPCNT (1 << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
+#define CPUID_EXT_AES (1 << 25)
#define CPUID_EXT_XSAVE (1 << 26)
#define CPUID_EXT_OSXSAVE (1 << 27)
+#define CPUID_EXT_AVX (1 << 28)
#define CPUID_EXT_HYPERVISOR (1 << 31)
+#define CPUID_EXT2_FPU (1 << 0)
+#define CPUID_EXT2_VME (1 << 1)
+#define CPUID_EXT2_DE (1 << 2)
+#define CPUID_EXT2_PSE (1 << 3)
+#define CPUID_EXT2_TSC (1 << 4)
+#define CPUID_EXT2_MSR (1 << 5)
+#define CPUID_EXT2_PAE (1 << 6)
+#define CPUID_EXT2_MCE (1 << 7)
+#define CPUID_EXT2_CX8 (1 << 8)
+#define CPUID_EXT2_APIC (1 << 9)
#define CPUID_EXT2_SYSCALL (1 << 11)
+#define CPUID_EXT2_MTRR (1 << 12)
+#define CPUID_EXT2_PGE (1 << 13)
+#define CPUID_EXT2_MCA (1 << 14)
+#define CPUID_EXT2_CMOV (1 << 15)
+#define CPUID_EXT2_PAT (1 << 16)
+#define CPUID_EXT2_PSE36 (1 << 17)
#define CPUID_EXT2_MP (1 << 19)
#define CPUID_EXT2_NX (1 << 20)
#define CPUID_EXT2_MMXEXT (1 << 22)
+#define CPUID_EXT2_MMX (1 << 23)
+#define CPUID_EXT2_FXSR (1 << 24)
#define CPUID_EXT2_FFXSR (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP (1 << 27)
@@ -416,6 +448,17 @@
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW (1 << 31)
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM (1 << 2)
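
Note: CPUID_EXT2_AMD_ALIASES collects the CPUID[8000_0001].EDX bits that, on AMD CPUs, mirror the same-named CPUID[1].EDX bits, so the intended use is a single masked copy when the CPU model is set up. A hedged illustration, assuming the cpuid_features/cpuid_ext2_features fields of CPUX86State and a hypothetical helper name:

/* Illustration only: on an AMD-vendor CPU, copy the aliased CPUID[1].EDX
   feature bits into CPUID[8000_0001].EDX. */
static inline void propagate_amd_cpuid_aliases(CPUX86State *env)
{
    env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
    env->cpuid_ext2_features |= env->cpuid_features & CPUID_EXT2_AMD_ALIASES;
}
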
@@ -427,7 +470,9 @@
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW (1 << 9)
#define CPUID_EXT3_IBS (1 << 10)
+#define CPUID_EXT3_XOP (1 << 11)
#define CPUID_EXT3_SKINIT (1 << 12)
+#define CPUID_EXT3_FMA4 (1 << 16)
#define CPUID_SVM_NPT (1 << 0)
#define CPUID_SVM_LBRV (1 << 1)
@@ -440,6 +485,9 @@
#define CPUID_SVM_PAUSEFILTER (1 << 10)
#define CPUID_SVM_PFTHRESHOLD (1 << 12)
+#define CPUID_7_0_EBX_SMEP (1 << 7)
+#define CPUID_7_0_EBX_SMAP (1 << 20)
+
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
@@ -615,7 +663,7 @@ typedef struct {
#define CPU_NB_REGS CPU_NB_REGS32
#endif
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
typedef enum TPRAccess {
TPR_ACCESS_READ,
@@ -745,7 +793,7 @@ typedef struct CPUX86State {
uint32_t cpuid_xlevel2;
uint32_t cpuid_ext4_features;
/* Flags from CPUID[EAX=7,ECX=0].EBX */
- uint32_t cpuid_7_0_ebx;
+ uint32_t cpuid_7_0_ebx_features;
/* MTRRs */
uint64_t mtrr_fixed[11];
@@ -792,7 +840,7 @@ typedef struct CPUX86State {
X86CPU *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
-void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
@@ -948,10 +996,6 @@ void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);
X86CPU *pc_new_cpu(const char *cpu_model);
-/* used to debug */
-#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
-#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
-
#define TARGET_PAGE_BITS 12
#ifdef TARGET_X86_64
@@ -977,7 +1021,7 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
-#define cpu_list_id x86_cpu_list
+#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup
#define CPU_SAVE_VERSION 12
@@ -985,10 +1029,15 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index (CPUX86State *env)
{
- return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
+ ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
#undef EAX
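
Note: the reworked cpu_mmu_index() now selects one of three MMU modes instead of two: ring-3 code always uses the user index, while ring-0..2 code is split by whether a SMAP override is in effect (CR4.SMAP cached in HF_SMAP_MASK and EFLAGS.AC set). The same selection written out long-hand, purely for illustration (hypothetical function name, identical logic):

/* Illustrative expansion of the ternary in cpu_mmu_index() above. */
static inline int cpu_mmu_index_expanded(CPUX86State *env)
{
    if ((env->hflags & HF_CPL_MASK) == 3) {
        return MMU_USER_IDX;      /* ring 3: user-mode translations */
    }
    if ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK)) {
        return MMU_KSMAP_IDX;     /* kernel, SMAP temporarily overridden by AC */
    }
    return MMU_KERNEL_IDX;        /* kernel, SMAP enforced (or disabled) */
}
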
@@ -1074,7 +1123,7 @@ static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
*flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);