author    Avi Kivity <avi@qumranet.com>  2007-01-23 14:33:20 +0000
committer Avi Kivity <avi@qumranet.com>  2007-01-23 14:33:20 +0000
commit    29710616d00f292ae8b8598d15ff96b13b084c06 (patch)
tree      a726a64115ccacb413cab3667ddd87bac67bbdf8
parent    e01db5b3ba259da39d9c75c2b5cf4042730fb3d1 (diff)
kvm: release: merge from trunk (tag: kvm-12)
........
r4319 | avi | 2007-01-23 15:09:03 +0200 (Tue, 23 Jan 2007) | 9 lines

From: Joerg Roedel <joerg.roedel@amd.com>

This patch implements forwarding of SHUTDOWN intercepts from the guest on
to userspace on AMD SVM. A SHUTDOWN event occurs when the guest produces a
triple fault (e.g. on reboot). This also fixes the bug that a guest reboot
actually causes a host reboot under some circumstances.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
........
r4320 | avi | 2007-01-23 16:18:22 +0200 (Tue, 23 Jan 2007) | 8 lines

kvm: mmu: report nx faults to the guest

With the recent guest page fault change, we perform access checks on our
own instead of relying on the cpu. This means we have to perform the nx
checks as well.

Software like the Google toolbar on Windows appears to rely on this somehow.
........
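The new KVM_EXIT_SHUTDOWN exit reason is reported to userspace through
kvm_run->exit_reason after KVM_RUN returns. The following is a minimal
userspace sketch, not part of this merge: the handle_exit() helper and the
dispatch structure are assumptions for illustration, while the exit-reason
constants and the exit_reason field come from the kvm.h in this tree.

#include <linux/kvm.h>

/*
 * Dispatch one guest exit; returns 0 when the VM should be reset or torn
 * down, 1 when the caller should resume the guest, -1 on an unhandled exit.
 */
static int handle_exit(struct kvm_run *run)
{
        switch (run->exit_reason) {
        case KVM_EXIT_SHUTDOWN:
                /*
                 * The guest triple-faulted (e.g. during reboot). With this
                 * merge the event reaches userspace instead of affecting the
                 * host, so the VMM can reset or destroy the VM here.
                 */
                return 0;
        case KVM_EXIT_HLT:
        case KVM_EXIT_MMIO:
        case KVM_EXIT_IRQ_WINDOW_OPEN:
                /* Handled elsewhere in a real VMM. */
                return 1;
        default:
                return -1;
        }
}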
-rw-r--r--  kvm/kernel/include/linux/kvm.h |  1
-rw-r--r--  kvm/kernel/mmu.c               |  6
-rw-r--r--  kvm/kernel/paging_tmpl.h       | 15
-rw-r--r--  kvm/kernel/svm.c               | 15
4 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/kvm/kernel/include/linux/kvm.h b/kvm/kernel/include/linux/kvm.h
index bc8b4616b..1be148f0f 100644
--- a/kvm/kernel/include/linux/kvm.h
+++ b/kvm/kernel/include/linux/kvm.h
@@ -46,6 +46,7 @@ enum kvm_exit_reason {
KVM_EXIT_HLT = 5,
KVM_EXIT_MMIO = 6,
KVM_EXIT_IRQ_WINDOW_OPEN = 7,
+ KVM_EXIT_SHUTDOWN = 8,
};
/* for KVM_RUN */
diff --git a/kvm/kernel/mmu.c b/kvm/kernel/mmu.c
index a05d0609d..22c426cd8 100644
--- a/kvm/kernel/mmu.c
+++ b/kvm/kernel/mmu.c
@@ -143,6 +143,7 @@ static int dbg = 1;
#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
+#define PFERR_FETCH_MASK (1U << 4)
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
@@ -168,6 +169,11 @@ static int is_cpuid_PSE36(void)
return 1;
}
+static int is_nx(struct kvm_vcpu *vcpu)
+{
+ return vcpu->shadow_efer & EFER_NX;
+}
+
static int is_present_pte(unsigned long pte)
{
return pte & PT_PRESENT_MASK;
diff --git a/kvm/kernel/paging_tmpl.h b/kvm/kernel/paging_tmpl.h
index afcd2a8f4..149fa45fd 100644
--- a/kvm/kernel/paging_tmpl.h
+++ b/kvm/kernel/paging_tmpl.h
@@ -71,7 +71,7 @@ struct guest_walker {
*/
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr,
- int write_fault, int user_fault)
+ int write_fault, int user_fault, int fetch_fault)
{
hpa_t hpa;
struct kvm_memory_slot *slot;
@@ -123,6 +123,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
if (user_fault && !(*ptep & PT_USER_MASK))
goto access_error;
+#if PTTYPE == 64
+ if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+ goto access_error;
+#endif
+
if (!(*ptep & PT_ACCESSED_MASK))
*ptep |= PT_ACCESSED_MASK; /* avoid rmw */
@@ -169,6 +174,8 @@ err:
walker->error_code |= PFERR_WRITE_MASK;
if (user_fault)
walker->error_code |= PFERR_USER_MASK;
+ if (fetch_fault)
+ walker->error_code |= PFERR_FETCH_MASK;
return 0;
}
@@ -372,6 +379,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
+ int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
u64 *shadow_pte;
int fixed;
@@ -388,7 +396,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
/*
* Look up the shadow pte for the faulting address.
*/
- r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault);
+ r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+ fetch_fault);
/*
* The page is not mapped by the guest. Let the guest handle it.
@@ -437,7 +446,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
pt_element_t guest_pte;
gpa_t gpa;
- FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0);
+ FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
guest_pte = *walker.ptep;
FNAME(release_walker)(&walker);
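To make the walker's check order concrete, here is a stand-alone sketch of
the permission test applied to a 64-bit guest PTE. It is an illustration,
not the kernel code: the function name and the simplified write check are
assumptions, while the present/user rules and the new fetch/NX rule mirror
the walk_addr() change above (the NX bit is bit 63 of a 64-bit PTE).

#include <stdint.h>

#define PT_PRESENT_MASK   (1ULL << 0)
#define PT_WRITABLE_MASK  (1ULL << 1)
#define PT_USER_MASK      (1ULL << 2)
#define PT64_NX_MASK      (1ULL << 63)

/*
 * Returns nonzero if the access must be reflected back to the guest as a
 * page fault rather than handled by the shadow MMU.
 */
static int pte_access_error(uint64_t pte, int write_fault, int user_fault,
                            int fetch_fault, int nx_enabled)
{
        if (!(pte & PT_PRESENT_MASK))
                return 1;       /* not-present fault */
        if (write_fault && !(pte & PT_WRITABLE_MASK))
                return 1;       /* write to a read-only mapping */
        if (user_fault && !(pte & PT_USER_MASK))
                return 1;       /* user access to a supervisor page */
        if (fetch_fault && nx_enabled && (pte & PT64_NX_MASK))
                return 1;       /* instruction fetch from a no-execute page */
        return 0;
}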
diff --git a/kvm/kernel/svm.c b/kvm/kernel/svm.c
index 717aabb01..9c70ff65e 100644
--- a/kvm/kernel/svm.c
+++ b/kvm/kernel/svm.c
@@ -502,6 +502,7 @@ static void init_vmcb(struct vmcb *vmcb)
(1ULL << INTERCEPT_IOIO_PROT) |
(1ULL << INTERCEPT_MSR_PROT) |
(1ULL << INTERCEPT_TASK_SWITCH) |
+ (1ULL << INTERCEPT_SHUTDOWN) |
(1ULL << INTERCEPT_VMRUN) |
(1ULL << INTERCEPT_VMMCALL) |
(1ULL << INTERCEPT_VMLOAD) |
@@ -892,6 +893,19 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
}
+static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ /*
+ * VMCB is undefined after a SHUTDOWN intercept
+ * so reinitialize it.
+ */
+ memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+ init_vmcb(vcpu->svm->vmcb);
+
+ kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+ return 0;
+}
+
static int io_get_override(struct kvm_vcpu *vcpu,
struct vmcb_seg **seg,
int *addr_override)
@@ -1249,6 +1263,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
[SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
+ [SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = invalid_op_interception,
[SVM_EXIT_VMMCALL] = invalid_op_interception,
[SVM_EXIT_VMLOAD] = invalid_op_interception,