author    Avi Kivity <avi@qumranet.com>  2006-12-31 13:31:38 +0000
committer Avi Kivity <avi@qumranet.com>  2006-12-31 13:31:38 +0000
commit    387f4cd42289f00f1876c406ecbbb2210f24a00f (patch)
tree      baafc99aba5f57c829d817d97d232cb2713119c9
parent    ad4ec7d7b48d7b2253c877281adaf1f56dc1ac51 (diff)
kvm: release: merge from trunk (kvm-9)
........
r4179 | dor | 2006-12-26 00:00:01 +0200 (Tue, 26 Dec 2006) | 21 lines

Interactivity improvement.

This commit improves guest interactivity and response. It enables Windows
installation (no-acpi) to complete a full format, fixes the tablet USB mouse
disappearing in Linux, and solves networking/NFS getting stuck. It should
also (but this is currently broken) allow Windows to run with APIC support.

There were two problems in the previous implementation:

1. Lack of synchronization between the ppr/tpr values and the injected irqs.
   There are scenarios where an irq is taken from the pic/apic and should be
   injected into the guest, but vmx/svm is in an interrupt-window-closed
   state (due to mov ss, for example), or the cpu receives a physical
   interrupt that has priority over virtual interrupts. In these cases the
   ppr was changed but the guest did not receive the irq and might re-enter
   vmx/svm mode -> no synchronization between the two.

2. The kvm module might loop inside the kernel while qemu wants to inject a
   pending interrupt that was not injected before because the IF flag was
   down.

These problems are addressed here. There is a new env variable called
ready_for_interrupt_injection. It tells qemu whether kvm is ready for
interrupts. If it is false, qemu does not take irqs from the pic/apic and
asks kvm to come back as soon as possible (request_interrupt_window flag).
Moreover, interrupts are injected one at a time; if a virtual irq fails to
inject, ready_for_interrupt_injection is set to false.
........
r4185 | dor | 2006-12-26 17:40:23 +0200 (Tue, 26 Dec 2006) | 19 lines

Interactivity improvement:
- Fix handling of the halt instruction: previously an idle guest consumed
  100% host cpu. The solution: first, do not exit kvm if there is an irq
  pending within kvm (even if the IF flag is unset); second, ignore
  ready_for_interrupt_injection in qemu. Now it's about 2.5% for a Windows
  guest.
- Add some statistic counters.
- Address review comments from Avi:
  - Increment the API version.
  - Structure alignment.
  - Rename kvm_run->tpr to cr8.
  - Fix comment style.
  - Get rid of the Qemu name inside comments within the kernel (use "user
    space" instead).
  - Delete the unused save_regs function inside qemu-kvm.c.
  - Unite the push_interrupts & *ready_for_interrupt_injection functions
    into a single function called try_push_interrupts.
  - Rename post_kvm_run_save to post_kvm_run.
  - Use the kvm_exit_reason structure for the KVM_EXIT_IRQ_WINDOW_OPEN cause.
........
r4188 | dor | 2006-12-26 18:35:21 +0200 (Tue, 26 Dec 2006) | 4 lines

Interactivity improvement: update the interrupt_window variable after
opening the window. This is already done for Intel; doing it for AMD too
reduces halt_exits there.
........
r4209 | uri | 2006-12-28 18:29:15 +0200 (Thu, 28 Dec 2006) | 6 lines

qemu migration: sync qemu-kvm state (call kvm_save_registers) upon a savevm.
Since the interactivity improvement (r4185) we no longer sync qemu-kvm state
on each kvm_cpu_exec(), so we now have to do it upon a savevm.
........
r4212 | uri | 2006-12-28 21:26:29 +0200 (Thu, 28 Dec 2006) | 4 lines

qemu migration: call kvm_save_registers() only if using kvm.
Related to revision 4209.
........
r4213 | uri | 2006-12-28 21:56:34 +0200 (Thu, 28 Dec 2006) | 2 lines

web: migration: added a table of contents.
........
r4216 | avi | 2006-12-31 15:30:08 +0200 (Sun, 31 Dec 2006) | 2 lines

kvm: undo whitespace changes to reduce the lkml patch size.
........
r4217 | avi | 2006-12-31 15:30:21 +0200 (Sun, 31 Dec 2006) | 2 lines

kvm: clean up some comments.
........
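To make the handshake above concrete, here is a minimal sketch of the userspace side, modeled on the qemu-kvm.c and kvmctl.c hunks below. It is illustrative only, not the actual libkvm/qemu code: have_pending_irq(), pop_pending_irq(), inject_irq() and sync_flags_from_run() are hypothetical stand-ins for the PIC/APIC queries and CPUState updates qemu really performs, and kvm_fd is the /dev/kvm file descriptor (the vcpu number travels in kvm_run.vcpu in this API version).

/*
 * Sketch only: the userspace/kernel handshake around KVM_RUN as this
 * patch defines it.  The helpers below are hypothetical stand-ins for
 * qemu's real PIC/APIC and CPUState plumbing.
 */
#include <sys/ioctl.h>
#include <errno.h>
#include <linux/kvm.h>

static int  have_pending_irq(void);                 /* PIC/APIC has an irq queued? */
static int  pop_pending_irq(void);                  /* fetch its vector */
static void inject_irq(int kvm_fd, int vector);     /* KVM_INTERRUPT-style injection */
static void sync_flags_from_run(struct kvm_run *r); /* mirror if_flag, cr8, apic_base, ... */

static int run_vcpu(int kvm_fd, struct kvm_run *run)
{
	int r;

	for (;;) {
		/* Inject at most one interrupt, and only when the kernel said
		 * the window was open on the previous exit. */
		if (run->ready_for_interrupt_injection && have_pending_irq())
			inject_irq(kvm_fd, pop_pending_irq());

		/* If interrupts are still queued, ask the kernel to come back
		 * as soon as the guest can take one. */
		run->request_interrupt_window = have_pending_irq();

		r = ioctl(kvm_fd, KVM_RUN, run);

		/* The kernel copies kvm_run back even on -EINTR, so refresh
		 * ready_for_interrupt_injection, if_flag, cr8 and apic_base. */
		sync_flags_from_run(run);

		if (r < 0 && errno == EINTR)
			continue;                   /* signal or irq-window request */
		if (r < 0)
			return -errno;

		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			continue;                   /* window opened: inject next pass */

		return 0;                           /* caller handles other exit reasons */
	}
}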
-rw-r--r--  kvm/kernel/include/linux/kvm.h   11
-rw-r--r--  kvm/kernel/kvm.h                  4
-rw-r--r--  kvm/kernel/kvm_main.c            11
-rw-r--r--  kvm/kernel/svm.c                 94
-rw-r--r--  kvm/kernel/vmx.c                 88
-rw-r--r--  kvm/user/kvmctl.c                17
-rw-r--r--  kvm/user/kvmctl.h                 2
-rw-r--r--  qemu-kvm.c                       69
-rw-r--r--  qemu-kvm.h                        1
-rw-r--r--  target-i386/cpu.h                 1
-rw-r--r--  target-i386/helper2.c             3
-rw-r--r--  vl.c                              7
12 files changed, 248 insertions, 60 deletions
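On the kernel side, the svm.c and vmx.c hunks below implement the same policy with their respective mechanisms (the SVM_EXIT_VINTR intercept and the CPU_BASED_VIRTUAL_INTR_PENDING exec control). The following is a condensed, arch-neutral sketch of that per-entry decision, not code from either file; guest_irq_window_open(), inject_one_irq() and the *_irq_window_intercept() helpers are hypothetical stand-ins for the SVM/VMX specifics.

#include "kvm.h"	/* kvm/kernel/kvm.h: struct kvm_vcpu, struct kvm_run */

/* Hypothetical stand-ins for the SVM/VMX details. */
static int  guest_irq_window_open(struct kvm_vcpu *vcpu);  /* IF set, no sti/mov-ss shadow */
static void inject_one_irq(struct kvm_vcpu *vcpu);         /* pop_irq() + program the intr field */
static void enable_irq_window_intercept(struct kvm_vcpu *vcpu);
static void disable_irq_window_intercept(struct kvm_vcpu *vcpu);

static void do_interrupt_requests_sketch(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	vcpu->interrupt_window_open = guest_irq_window_open(vcpu);

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/* Window open and an irq is queued: inject it now. */
		inject_one_irq(vcpu);

	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
		/* Window closed but kvm or userspace has an irq waiting:
		 * make the CPU exit as soon as the guest can take one. */
		enable_irq_window_intercept(vcpu);
	else
		disable_irq_window_intercept(vcpu);
}

When that intercept later fires with request_interrupt_window set and no irq queued inside kvm, the handlers below return to userspace with KVM_EXIT_IRQ_WINDOW_OPEN; otherwise kvm simply injects on the next entry and re-enters the guest.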
diff --git a/kvm/kernel/include/linux/kvm.h b/kvm/kernel/include/linux/kvm.h
index 28fdce1ac..bc8b4616b 100644
--- a/kvm/kernel/include/linux/kvm.h
+++ b/kvm/kernel/include/linux/kvm.h
@@ -11,7 +11,7 @@
#include <asm/types.h>
#include <linux/ioctl.h>
-#define KVM_API_VERSION 1
+#define KVM_API_VERSION 2
/*
* Architectural interrupt line count, and the size of the bitmap needed
@@ -45,6 +45,7 @@ enum kvm_exit_reason {
KVM_EXIT_DEBUG = 4,
KVM_EXIT_HLT = 5,
KVM_EXIT_MMIO = 6,
+ KVM_EXIT_IRQ_WINDOW_OPEN = 7,
};
/* for KVM_RUN */
@@ -53,11 +54,19 @@ struct kvm_run {
__u32 vcpu;
__u32 emulated; /* skip current instruction */
__u32 mmio_completed; /* mmio request completed */
+ __u8 request_interrupt_window;
+ __u8 padding1[3];
/* out */
__u32 exit_type;
__u32 exit_reason;
__u32 instruction_length;
+ __u8 ready_for_interrupt_injection;
+ __u8 if_flag;
+ __u16 padding2;
+ __u64 cr8;
+ __u64 apic_base;
+
union {
/* KVM_EXIT_UNKNOWN */
struct {
diff --git a/kvm/kernel/kvm.h b/kvm/kernel/kvm.h
index 100df6f38..32023d1ac 100644
--- a/kvm/kernel/kvm.h
+++ b/kvm/kernel/kvm.h
@@ -173,6 +173,7 @@ struct kvm_vcpu {
struct mutex mutex;
int cpu;
int launched;
+ int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
unsigned long irq_pending[NR_IRQ_WORDS];
@@ -247,6 +248,9 @@ struct kvm_stat {
u32 io_exits;
u32 mmio_exits;
u32 signal_exits;
+ u32 irq_window_exits;
+ u32 halt_exits;
+ u32 request_irq_exits;
u32 irq_exits;
};
diff --git a/kvm/kernel/kvm_main.c b/kvm/kernel/kvm_main.c
index b54caf0ce..aca14139a 100644
--- a/kvm/kernel/kvm_main.c
+++ b/kvm/kernel/kvm_main.c
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item {
{ "io_exits", &kvm_stat.io_exits },
{ "mmio_exits", &kvm_stat.mmio_exits },
{ "signal_exits", &kvm_stat.signal_exits },
+ { "irq_window", &kvm_stat.irq_window_exits },
+ { "halt_exits", &kvm_stat.halt_exits },
+ { "request_irq", &kvm_stat.request_irq_exits },
{ "irq_exits", &kvm_stat.irq_exits },
{ 0, 0 }
};
@@ -1693,12 +1696,12 @@ static long kvm_dev_ioctl(struct file *filp,
if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
goto out;
r = kvm_dev_ioctl_run(kvm, &kvm_run);
- if (r < 0)
+ if (r < 0 && r != -EINTR)
goto out;
- r = -EFAULT;
- if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
+ if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+ r = -EFAULT;
goto out;
- r = 0;
+ }
break;
}
case KVM_GET_REGS: {
diff --git a/kvm/kernel/svm.c b/kvm/kernel/svm.c
index fa0428735..855207a9b 100644
--- a/kvm/kernel/svm.c
+++ b/kvm/kernel/svm.c
@@ -235,6 +235,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+
+ vcpu->interrupt_window_open = 1;
}
static int has_svm(void)
@@ -1031,10 +1033,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
+ if (vcpu->irq_summary)
return 1;
kvm_run->exit_reason = KVM_EXIT_HLT;
+ ++kvm_stat.halt_exits;
return 0;
}
@@ -1186,6 +1189,24 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return rdmsr_interception(vcpu, kvm_run);
}
+static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ /*
+ * If the user space waits to inject interrupts, exit as soon as
+ * possible
+ */
+ if (kvm_run->request_interrupt_window &&
+ !vcpu->irq_summary &&
+ (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) {
+ ++kvm_stat.irq_window_exits;
+ kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+ return 0;
+ }
+
+ return 1;
+}
+
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run) = {
[SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -1210,6 +1231,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
[SVM_EXIT_NMI] = nop_on_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
+ [SVM_EXIT_VINTR] = interrupt_window_interception,
/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_HLT] = halt_interception,
@@ -1278,15 +1300,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
}
-static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
struct vmcb_control_area *control;
- if (!vcpu->irq_summary)
- return;
-
control = &vcpu->svm->vmcb->control;
-
control->int_vector = pop_irq(vcpu);
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
@@ -1301,6 +1319,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
control->int_ctl &= ~V_IRQ_MASK;
push_irq(vcpu, control->int_vector);
}
+
+ vcpu->interrupt_window_open =
+ !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+}
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+
+ vcpu->interrupt_window_open =
+ (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+ (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+
+ if (vcpu->interrupt_window_open && vcpu->irq_summary)
+ /*
+ * If interrupts enabled, and not blocked by sti or mov ss. Good.
+ */
+ kvm_do_inject_irq(vcpu);
+
+ /*
+ * Interrupts blocked. Wait for unblock.
+ */
+ if (!vcpu->interrupt_window_open &&
+ (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+ control->intercept |= 1ULL << INTERCEPT_VINTR;
+ } else
+ control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+ vcpu->irq_summary == 0);
+ kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+ kvm_run->cr8 = vcpu->cr8;
+ kvm_run->apic_base = vcpu->apic_base;
+}
+
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ return (!vcpu->irq_summary &&
+ kvm_run->request_interrupt_window &&
+ vcpu->interrupt_window_open &&
+ (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
}
static void save_db_regs(unsigned long *db_regs)
@@ -1326,7 +1397,7 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u16 ldt_selector;
again:
- kvm_try_inject_irq(vcpu);
+ do_interrupt_requests(vcpu, kvm_run);
clgi();
@@ -1487,17 +1558,26 @@ again:
if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
+ post_kvm_run_save(vcpu, kvm_run);
return 0;
}
if (handle_exit(vcpu, kvm_run)) {
if (signal_pending(current)) {
++kvm_stat.signal_exits;
+ post_kvm_run_save(vcpu, kvm_run);
+ return -EINTR;
+ }
+
+ if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ ++kvm_stat.request_irq_exits;
+ post_kvm_run_save(vcpu, kvm_run);
return -EINTR;
}
kvm_resched(vcpu);
goto again;
}
+ post_kvm_run_save(vcpu, kvm_run);
return 0;
}
diff --git a/kvm/kernel/vmx.c b/kvm/kernel/vmx.c
index 2d204fd45..c55635ddf 100644
--- a/kvm/kernel/vmx.c
+++ b/kvm/kernel/vmx.c
@@ -263,6 +263,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
if (interruptibility & 3)
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
interruptibility & ~3);
+ vcpu->interrupt_window_open = 1;
}
static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -1214,21 +1215,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
{
- if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
- && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
+ u32 cpu_based_vm_exec_control;
+
+ vcpu->interrupt_window_open =
+ ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+
+ if (vcpu->interrupt_window_open &&
+ vcpu->irq_summary &&
+ !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
/*
- * Interrupts enabled, and not blocked by sti or mov ss. Good.
+ * If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
kvm_do_inject_irq(vcpu);
- else
+
+ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ if (!vcpu->interrupt_window_open &&
+ (vcpu->irq_summary || kvm_run->request_interrupt_window))
/*
* Interrupts blocked. Wait for unblock.
*/
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
- | CPU_BASED_VIRTUAL_INTR_PENDING);
+ cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+ else
+ cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
@@ -1565,23 +1579,41 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
+ kvm_run->cr8 = vcpu->cr8;
+ kvm_run->apic_base = vcpu->apic_base;
+ kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+ vcpu->irq_summary == 0);
+}
+
static int handle_interrupt_window(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
- /* Turn off interrupt window reporting. */
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
- & ~CPU_BASED_VIRTUAL_INTR_PENDING);
+ /*
+ * If the user space waits to inject interrupts, exit as soon as
+ * possible
+ */
+ if (kvm_run->request_interrupt_window &&
+ !vcpu->irq_summary &&
+ (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)) {
+ kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+ ++kvm_stat.irq_window_exits;
+ return 0;
+ }
return 1;
}
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
+ if (vcpu->irq_summary)
return 1;
kvm_run->exit_reason = KVM_EXIT_HLT;
+ ++kvm_stat.halt_exits;
return 0;
}
@@ -1632,6 +1664,21 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return 0;
}
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ return (!vcpu->irq_summary &&
+ kvm_run->request_interrupt_window &&
+ vcpu->interrupt_window_open &&
+ (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u8 fail;
@@ -1663,9 +1710,7 @@ again:
vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
#endif
- if (vcpu->irq_summary &&
- !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
- kvm_try_inject_irq(vcpu);
+ do_interrupt_requests(vcpu, kvm_run);
if (vcpu->guest_debug.enabled)
kvm_guest_debug_pre(vcpu);
@@ -1802,6 +1847,7 @@ again:
fx_save(vcpu->guest_fx_image);
fx_restore(vcpu->host_fx_image);
+ vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
#ifndef CONFIG_X86_64
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -1834,12 +1880,22 @@ again:
/* Give scheduler a change to reschedule. */
if (signal_pending(current)) {
++kvm_stat.signal_exits;
+ post_kvm_run_save(vcpu, kvm_run);
+ return -EINTR;
+ }
+
+ if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ ++kvm_stat.request_irq_exits;
+ post_kvm_run_save(vcpu, kvm_run);
return -EINTR;
}
+
kvm_resched(vcpu);
goto again;
}
}
+
+ post_kvm_run_save(vcpu, kvm_run);
return 0;
}
diff --git a/kvm/user/kvmctl.c b/kvm/user/kvmctl.c
index 2837cf2fe..dbef7565e 100644
--- a/kvm/user/kvmctl.c
+++ b/kvm/user/kvmctl.c
@@ -23,7 +23,7 @@
#include <errno.h>
#include "kvmctl.h"
-#define EXPECTED_KVM_API_VERSION 1
+#define EXPECTED_KVM_API_VERSION 2
#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
@@ -522,6 +522,16 @@ static int handle_halt(kvm_context_t kvm, struct kvm_run *kvm_run)
return kvm->callbacks->halt(kvm->opaque, kvm_run->vcpu);
}
+int try_push_interrupts(kvm_context_t kvm)
+{
+ return kvm->callbacks->try_push_interrupts(kvm->opaque);
+}
+
+static void post_kvm_run(kvm_context_t kvm, struct kvm_run *kvm_run)
+{
+ kvm->callbacks->post_kvm_run(kvm->opaque, kvm_run);
+}
+
int kvm_run(kvm_context_t kvm, int vcpu)
{
int r;
@@ -533,7 +543,10 @@ int kvm_run(kvm_context_t kvm, int vcpu)
};
again:
+ kvm_run.request_interrupt_window = try_push_interrupts(kvm);
r = ioctl(fd, KVM_RUN, &kvm_run);
+ post_kvm_run(kvm, &kvm_run);
+
kvm_run.emulated = 0;
kvm_run.mmio_completed = 0;
if (r == -1 && errno != EINTR) {
@@ -579,6 +592,8 @@ again:
case KVM_EXIT_HLT:
r = handle_halt(kvm, &kvm_run);
break;
+ case KVM_EXIT_IRQ_WINDOW_OPEN:
+ break;
default:
fprintf(stderr, "unhandled vm exit: 0x%x\n", kvm_run.exit_reason);
kvm_show_regs(kvm, vcpu);
diff --git a/kvm/user/kvmctl.h b/kvm/user/kvmctl.h
index 49888a39d..384b50fae 100644
--- a/kvm/user/kvmctl.h
+++ b/kvm/user/kvmctl.h
@@ -60,6 +60,8 @@ struct kvm_callbacks {
*/
int (*halt)(void *opaque, int vcpu);
int (*io_window)(void *opaque);
+ int (*try_push_interrupts)(void *opaque);
+ void (*post_kvm_run)(void *opaque, struct kvm_run *kvm_run);
};
/*!
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 8620e25b0..b5eee9361 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -222,6 +222,7 @@ static void load_regs(CPUState *env)
perror("kvm_set_msrs FAILED");
}
+
static void save_regs(CPUState *env)
{
struct kvm_regs regs;
@@ -358,34 +359,32 @@ static void save_regs(CPUState *env)
#include <signal.h>
-static int kvm_interrupt_pending(CPUState *env)
+
+static int try_push_interrupts(void *opaque)
{
- int i;
+ CPUState **envs = opaque, *env;
+ env = envs[0];
- for (i = 0; i < NR_IRQ_WORDS; ++i)
- if (env->kvm_interrupt_bitmap[i])
- return 1;
- return 0;
+ if (env->ready_for_interrupt_injection &&
+ (env->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) {
+ env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ // for now using cpu 0
+ kvm_inject_irq(kvm_context, 0, cpu_get_pic_interrupt(env));
+ }
+
+ return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
}
-static inline void push_interrupts(CPUState *env)
+static void post_kvm_run(void *opaque, struct kvm_run *kvm_run)
{
- if (!(env->interrupt_request & CPU_INTERRUPT_HARD) ||
- !(env->eflags & IF_MASK) || kvm_interrupt_pending(env)) {
- if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
- env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
- env->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit();
- }
- return;
- }
-
- do {
- env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ CPUState **envs = opaque, *env;
+ env = envs[0];
- // for now using cpu 0
- kvm_inject_irq(kvm_context, 0, cpu_get_pic_interrupt(env));
- } while ( (env->interrupt_request & CPU_INTERRUPT_HARD) && (env->cr[0] & CR0_PG_MASK) );
+ env->eflags = (kvm_run->if_flag) ? env->eflags | IF_MASK:env->eflags & ~IF_MASK;
+ env->ready_for_interrupt_injection = kvm_run->ready_for_interrupt_injection;
+ cpu_set_apic_tpr(env, kvm_run->cr8);
+ cpu_set_apic_base(env, kvm_run->apic_base);
}
void kvm_load_registers(CPUState *env)
@@ -393,18 +392,29 @@ void kvm_load_registers(CPUState *env)
load_regs(env);
}
+void kvm_save_registers(CPUState *env)
+{
+ save_regs(env);
+}
+
int kvm_cpu_exec(CPUState *env)
{
+ int pending = (!env->ready_for_interrupt_injection ||
+ ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)));
- push_interrupts(env);
+ if (!pending && (env->interrupt_request & CPU_INTERRUPT_EXIT)) {
+ env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
+ env->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit();
+ }
+
if (!saved_env[0])
saved_env[0] = env;
kvm_run(kvm_context, 0);
- save_regs(env);
-
return 0;
}
@@ -461,7 +471,6 @@ static int kvm_debug(void *opaque, int vcpu)
CPUState **envs = opaque;
env = envs[0];
- save_regs(env);
env->exception_index = EXCP_DEBUG;
return 1;
}
@@ -561,14 +570,12 @@ static int kvm_halt(void *opaque, int vcpu)
CPUState **envs = opaque, *env;
env = envs[0];
- save_regs(env);
-
- if (!((kvm_interrupt_pending(env) ||
- (env->interrupt_request & CPU_INTERRUPT_HARD)) &&
+ if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK))) {
env->hflags |= HF_HALTED_MASK;
env->exception_index = EXCP_HLT;
}
+
return 1;
}
@@ -591,6 +598,8 @@ static struct kvm_callbacks qemu_kvm_ops = {
.writeq = kvm_writeq,
.halt = kvm_halt,
.io_window = kvm_io_window,
+ .try_push_interrupts = try_push_interrupts,
+ .post_kvm_run = post_kvm_run,
};
int kvm_qemu_init()
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 4fc6de9f6..d512c9743 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -7,6 +7,7 @@ int kvm_qemu_init(void);
int kvm_qemu_create_context(void);
void kvm_qemu_destroy(void);
void kvm_load_registers(CPUState *env);
+void kvm_save_registers(CPUState *env);
int kvm_cpu_exec(CPUState *env);
int kvm_update_debugger(CPUState *env);
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 4aba1b32f..61a209e24 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -494,6 +494,7 @@ typedef struct CPUX86State {
#ifdef USE_KVM
uint64_t tsc; /* time stamp counter */
+ uint8_t ready_for_interrupt_injection;
#endif
uint64_t pat;
diff --git a/target-i386/helper2.c b/target-i386/helper2.c
index 19af159f9..99f914d07 100644
--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -141,6 +141,9 @@ CPUX86State *cpu_x86_init(void)
#ifdef USE_KQEMU
kqemu_init(env);
#endif
+#ifdef USE_KVM
+ env->ready_for_interrupt_injection = 1;
+#endif
return env;
}
diff --git a/vl.c b/vl.c
index 988893755..2a7b79739 100644
--- a/vl.c
+++ b/vl.c
@@ -4489,7 +4489,12 @@ void cpu_save(QEMUFile *f, void *opaque)
uint16_t fptag, fpus, fpuc, fpregs_format;
uint32_t hflags;
int i;
-
+
+#ifdef USE_KVM
+ if (kvm_allowed)
+ kvm_save_registers(env);
+#endif
+
for(i = 0; i < CPU_NB_REGS; i++)
qemu_put_betls(f, &env->regs[i]);
qemu_put_betls(f, &env->eip);