author    Avi Kivity <avi@redhat.com>  2009-06-21 17:59:35 +0300
committer Avi Kivity <avi@redhat.com>  2009-06-21 17:59:35 +0300
commit    119f9ed1ad0c3f8c4ab78390bc5d0c55b226dc3d (patch)
tree      753ddaed428d5c0071fda562639ab0d06aa68609
parent    772555c2e57c82f4fe83208d1e77f4d6b4e61466 (diff)
Fix live migration out-of-memory
Tags: kvm-87rc6, kvm-87rc5, kvm-87rc4, kvm-87rc3
Upstream commit 9fa06385f6 ("kvm: Mark full address range dirty on live
migration start") requests dirty logging on the entire 64-bit address
space. This triggered an allocation in qemu-kvm.c sized for that whole
range, which oomed. Fix by allocating the buffer only once the actual
slot is known, so we allocate exactly as much memory as necessary.

Signed-off-by: Avi Kivity <avi@redhat.com>
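For scale, here is a minimal sketch (not part of the patch) of why per-slot
sizing fixes the OOM: the dirty bitmap needs one bit per 4 KiB page, rounded
up to whole bytes, plus the two bytes of slack the patch keeps. The helper
name dirty_bitmap_bytes is hypothetical.

#include <stdio.h>

/* Hypothetical helper mirroring the sizing expression the patch uses:
 * one bit per 4096-byte page, rounded up to a byte, plus 2 bytes of
 * slack, as in qemu_malloc((slots[i].len / 4096 + 7) / 8 + 2). */
static unsigned long dirty_bitmap_bytes(unsigned long slot_len)
{
    return (slot_len / 4096 + 7) / 8 + 2;
}

int main(void)
{
    /* A 1 GiB slot needs only ~32 KiB of bitmap. */
    printf("1 GiB slot: %lu bytes\n", dirty_bitmap_bytes(1UL << 30));
    /* Before the fix, kvm_physical_sync_dirty_bitmap() allocated
     * (end_addr - start_addr) / 8 + 2 bytes up front; for the full
     * 64-bit range that is about 2^61 bytes, hence the OOM. */
    printf("full range: %lu bytes\n", dirty_bitmap_bytes(-1UL));
    return 0;
}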
-rw-r--r--  libkvm-all.h |  2 +-
-rw-r--r--  qemu-kvm.c   | 16 +++++++++-------
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/libkvm-all.h b/libkvm-all.h
index 47855be9d..ecd306500 100644
--- a/libkvm-all.h
+++ b/libkvm-all.h
@@ -548,7 +548,7 @@ int kvm_register_phys_mem(kvm_context_t kvm,
                               unsigned long len, int log);
 int kvm_get_dirty_pages(kvm_context_t, unsigned long phys_addr, void *buf);
 int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
-                              unsigned long end_addr, void *buf, void *opaque,
+                              unsigned long end_addr, void *opaque,
                               int (*cb)(unsigned long start, unsigned long len,
                                         void *bitmap, void *opaque));
 int kvm_register_coalesced_mmio(kvm_context_t kvm,
diff --git a/qemu-kvm.c b/qemu-kvm.c
index a2131b79c..3105753ac 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -689,21 +689,26 @@ int kvm_get_dirty_pages(kvm_context_t kvm, unsigned long phys_addr, void *buf)
 }
 
 int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
-                              unsigned long len, void *buf, void *opaque,
+                              unsigned long len, void *opaque,
                               int (*cb)(unsigned long start, unsigned long len,
                                         void *bitmap, void *opaque))
 {
     int i;
     int r;
     unsigned long end_addr = phys_addr + len;
+    void *buf;
 
     for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
         if ((slots[i].len && (uint64_t)slots[i].phys_addr >= phys_addr)
             && ((uint64_t)slots[i].phys_addr + slots[i].len <= end_addr)) {
+            buf = qemu_malloc((slots[i].len / 4096 + 7) / 8 + 2);
             r = kvm_get_map(kvm, KVM_GET_DIRTY_LOG, i, buf);
-            if (r)
+            if (r) {
+                qemu_free(buf);
                 return r;
+            }
             r = cb(slots[i].phys_addr, slots[i].len, buf, opaque);
+            qemu_free(buf);
             if (r)
                 return r;
         }
@@ -2785,7 +2790,7 @@ int kvm_update_dirty_pages_log(void)
     r = kvm_get_dirty_pages_range(kvm_context, 0, -1UL,
-                                  kvm_dirty_bitmap, NULL,
+                                  NULL,
                                   kvm_get_dirty_bitmap_cb);
     return r;
 }
@@ -2913,17 +2918,14 @@ void kvm_ioperm(CPUState *env, void *data)
 int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
 {
 #ifndef TARGET_IA64
-    void *buf;
 #ifdef TARGET_I386
     if (must_use_aliases_source(start_addr))
         return 0;
 #endif
-    buf = qemu_malloc((end_addr - start_addr) / 8 + 2);
     kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
-                              buf, NULL, kvm_get_dirty_bitmap_cb);
-    qemu_free(buf);
+                              NULL, kvm_get_dirty_bitmap_cb);
 #endif
     return 0;
 }
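With the buffer parameter gone, callers now supply only the callback and its
opaque pointer; kvm_get_dirty_pages_range() allocates and frees the per-slot
bitmap internally. A hedged sketch of the new calling convention follows;
count_dirty_cb and count_dirty_pages are illustrative names, not from the tree.

/* Illustrative callback: counts set bits, one bit per 4 KiB page,
 * matching how the patch sizes the per-slot bitmap. */
static int count_dirty_cb(unsigned long start, unsigned long len,
                          void *bitmap, void *opaque)
{
    unsigned long *dirty = opaque;
    unsigned char *bits = bitmap;
    unsigned long page;

    for (page = 0; page < len / 4096; ++page)
        if (bits[page / 8] & (1u << (page % 8)))
            ++*dirty;
    return 0;  /* a nonzero return would abort the slot walk */
}

/* Hypothetical caller: scan the whole address space, as
 * kvm_update_dirty_pages_log() does after this patch. */
static unsigned long count_dirty_pages(kvm_context_t kvm)
{
    unsigned long dirty = 0;

    kvm_get_dirty_pages_range(kvm, 0, -1UL, &dirty, count_dirty_cb);
    return dirty;
}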