KVM: PPC: Book3s HV: Implement get_dirty_log using hardware changed bit
This changes the implementation of kvm_vm_ioctl_get_dirty_log() for Book3s HV guests to use the hardware C (changed) bits in the guest hashed page table. Since this makes the implementation quite different from the Book3s PR case, this moves the existing implementation from book3s.c to book3s_pr.c and creates a new implementation in book3s_hv.c. That implementation calls kvmppc_hv_get_dirty_log() to do the actual work, which in turn calls kvm_test_clear_dirty() for each page. kvm_test_clear_dirty() iterates over the HPTEs that map the page, clearing the C bit if set, and returns 1 if any C bit was set (including the saved C bit in the rmap entry).

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
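For context, the dirty log that this path fills in is retrieved from userspace through the generic KVM_GET_DIRTY_LOG ioctl. Below is a minimal userspace sketch of that side, not part of this commit; vm_fd, slot and npages are illustrative caller-supplied values.

/*
 * Sketch only: fetch the dirty bitmap for one memory slot through
 * KVM_GET_DIRTY_LOG.  The kernel clears its copy as it reports it, so
 * each call returns the pages dirtied since the previous call.
 * vm_fd, slot and npages are assumed caller-supplied values.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

void *fetch_dirty_log(int vm_fd, uint32_t slot, uint64_t npages)
{
        /* the kernel copies out a long-aligned bitmap, one bit per guest page */
        size_t bytes = ((npages + 63) / 64) * 8;
        void *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log;

        if (!bitmap)
                return NULL;

        memset(&log, 0, sizeof(log));
        log.slot = slot;
        log.dirty_bitmap = bitmap;

        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                free(bitmap);
                return NULL;
        }
        return bitmap;
}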
parent 5551489373
commit 82ed36164c

5 changed files with 147 additions and 39 deletions
@@ -156,6 +156,8 @@ extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+                       struct kvm_memory_slot *memslot);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -477,45 +477,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-                              struct kvm_dirty_log *log)
-{
-       struct kvm_memory_slot *memslot;
-       struct kvm_vcpu *vcpu;
-       ulong ga, ga_end;
-       int is_dirty = 0;
-       int r;
-       unsigned long n;
-
-       mutex_lock(&kvm->slots_lock);
-
-       r = kvm_get_dirty_log(kvm, log, &is_dirty);
-       if (r)
-               goto out;
-
-       /* If nothing is dirty, don't bother messing with page tables. */
-       if (is_dirty) {
-               memslot = id_to_memslot(kvm->memslots, log->slot);
-
-               ga = memslot->base_gfn << PAGE_SHIFT;
-               ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-               kvm_for_each_vcpu(n, vcpu, kvm)
-                       kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
-
-               n = kvm_dirty_bitmap_bytes(memslot);
-               memset(memslot->dirty_bitmap, 0, n);
-       }
-
-       r = 0;
-out:
-       mutex_unlock(&kvm->slots_lock);
-       return r;
-}
-
 void kvmppc_decrementer_func(unsigned long data)
 {
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
@@ -870,6 +870,75 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 }
 
+static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+       struct revmap_entry *rev = kvm->arch.revmap;
+       unsigned long head, i, j;
+       unsigned long *hptep;
+       int ret = 0;
+
+ retry:
+       lock_rmap(rmapp);
+       if (*rmapp & KVMPPC_RMAP_CHANGED) {
+               *rmapp &= ~KVMPPC_RMAP_CHANGED;
+               ret = 1;
+       }
+       if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+               unlock_rmap(rmapp);
+               return ret;
+       }
+
+       i = head = *rmapp & KVMPPC_RMAP_INDEX;
+       do {
+               hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+               j = rev[i].forw;
+
+               if (!(hptep[1] & HPTE_R_C))
+                       continue;
+
+               if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+                       /* unlock rmap before spinning on the HPTE lock */
+                       unlock_rmap(rmapp);
+                       while (hptep[0] & HPTE_V_HVLOCK)
+                               cpu_relax();
+                       goto retry;
+               }
+
+               /* Now check and modify the HPTE */
+               if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
+                       /* need to make it temporarily absent to clear C */
+                       hptep[0] |= HPTE_V_ABSENT;
+                       kvmppc_invalidate_hpte(kvm, hptep, i);
+                       hptep[1] &= ~HPTE_R_C;
+                       eieio();
+                       hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+                       rev[i].guest_rpte |= HPTE_R_C;
+                       ret = 1;
+               }
+               hptep[0] &= ~HPTE_V_HVLOCK;
+       } while ((i = j) != head);
+
+       unlock_rmap(rmapp);
+       return ret;
+}
+
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+       unsigned long i;
+       unsigned long *rmapp, *map;
+
+       preempt_disable();
+       rmapp = memslot->rmap;
+       map = memslot->dirty_bitmap;
+       for (i = 0; i < memslot->npages; ++i) {
+               if (kvm_test_clear_dirty(kvm, rmapp))
+                       __set_bit_le(i, map);
+               ++rmapp;
+       }
+       preempt_enable();
+       return 0;
+}
+
 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
                            unsigned long *nb_ret)
 {
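A note on the locking in kvm_test_clear_dirty(): the rmap lock is taken first, then each HPTE lock is only try-locked; on contention the rmap lock is dropped, the code waits for the HPTE lock to clear, and the whole scan restarts, rather than spinning while still holding the rmap lock. The generic shape of that trylock-and-retry pattern is sketched below using POSIX mutexes purely as an analogy; outer, inner and do_update() are illustrative names, not kernel interfaces.

/*
 * Illustrative sketch only: drop the outer lock before waiting on a
 * contended inner lock, then retry from the top.  This mirrors the
 * rmap-lock/HPTE-lock dance above; none of these names are kernel APIs.
 */
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

static void do_update(void)
{
        /* placeholder for the work that must happen under both locks */
}

void update_under_both_locks(void)
{
retry:
        pthread_mutex_lock(&outer);

        if (pthread_mutex_trylock(&inner) != 0) {
                /*
                 * Contended: release the outer lock first so the holder of
                 * the inner lock can make progress, wait until the inner
                 * lock is free, then start over.
                 */
                pthread_mutex_unlock(&outer);
                pthread_mutex_lock(&inner);   /* wait for it to become free */
                pthread_mutex_unlock(&inner); /* ...then retry cleanly */
                goto retry;
        }

        do_update();

        pthread_mutex_unlock(&inner);
        pthread_mutex_unlock(&outer);
}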
@@ -1072,6 +1072,43 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
        return fd;
 }
 
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = -EINVAL;
+       if (log->slot >= KVM_MEMORY_SLOTS)
+               goto out;
+
+       memslot = id_to_memslot(kvm->memslots, log->slot);
+       r = -ENOENT;
+       if (!memslot->dirty_bitmap)
+               goto out;
+
+       n = kvm_dirty_bitmap_bytes(memslot);
+       memset(memslot->dirty_bitmap, 0, n);
+
+       r = kvmppc_hv_get_dirty_log(kvm, memslot);
+       if (r)
+               goto out;
+
+       r = -EFAULT;
+       if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+               goto out;
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+}
+
 static unsigned long slb_pgsize_encoding(unsigned long psize)
 {
        unsigned long senc = 0;
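Each bit that kvmppc_hv_get_dirty_log() sets via __set_bit_le(i, map) corresponds to the guest page at memslot->base_gfn + i, and that little-endian bitmap is what copy_to_user() hands back above. A userspace sketch of walking it follows; base_gfn and npages are assumed caller-supplied values, not part of this commit.

/*
 * Illustrative sketch (not from this commit): walk the little-endian
 * dirty bitmap returned by KVM_GET_DIRTY_LOG and report dirty guest
 * frame numbers.  base_gfn and npages are assumed caller-supplied.
 */
#include <stdint.h>
#include <stdio.h>

void report_dirty_pages(const uint8_t *bitmap, uint64_t npages, uint64_t base_gfn)
{
        for (uint64_t i = 0; i < npages; i++) {
                /* bit i lives in byte i/8, at bit position i%8 (little-endian bitmap) */
                if (bitmap[i / 8] & (1u << (i % 8)))
                        printf("guest page frame %llu is dirty\n",
                               (unsigned long long)(base_gfn + i));
        }
}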
@@ -1056,6 +1056,45 @@ out:
        return ret;
 }
 
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+                              struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       struct kvm_vcpu *vcpu;
+       ulong ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = id_to_memslot(kvm->memslots, log->slot);
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               kvm_for_each_vcpu(n, vcpu, kvm)
+                       kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+}
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
 {