KVM: s390: pv: add mmu_notifier

Add an mmu_notifier for protected VMs. The callback function is
triggered when the mm is torn down, and will attempt to convert all
protected vCPUs to non-protected. This allows the mm teardown to use
the destroy page UVC instead of export.

Also make KVM select CONFIG_MMU_NOTIFIER, needed to use mmu_notifiers.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Nico Boehr <nrb@linux.ibm.com>
Link: https://lore.kernel.org/r/20220628135619.32410-10-imbrenda@linux.ibm.com
Message-Id: <20220628135619.32410-10-imbrenda@linux.ibm.com>
[frankja@linux.ibm.com: Conflict resolution for mmu_notifier.h include
and struct kvm_s390_pv]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
This commit is contained in:
Claudio Imbrenda
2022-06-28 15:56:10 +02:00
committed by Janosch Frank
parent be48d86f77
commit ca2fd0609b
4 changed files with 39 additions and 0 deletions

View File

@@ -20,6 +20,7 @@
#include <linux/seqlock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mmu_notifier.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
@@ -929,6 +930,7 @@ struct kvm_s390_pv {
	unsigned long stor_base;
	void *stor_var;
	bool dumping;
	struct mmu_notifier mmu_notifier;
};

struct kvm_arch{

View File

@@ -34,6 +34,7 @@ config KVM
	select SRCU
	select KVM_VFIO
	select INTERVAL_TREE
	select MMU_NOTIFIER
	help
	  Support hosting paravirtualized guest machines using the SIE
	  virtualization capability on the mainframe. This should work

View File

@@ -31,6 +31,7 @@
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
@@ -3198,6 +3199,15 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	 */
	if (kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
	/*
	 * Remove the mmu notifier only when the whole KVM VM is torn down,
	 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but has previously been protected,
	 * then it's possible that the notifier is still registered.
	 */
	if (kvm->arch.pv.mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))

View File

@@ -15,6 +15,7 @@
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"

static void kvm_s390_clear_pv_state(struct kvm *kvm)
@@ -188,6 +189,26 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
	return -EIO;
}
/*
 * .release callback of the protected-VM mmu_notifier, invoked when the
 * guest's struct mm is being torn down.  It converts all protected vCPUs
 * back to non-protected, which allows the mm teardown to use the faster
 * destroy page UVC instead of export (see commit description).
 *
 * @subscription: the registered notifier, embedded in struct kvm_s390_pv
 * @mm: the address space being torn down (unused; kvm is recovered from
 *      @subscription via container_of)
 */
static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;	/* rc/rrc from the UV call are intentionally discarded */

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct mm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
}
/* Only .release is implemented: the notifier exists solely to react to mm teardown. */
static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
@@ -229,6 +250,11 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;

	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}