From fe22151c95c02c6bb145ea6c3685941e8fb09d60 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Thu, 5 Jul 2018 17:43:16 +0800
Subject: [PATCH 32/32] kvm: support layerscape

This is an integrated patch for Layerscape KVM support. It extends
kvm_phys_addr_ioremap() to take an explicit stage-2 pgprot_t, derives
the stage-2 attributes of a mapping from the corresponding stage-1 PTE
through a new stage1_to_stage2_pgprot() helper, teaches
__kvm_flush_dcache_pte() to flush pfns that have no struct page, and
raises the vITS device ID space to 17 bits with matching range checks
on device and collection IDs.
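
A usage sketch (illustrative only, not part of the diff below; kvm,
guest_ipa, pa, size and hva are placeholders): device-like regions keep
the old behaviour by passing PAGE_S2_DEVICE explicitly, while normal
memory can inherit its attributes from the stage-1 PTE of the backing
userspace mapping, as kvm_arch_prepare_memory_region() now does:

	/* Pre-patch behaviour: force Device memory at stage 2. */
	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, pa, size,
				    true /* writable */, PAGE_S2_DEVICE);

	/* Inherit cacheability from the current stage-1 mapping. */
	pte = get_locked_pte(current->mm, hva, &ptl);
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
	pte_unmap_unlock(pte, ptl);
	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, pa, size, true, prot);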
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm/kvm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 virt/kvm/arm/vgic/vgic-its.c     | 24 +++++++++++---
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 5 files changed, 88 insertions(+), 12 deletions(-)
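
Note on the vITS sizing (annotation, not part of the commit message):
GITS_TYPER encodes Devbits as "number of implemented bits minus one".
Assuming the GIC_ENCODE_SZ() definition from
include/linux/irqchip/arm-gic-v3.h, the new value works out as:

	/*
	 * GIC_ENCODE_SZ(n, w) == (((unsigned long)(n) - 1) &
	 *                         GENMASK_ULL((w) - 1, 0)),
	 * so GIC_ENCODE_SZ(17, 5) == 0x10, i.e. a 17-bit device ID
	 * space, where the old hard-coded 0x0f advertised 16 bits.
	 * vgic_its_check_id() bounds device IDs by the same constant
	 * via BIT_ULL(VITS_TYPER_DEVBITS), and collection IDs by
	 * BIT_ULL(16) since GITS_TYPER.CIL == 0.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;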

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1020,9 +1020,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1033,7 +1035,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1057,6 +1059,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1308,6 +1334,19 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1345,6 +1384,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1882,6 +1926,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1891,10 +1938,13 @@ int kvm_arch_prepare_memory_region(struc
 				ret = -EINVAL;
 				goto out;
 			}
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -274,8 +275,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -176,6 +176,8 @@ static struct its_itte *find_itte(struct
 
 #define GIC_LPI_OFFSET 8192
 
+#define VITS_TYPER_DEVBITS 17
+
 /*
  * Finds and returns a collection in the ITS collection table.
  * Must be called with the its_lock mutex held.
@@ -375,7 +377,7 @@ static unsigned long vgic_mmio_read_its_
 	 * To avoid memory waste in the guest, we keep the number of IDBits and
 	 * DevBits low - as least for the time being.
 	 */
-	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
+	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
 
 	return extract_bytes(reg, addr & 7, len);
@@ -601,16 +603,30 @@ static int vgic_its_cmd_handle_movi(stru
  * Check whether an ID can be stored into the corresponding guest table.
  * For a direct table this is pretty easy, but gets a bit nasty for
  * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
+ * is actually valid (covered by a memslot and guest accessible).
  * For this we have to read the respective first level entry.
  */
-static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id)
 {
 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	int index;
-	u64 indirect_ptr;
 	gfn_t gfn;
 
+	switch (type) {
+	case GITS_BASER_TYPE_DEVICE:
+		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
+			return false;
+		break;
+	case GITS_BASER_TYPE_COLLECTION:
+		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
+		if (id >= BIT_ULL(16))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -290,7 +290,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;