Skip to content

Commit c205cb1

Browse files
davidhildenbrand and roxanan1996
authored and committed
drivers/virt/acrn: fix PFNMAP PTE checks in acrn_vm_ram_map()
BugLink: https://bugs.launchpad.net/bugs/2072617 [ Upstream commit 3d6586008f7b638f91f3332602592caa8b00b559 ] Patch series "mm: follow_pte() improvements and acrn follow_pte() fixes". Patch #1 fixes a bunch of issues I spotted in the acrn driver. It compiles, that's all I know. I'll appreciate some review and testing from acrn folks. Patch #2+#3 improve follow_pte(), passing a VMA instead of the MM, adding more sanity checks, and improving the documentation. Gave it a quick test on x86-64 using VM_PAT that ends up using follow_pte(). This patch (of 3): We currently miss handling various cases, resulting in a dangerous follow_pte() (previously follow_pfn()) usage. (1) We're not checking PTE write permissions. Maybe we should simply always require pte_write() like we do for pin_user_pages_fast(FOLL_WRITE)? Hard to tell, so let's check for ACRN_MEM_ACCESS_WRITE for now. (2) We're not rejecting refcounted pages. As we are not using MMU notifiers, messing with refcounted pages is dangerous and can result in use-after-free. Let's make sure to reject them. (3) We are only looking at the first PTE of a bigger range. We only lookup a single PTE, but memmap->len may span a larger area. Let's loop over all involved PTEs and make sure the PFN range is actually contiguous. Reject everything else: it couldn't have worked either way, and rather made us access PFNs we shouldn't be accessing. 
Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Fixes: 8a6e85f ("virt: acrn: obtain pa from VMA with PFNMAP flag") Signed-off-by: David Hildenbrand <[email protected]> Cc: Alex Williamson <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Fei Li <[email protected]> Cc: Gerald Schaefer <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Paolo Bonzini <[email protected]> Cc: Yonghua Huang <[email protected]> Cc: Sean Christopherson <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Sasha Levin <[email protected]> Signed-off-by: Portia Stephens <[email protected]> Signed-off-by: Stefan Bader <[email protected]>
1 parent f080ae9 commit c205cb1

File tree

1 file changed

+47
-16
lines changed
  • drivers/virt/acrn

1 file changed

+47
-16
lines changed

drivers/virt/acrn/mm.c

Lines changed: 47 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -155,49 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
155155
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
156156
{
157157
struct vm_memory_region_batch *regions_info;
158-
int nr_pages, i = 0, order, nr_regions = 0;
158+
int nr_pages, i, order, nr_regions = 0;
159159
struct vm_memory_mapping *region_mapping;
160160
struct vm_memory_region_op *vm_region;
161161
struct page **pages = NULL, *page;
162162
void *remap_vaddr;
163163
int ret, pinned;
164164
u64 user_vm_pa;
165-
unsigned long pfn;
166165
struct vm_area_struct *vma;
167166

168167
if (!vm || !memmap)
169168
return -EINVAL;
170169

170+
/* Get the page number of the map region */
171+
nr_pages = memmap->len >> PAGE_SHIFT;
172+
if (!nr_pages)
173+
return -EINVAL;
174+
171175
mmap_read_lock(current->mm);
172176
vma = vma_lookup(current->mm, memmap->vma_base);
173177
if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
178+
unsigned long start_pfn, cur_pfn;
174179
spinlock_t *ptl;
180+
bool writable;
175181
pte_t *ptep;
176182

177183
if ((memmap->vma_base + memmap->len) > vma->vm_end) {
178184
mmap_read_unlock(current->mm);
179185
return -EINVAL;
180186
}
181187

182-
ret = follow_pte(vma->vm_mm, memmap->vma_base, &ptep, &ptl);
183-
if (ret < 0) {
184-
mmap_read_unlock(current->mm);
188+
for (i = 0; i < nr_pages; i++) {
189+
ret = follow_pte(vma->vm_mm,
190+
memmap->vma_base + i * PAGE_SIZE,
191+
&ptep, &ptl);
192+
if (ret)
193+
break;
194+
195+
cur_pfn = pte_pfn(ptep_get(ptep));
196+
if (i == 0)
197+
start_pfn = cur_pfn;
198+
writable = !!pte_write(ptep_get(ptep));
199+
pte_unmap_unlock(ptep, ptl);
200+
201+
/* Disallow write access if the PTE is not writable. */
202+
if (!writable &&
203+
(memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
204+
ret = -EFAULT;
205+
break;
206+
}
207+
208+
/* Disallow refcounted pages. */
209+
if (pfn_valid(cur_pfn) &&
210+
!PageReserved(pfn_to_page(cur_pfn))) {
211+
ret = -EFAULT;
212+
break;
213+
}
214+
215+
/* Disallow non-contiguous ranges. */
216+
if (cur_pfn != start_pfn + i) {
217+
ret = -EINVAL;
218+
break;
219+
}
220+
}
221+
mmap_read_unlock(current->mm);
222+
223+
if (ret) {
185224
dev_dbg(acrn_dev.this_device,
186225
"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
187226
return ret;
188227
}
189-
pfn = pte_pfn(ptep_get(ptep));
190-
pte_unmap_unlock(ptep, ptl);
191-
mmap_read_unlock(current->mm);
192228

193229
return acrn_mm_region_add(vm, memmap->user_vm_pa,
194-
PFN_PHYS(pfn), memmap->len,
230+
PFN_PHYS(start_pfn), memmap->len,
195231
ACRN_MEM_TYPE_WB, memmap->attr);
196232
}
197233
mmap_read_unlock(current->mm);
198234

199-
/* Get the page number of the map region */
200-
nr_pages = memmap->len >> PAGE_SHIFT;
201235
pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
202236
if (!pages)
203237
return -ENOMEM;
@@ -241,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
241275
mutex_unlock(&vm->regions_mapping_lock);
242276

243277
/* Calculate count of vm_memory_region_op */
244-
while (i < nr_pages) {
278+
for (i = 0; i < nr_pages; i += 1 << order) {
245279
page = pages[i];
246280
VM_BUG_ON_PAGE(PageTail(page), page);
247281
order = compound_order(page);
248282
nr_regions++;
249-
i += 1 << order;
250283
}
251284

252285
/* Prepare the vm_memory_region_batch */
@@ -263,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
263296
regions_info->regions_num = nr_regions;
264297
regions_info->regions_gpa = virt_to_phys(vm_region);
265298
user_vm_pa = memmap->user_vm_pa;
266-
i = 0;
267-
while (i < nr_pages) {
299+
for (i = 0; i < nr_pages; i += 1 << order) {
268300
u32 region_size;
269301

270302
page = pages[i];
@@ -280,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
280312

281313
vm_region++;
282314
user_vm_pa += region_size;
283-
i += 1 << order;
284315
}
285316

286317
/* Inform the ACRN Hypervisor to set up EPT mappings */

0 commit comments

Comments
 (0)