diff options
| -rw-r--r-- | drivers/hv/mshv_root_main.c | 80 |
1 file changed, 36 insertions, 44 deletions
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c index bc15d6f6922fa4..fec82619684a6f 100644 --- a/drivers/hv/mshv_root_main.c +++ b/drivers/hv/mshv_root_main.c @@ -1114,8 +1114,8 @@ mshv_region_map(struct mshv_mem_region *region) } static void -mshv_region_evict_pages(struct mshv_mem_region *region, - u64 page_offset, u64 page_count) +mshv_region_invalidate_pages(struct mshv_mem_region *region, + u64 page_offset, u64 page_count) { if (region->flags.range_pinned) unpin_user_pages(region->pages + page_offset, page_count); @@ -1125,29 +1125,24 @@ mshv_region_evict_pages(struct mshv_mem_region *region, } static void -mshv_region_evict(struct mshv_mem_region *region) +mshv_region_invalidate(struct mshv_mem_region *region) { - mshv_region_evict_pages(region, 0, region->nr_pages); + mshv_region_invalidate_pages(region, 0, region->nr_pages); } static int -mshv_region_populate_pages(struct mshv_mem_region *region, - u64 page_offset, u64 page_count) +mshv_region_pin(struct mshv_mem_region *region) { u64 done_count, nr_pages; struct page **pages; __u64 userspace_addr; int ret; - if (page_offset + page_count > region->nr_pages) - return -EINVAL; - - for (done_count = 0; done_count < page_count; done_count += ret) { - pages = region->pages + page_offset + done_count; + for (done_count = 0; done_count < region->nr_pages; done_count += ret) { + pages = region->pages + done_count; userspace_addr = region->start_uaddr + - (page_offset + done_count) * - HV_HYP_PAGE_SIZE; - nr_pages = min(page_count - done_count, + done_count * HV_HYP_PAGE_SIZE; + nr_pages = min(region->nr_pages - done_count, MSHV_PIN_PAGES_BATCH_SIZE); /* @@ -1158,34 +1153,23 @@ mshv_region_populate_pages(struct mshv_mem_region *region, * with the FOLL_LONGTERM flag does a large temporary * allocation of contiguous memory. 
*/ - if (region->flags.range_pinned) - ret = pin_user_pages_fast(userspace_addr, - nr_pages, - FOLL_WRITE | FOLL_LONGTERM, - pages); - else - ret = -EOPNOTSUPP; - + ret = pin_user_pages_fast(userspace_addr, nr_pages, + FOLL_WRITE | FOLL_LONGTERM, + pages); if (ret < 0) goto release_pages; } - if (PageHuge(region->pages[page_offset])) + if (PageHuge(region->pages[0])) region->flags.large_pages = true; return 0; release_pages: - mshv_region_evict_pages(region, page_offset, done_count); + mshv_region_invalidate_pages(region, 0, done_count); return ret; } -static int -mshv_region_populate(struct mshv_mem_region *region) -{ - return mshv_region_populate_pages(region, 0, region->nr_pages); -} - static struct mshv_mem_region * mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn) { @@ -1245,19 +1229,27 @@ static int mshv_partition_create_region(struct mshv_partition *partition, return 0; } -/* - * Map guest ram. if snp, make sure to release that from the host first - * Side Effects: In case of failure, pages are unpinned when feasible. +/** + * mshv_prepare_pinned_region - Pin and map memory regions + * @region: Pointer to the memory region structure + * + * This function processes memory regions that are explicitly marked as pinned. + * Pinned regions are preallocated, mapped upfront, and do not rely on fault-based + * population. The function ensures the region is properly populated, handles + * encryption requirements for SNP partitions if applicable, maps the region, + * and performs necessary sharing or eviction operations based on the mapping + * result. + * + * Return: 0 on success, negative error code on failure. 
*/ -static int -mshv_partition_mem_region_map(struct mshv_mem_region *region) +static int mshv_prepare_pinned_region(struct mshv_mem_region *region) { struct mshv_partition *partition = region->partition; int ret; - ret = mshv_region_populate(region); + ret = mshv_region_pin(region); if (ret) { - pt_err(partition, "Failed to populate memory region: %d\n", + pt_err(partition, "Failed to pin memory region: %d\n", ret); goto err_out; } @@ -1275,7 +1267,7 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region) pt_err(partition, "Failed to unshare memory region (guest_pfn: %llu): %d\n", region->start_gfn, ret); - goto evict_region; + goto invalidate_region; } } @@ -1285,7 +1277,7 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region) shrc = mshv_partition_region_share(region); if (!shrc) - goto evict_region; + goto invalidate_region; pt_err(partition, "Failed to share memory region (guest_pfn: %llu): %d\n", @@ -1299,8 +1291,8 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region) return 0; -evict_region: - mshv_region_evict(region); +invalidate_region: + mshv_region_invalidate(region); err_out: return ret; } @@ -1349,7 +1341,7 @@ mshv_map_user_memory(struct mshv_partition *partition, ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn, mmio_pfn, HVPFN_DOWN(mem.size)); else - ret = mshv_partition_mem_region_map(region); + ret = mshv_prepare_pinned_region(region); if (ret) goto errout; @@ -1394,7 +1386,7 @@ mshv_unmap_user_memory(struct mshv_partition *partition, hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn, region->nr_pages, unmap_flags); - mshv_region_evict(region); + mshv_region_invalidate(region); vfree(region); return 0; @@ -1812,7 +1804,7 @@ static void destroy_partition(struct mshv_partition *partition) } } - mshv_region_evict(region); + mshv_region_invalidate(region); vfree(region); } |
