| author | Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> | 2025-09-25 16:51:14 +0200 |
|---|---|---|
| committer | Karol Wachowski <karol.wachowski@linux.intel.com> | 2025-10-01 09:59:45 +0200 |
| commit | 8b694b405a84696f1d964f6da7cf9721e68c4714 | |
| tree | d6622e0eecb360db33097b5a3f4c53f78d82af49 | |
| parent | e0c0891cd63bf8338c25c423e28a5a93aed3d74c | |
accel/ivpu: Fix page fault in ivpu_bo_unbind_all_bos_from_context()
Don't add the BO to vdev->bo_list in ivpu_gem_create_object().
When a failure happens inside drm_gem_shmem_create(), the BO is not
fully created and the ivpu_gem_bo_free() callback will not be called,
leaving a deleted BO on the list.
Fixes: 8d88e4cdce4f ("accel/ivpu: Use GEM shmem helper for all buffers")
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Link: https://lore.kernel.org/r/20250925145114.1446283-1-maciej.falkowski@linux.intel.com
| -rw-r--r-- | drivers/accel/ivpu/ivpu_gem.c | 22 |
1 file changed, 16 insertions, 6 deletions
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 0cb48aff396c46..38ecf933d144dd 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -194,7 +194,6 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 
 struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
 {
-	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_bo *bo;
 
 	if (size == 0 || !PAGE_ALIGNED(size))
@@ -209,20 +208,17 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
 
 	INIT_LIST_HEAD(&bo->bo_list_node);
 
-	mutex_lock(&vdev->bo_list_lock);
-	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
-	mutex_unlock(&vdev->bo_list_lock);
-
-	ivpu_dbg(vdev, BO, " alloc: bo %8p size %9zu\n", bo, size);
 	return &bo->base.base;
 }
 
 struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
 					     struct dma_buf *dma_buf)
 {
+	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct device *attach_dev = dev->dev;
 	struct dma_buf_attachment *attach;
 	struct drm_gem_object *obj;
+	struct ivpu_bo *bo;
 	int ret;
 
 	attach = dma_buf_attach(dma_buf, attach_dev);
@@ -240,6 +236,14 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
 	obj->import_attach = attach;
 	obj->resv = dma_buf->resv;
 
+	bo = to_ivpu_bo(obj);
+
+	mutex_lock(&vdev->bo_list_lock);
+	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+	mutex_unlock(&vdev->bo_list_lock);
+
+	ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
 	return obj;
 
 fail_detach:
@@ -270,6 +274,12 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
 	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
 	bo->flags = flags;
 
+	mutex_lock(&vdev->bo_list_lock);
+	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+	mutex_unlock(&vdev->bo_list_lock);
+
+	ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
+
 	return bo;
 }
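The pattern being fixed is general: if an object is registered on a shared list before its creation can still fail, the error path can leave a dangling entry behind, and a later walker such as ivpu_bo_unbind_all_bos_from_context() faults on it. The following is a minimal, standalone userspace sketch of that ordering issue, not driver code: the types and helpers (fake_bo, bo_list, create_bo_buggy/fixed) are hypothetical stand-ins, the list is a plain singly linked list, and locking is omitted.

```c
/*
 * Sketch of the ordering bug: registering an object on a global list
 * before creation is complete leaves a dangling entry on failure.
 * All names are illustrative; this is not the ivpu driver code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_bo {
	size_t size;
	struct fake_bo *next;	/* stand-in for bo->bo_list_node */
};

static struct fake_bo *bo_list;	/* stand-in for vdev->bo_list */

/* Buggy pattern: register first, then finish creation (which may fail). */
static struct fake_bo *create_bo_buggy(size_t size, bool creation_fails)
{
	struct fake_bo *bo = calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	bo->next = bo_list;	/* registered while only partially created */
	bo_list = bo;

	if (creation_fails) {	/* e.g. the shmem helper failing mid-way */
		free(bo);	/* freed, but still reachable via bo_list */
		return NULL;
	}
	bo->size = size;
	return bo;
}

/* Fixed pattern: register only once the object is fully created. */
static struct fake_bo *create_bo_fixed(size_t size, bool creation_fails)
{
	struct fake_bo *bo = calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	if (creation_fails) {
		free(bo);	/* nothing else ever saw this object */
		return NULL;
	}
	bo->size = size;
	bo->next = bo_list;	/* safe: creation can no longer fail */
	bo_list = bo;
	return bo;
}

int main(void)
{
	create_bo_buggy(4096, true);
	printf("after buggy failure, list head = %p (dangling)\n", (void *)bo_list);

	bo_list = NULL;
	create_bo_fixed(4096, true);
	printf("after fixed failure, list head = %p (empty)\n", (void *)bo_list);
	return 0;
}
```

This mirrors the structure of the patch above: list_add_tail() is moved out of ivpu_gem_create_object(), which can still be followed by a failure inside drm_gem_shmem_create(), and into ivpu_bo_alloc() and ivpu_gem_prime_import(), where the BO is already fully constructed.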
