drm/amdgpu: prevent get_user_pages recursion
Remember the tasks which are inside get_user_pages() and ignore MMU callbacks from there. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
211dff5518
commit
637dd3b5ca
1 changed files with 38 additions and 10 deletions
|
@@ -494,13 +494,20 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
|
||||||
/*
|
/*
|
||||||
* TTM backend functions.
|
* TTM backend functions.
|
||||||
*/
|
*/
|
||||||
|
struct amdgpu_ttm_gup_task_list {
|
||||||
|
struct list_head list;
|
||||||
|
struct task_struct *task;
|
||||||
|
};
|
||||||
|
|
||||||
struct amdgpu_ttm_tt {
|
struct amdgpu_ttm_tt {
|
||||||
struct ttm_dma_tt ttm;
|
struct ttm_dma_tt ttm;
|
||||||
struct amdgpu_device *adev;
|
struct amdgpu_device *adev;
|
||||||
u64 offset;
|
u64 offset;
|
||||||
uint64_t userptr;
|
uint64_t userptr;
|
||||||
struct mm_struct *usermm;
|
struct mm_struct *usermm;
|
||||||
uint32_t userflags;
|
uint32_t userflags;
|
||||||
|
spinlock_t guptasklock;
|
||||||
|
struct list_head guptasks;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* prepare the sg table with the user pages */
|
/* prepare the sg table with the user pages */
|
||||||
|
@@ -530,9 +537,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
|
||||||
unsigned num_pages = ttm->num_pages - pinned;
|
unsigned num_pages = ttm->num_pages - pinned;
|
||||||
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
|
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
|
||||||
struct page **pages = ttm->pages + pinned;
|
struct page **pages = ttm->pages + pinned;
|
||||||
|
struct amdgpu_ttm_gup_task_list guptask;
|
||||||
|
|
||||||
|
guptask.task = current;
|
||||||
|
spin_lock(&gtt->guptasklock);
|
||||||
|
list_add(&guptask.list, &gtt->guptasks);
|
||||||
|
spin_unlock(&gtt->guptasklock);
|
||||||
|
|
||||||
r = get_user_pages(current, current->mm, userptr, num_pages,
|
r = get_user_pages(current, current->mm, userptr, num_pages,
|
||||||
write, 0, pages, NULL);
|
write, 0, pages, NULL);
|
||||||
|
|
||||||
|
spin_lock(&gtt->guptasklock);
|
||||||
|
list_del(&guptask.list);
|
||||||
|
spin_unlock(&gtt->guptasklock);
|
||||||
|
|
||||||
if (r < 0)
|
if (r < 0)
|
||||||
goto release_pages;
|
goto release_pages;
|
||||||
|
|
||||||
|
@@ -783,6 +801,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
|
||||||
gtt->userptr = addr;
|
gtt->userptr = addr;
|
||||||
gtt->usermm = current->mm;
|
gtt->usermm = current->mm;
|
||||||
gtt->userflags = flags;
|
gtt->userflags = flags;
|
||||||
|
spin_lock_init(&gtt->guptasklock);
|
||||||
|
INIT_LIST_HEAD(&gtt->guptasks);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -800,18 +821,25 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
|
||||||
unsigned long end)
|
unsigned long end)
|
||||||
{
|
{
|
||||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||||
|
struct amdgpu_ttm_gup_task_list *entry;
|
||||||
unsigned long size;
|
unsigned long size;
|
||||||
|
|
||||||
if (gtt == NULL)
|
if (gtt == NULL || !gtt->userptr)
|
||||||
return false;
|
|
||||||
|
|
||||||
if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
|
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
|
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
|
||||||
if (gtt->userptr > end || gtt->userptr + size <= start)
|
if (gtt->userptr > end || gtt->userptr + size <= start)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
spin_lock(&gtt->guptasklock);
|
||||||
|
list_for_each_entry(entry, &gtt->guptasks, list) {
|
||||||
|
if (entry->task == current) {
|
||||||
|
spin_unlock(&gtt->guptasklock);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
spin_unlock(&gtt->guptasklock);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue