nfs: don't try to migrate pages with active requests
nfs_find_and_lock_request will take a reference to the nfs_page and will then put it if the req is already locked. It's possible though that the reference will be the last one. That put then can kick off a whole series of reference puts:

    nfs_page
      nfs_open_context
        dentry
          inode

If the inode ends up being deleted, then the VFS will call truncate_inode_pages. That function will try to take the page lock, but it was already locked when migrate_page was called. The code deadlocks.

Fix this by simply refusing the migration request if PagePrivate is already set, indicating that the page is already associated with an active read or write request.

We've had a customer test a backported version of this patch and the preliminary results seem good.

Cc: stable@kernel.org
Cc: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Harshula Jayasuriya <harshula@redhat.com>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent b9dd3abbbc
commit 2da9565235
1 changed file with 11 additions and 25 deletions
@@ -1691,34 +1691,20 @@ out_error:
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 		struct page *page)
 {
-	struct nfs_page *req;
-	int ret;
+	/*
+	 * If PagePrivate is set, then the page is currently associated with
+	 * an in-progress read or write request. Don't try to migrate it.
+	 *
+	 * FIXME: we could do this in principle, but we'll need a way to ensure
+	 *        that we can safely release the inode reference while holding
+	 *        the page lock.
+	 */
+	if (PagePrivate(page))
+		return -EBUSY;
 
 	nfs_fscache_release_page(page, GFP_KERNEL);
 
-	req = nfs_find_and_lock_request(page, false);
-	ret = PTR_ERR(req);
-	if (IS_ERR(req))
-		goto out;
-
-	ret = migrate_page(mapping, newpage, page);
-	if (!req)
-		goto out;
-	if (ret)
-		goto out_unlock;
-	page_cache_get(newpage);
-	spin_lock(&mapping->host->i_lock);
-	req->wb_page = newpage;
-	SetPagePrivate(newpage);
-	set_page_private(newpage, (unsigned long)req);
-	ClearPagePrivate(page);
-	set_page_private(page, 0);
-	spin_unlock(&mapping->host->i_lock);
-	page_cache_release(page);
-out_unlock:
-	nfs_clear_page_tag_locked(req);
-out:
-	return ret;
+	return migrate_page(mapping, newpage, page);
 }
 #endif
 
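For reference, here is how nfs_migrate_page reads once the hunk above is applied. This is a consolidated sketch reconstructed from the diff, not a verbatim copy of the kernel tree; the enclosing #ifdef CONFIG_MIGRATION guard is assumed from the trailing #endif, and the comments paraphrase the reasoning in the commit message.

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	/*
	 * An in-progress read or write request owns this page. Bailing out
	 * here means the migration path never takes (and later drops) an
	 * nfs_page reference, so the nfs_page -> nfs_open_context ->
	 * dentry -> inode put chain described above can no longer run,
	 * and deadlock, while the page lock is held.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	/* Ask fscache to give up any interest it holds in this page. */
	nfs_fscache_release_page(page, GFP_KERNEL);

	/* No private data left, so the generic helper can do the move. */
	return migrate_page(mapping, newpage, page);
}
#endif

Returning -EBUSY simply tells the VM that this particular page cannot be migrated right now, so it is skipped rather than blocked on; as the FIXME in the patch notes, migrating such pages is possible in principle, but would need a way to safely release the inode reference while the page lock is held.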