[PATCH] NFS: Make searching and waiting on busy writeback requests more efficient.
Basically copies the VFS's method for tracking writebacks and applies it to the struct nfs_page.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
commit c6a556b88a
parent ab0a3dbedc

4 changed files with 45 additions and 18 deletions
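The mechanism in brief: while a request is locked for writeback, its entry in the per-inode nfs_page_tree radix tree carries the new NFS_PAGE_TAG_WRITEBACK tag, so waiters such as nfs_wait_on_requests() can walk only the tagged entries with radix_tree_gang_lookup_tag() instead of visiting every outstanding request and skipping the idle ones. A minimal sketch of that lookup pattern follows; it is illustrative only, and the helper name find_next_writeback and its bare radix_tree_root parameter are placeholders for this note, not part of the patch:

    /* Sketch: return the next request tagged as under writeback, or NULL. */
    static struct nfs_page *find_next_writeback(struct radix_tree_root *tree,
                                                unsigned long start)
    {
            struct nfs_page *req;

            /* Only entries tagged NFS_PAGE_TAG_WRITEBACK are examined. */
            if (radix_tree_gang_lookup_tag(tree, (void **)&req, start, 1,
                                           NFS_PAGE_TAG_WRITEBACK) == 0)
                    return NULL;
            return req;     /* caller must hold the lock protecting the tree */
    }

This mirrors the way the VFS tags dirty and writeback pages in the address_space radix tree, which is the "VFS's method" the commit message refers to.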
fs/nfs/pagelist.c
@@ -111,6 +111,33 @@ void nfs_unlock_request(struct nfs_page *req)
 	nfs_release_request(req);
 }
 
+/**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req:
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	if (!nfs_lock_request(req))
+		return 0;
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	spin_lock(&nfsi->req_lock);
+	radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	spin_unlock(&nfsi->req_lock);
+	nfs_unlock_request(req);
+}
+
 /**
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
@@ -301,7 +328,7 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
 		if (req->wb_index > idx_end)
 			break;
 
-		if (!nfs_lock_request(req))
+		if (!nfs_set_page_writeback_locked(req))
 			continue;
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, dst);

fs/nfs/read.c
@@ -173,7 +173,6 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
 
-	nfs_lock_request(new);
 	nfs_list_add_request(new, &one_request);
 	nfs_pagein_one(&one_request, inode);
 	return 0;
@@ -185,7 +184,6 @@ static void nfs_readpage_release(struct nfs_page *req)
 
 	nfs_clear_request(req);
 	nfs_release_request(req);
-	nfs_unlock_request(req);
 
 	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
 		req->wb_context->dentry->d_inode->i_sb->s_id,
@@ -553,7 +551,6 @@ readpage_async_filler(void *data, struct page *page)
 	}
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
-	nfs_lock_request(new);
 	nfs_list_add_request(new, desc->head);
 	return 0;
 }

fs/nfs/write.c
@@ -503,13 +503,12 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
 
 	spin_lock(&nfsi->req_lock);
 	next = idx_start;
-	while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
 		if (req->wb_index > idx_end)
 			break;
 
 		next = req->wb_index + 1;
-		if (!NFS_WBACK_BUSY(req))
-			continue;
+		BUG_ON(!NFS_WBACK_BUSY(req));
 
 		atomic_inc(&req->wb_count);
 		spin_unlock(&nfsi->req_lock);
@@ -821,7 +820,7 @@ out:
 #else
 	nfs_inode_remove_request(req);
 #endif
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 }
 
 static inline int flush_task_priority(int how)
@@ -952,7 +951,7 @@ out_bad:
 		nfs_writedata_free(data);
 	}
 	nfs_mark_request_dirty(req);
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
 
@@ -1002,7 +1001,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1029,7 +1028,7 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return error;
 }
@@ -1121,7 +1120,7 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
 		nfs_inode_remove_request(req);
 #endif
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 }
 
@@ -1278,7 +1277,7 @@ nfs_commit_list(struct list_head *head, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1324,7 +1323,7 @@ nfs_commit_done(struct rpc_task *task)
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 		res++;
 	}
 	sub_page_state(nr_unstable,res);

include/linux/nfs_page.h
@@ -19,6 +19,11 @@
 
 #include <asm/atomic.h>
 
+/*
+ * Valid flags for the radix tree
+ */
+#define NFS_PAGE_TAG_WRITEBACK	1
+
 /*
  * Valid flags for a dirty buffer
  */
@@ -62,6 +67,9 @@ extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
 				unsigned int);
 extern	int nfs_wait_on_request(struct nfs_page *);
 extern	void nfs_unlock_request(struct nfs_page *req);
+extern	int nfs_set_page_writeback_locked(struct nfs_page *req);
+extern	void nfs_clear_page_writeback(struct nfs_page *req);
+
 
 /*
  * Lock the page of an asynchronous request without incrementing the wb_count
@@ -96,10 +104,6 @@ nfs_list_remove_request(struct nfs_page *req)
 {
 	if (list_empty(&req->wb_list))
 		return;
-	if (!NFS_WBACK_BUSY(req)) {
-		printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n");
-		BUG();
-	}
 	list_del_init(&req->wb_list);
 	req->wb_list_head = NULL;
 }