vhost: Remove custom vhost rcu usage

Now, vq->private_data is always accessed under vq mutex. No need to play
the vhost rcu trick.
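
Concretely, backends used to publish the pointer with rcu_assign_pointer()
and fetch it with rcu_dereference_protected(..., lockdep_is_held(&vq->mutex));
now that every access happens with vq->mutex held, plain loads and stores are
enough and the __rcu annotation can go. A minimal before/after sketch of the
pattern this patch converts (names as in drivers/vhost/net.c):

	/* before: __rcu pointer, RCU accessors even with vq->mutex held */
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->private_data, NULL);

	/* after: plain pointer, plain accesses, still under vq->mutex */
	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);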

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Asias He 2013-05-07 14:54:36 +08:00 committed by Michael S. Tsirkin
parent e7802212ea
commit 22fa90c7fb
4 changed files with 12 additions and 26 deletions

drivers/vhost/net.c

@@ -15,7 +15,6 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
@@ -749,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	if (!sock)
 		return 0;
@@ -763,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 	struct socket *sock;
 	mutex_lock(&vq->mutex);
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	vhost_net_disable_vq(n, vq);
-	rcu_assign_pointer(vq->private_data, NULL);
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return sock;
 }
@@ -922,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	}
 	/* start polling new socket */
-	oldsock = rcu_dereference_protected(vq->private_data,
-					    lockdep_is_held(&vq->mutex));
+	oldsock = vq->private_data;
 	if (sock != oldsock) {
 		ubufs = vhost_net_ubuf_alloc(vq,
 					     sock && vhost_sock_zcopy(sock));
@@ -933,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		}
 		vhost_net_disable_vq(n, vq);
-		rcu_assign_pointer(vq->private_data, sock);
+		vq->private_data = sock;
 		r = vhost_init_used(vq);
 		if (r)
 			goto err_used;
@@ -967,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	return 0;
 err_used:
-	rcu_assign_pointer(vq->private_data, oldsock);
+	vq->private_data = oldsock;
 	vhost_net_enable_vq(n, vq);
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);

drivers/vhost/scsi.c

@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vq->private_data = vs_tpg;
 			vhost_init_used(vq);
 			mutex_unlock(&vq->mutex);
 		}
@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, NULL);
+			vq->private_data = NULL;
 			mutex_unlock(&vq->mutex);
 		}
 	}

drivers/vhost/test.c

@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
 		priv = test ? n : NULL;
 		/* start polling new socket */
-		oldpriv = rcu_dereference_protected(vq->private_data,
-						    lockdep_is_held(&vq->mutex));
-		rcu_assign_pointer(vq->private_data, priv);
+		oldpriv = vq->private_data;
+		vq->private_data = priv;
 		r = vhost_init_used(&n->vqs[index]);

drivers/vhost/vhost.h

@@ -103,14 +103,8 @@ struct vhost_virtqueue {
 	struct iovec iov[UIO_MAXIOV];
 	struct iovec *indirect;
 	struct vring_used_elem *heads;
-	/* We use a kind of RCU to access private pointer.
-	 * All readers access it from worker, which makes it possible to
-	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
-	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-	 * vhost_work execution acts instead of rcu_read_lock() and the end of
-	 * vhost_work execution acts instead of rcu_read_unlock().
-	 * Writers use virtqueue mutex. */
-	void __rcu *private_data;
+	/* Protected by virtqueue mutex. */
+	void *private_data;
 	/* Log write descriptors */
 	void __user *log_base;
 	struct vhost_log *log;
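
The long comment removed above documented the old scheme: readers only touched
private_data from the vhost worker, so flushing the vhost_work stood in for
synchronize_rcu(). With the __rcu annotation gone, readers and writers alike
simply rely on vq->mutex. As a sketch (hypothetical handle_vq(), modeled on the
handle_tx/handle_rx style in net.c, not the exact upstream code), a worker-side
reader now looks like this:

	static void handle_vq(struct vhost_virtqueue *vq)
	{
		struct socket *sock;

		mutex_lock(&vq->mutex);
		sock = vq->private_data;	/* plain load; vq->mutex held */
		if (!sock)
			goto out;
		/* ... process the ring using sock ... */
	out:
		mutex_unlock(&vq->mutex);
	}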