virtio: net refill on out-of-memory
If we run out of memory, use keventd to fill the buffer. There's a report
of this happening: "Page allocation failures in guest",
Message-ID: <20090713115158.0a4892b0@mjolnir.ossman.eu>

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0b4f2928f1
commit 3161e453e4

1 changed file with 46 additions and 15 deletions
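The mechanism here is the usual deferred-refill scheme: the NAPI poll path first retries the receive-buffer fill with GFP_ATOMIC, and when that fails it punts to a delayed work item on the system workqueue (keventd), which may allocate with GFP_KERNEL and reschedule itself until memory turns up. The sketch below shows that shape in isolation; the demo_* names and the stubbed demo_fill_recv() are assumptions made for the example, not code from this driver (the actual change follows in the diff).

/* Illustrative sketch only: a receive ring that refills from a workqueue on OOM. */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_rxq {
	struct napi_struct napi;
	struct delayed_work refill;	/* runs in process context */
	unsigned int num;		/* buffers currently posted */
	unsigned int max;
};

/* Stub: post receive buffers allocated with 'gfp'; return false on failure. */
static bool demo_fill_recv(struct demo_rxq *rq, gfp_t gfp)
{
	return true;
}

static void demo_refill_work(struct work_struct *work)
{
	struct demo_rxq *rq = container_of(work, struct demo_rxq, refill.work);
	bool still_empty;

	napi_disable(&rq->napi);	/* keep the poll loop away while refilling */
	demo_fill_recv(rq, GFP_KERNEL);	/* may sleep, so GFP_KERNEL is allowed here */
	still_empty = (rq->num == 0);
	napi_enable(&rq->napi);

	/* Even GFP_KERNEL can fail; retry later rather than never. */
	if (still_empty)
		schedule_delayed_work(&rq->refill, HZ / 2);
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_rxq *rq = container_of(napi, struct demo_rxq, napi);
	int received = 0;

	/* ... receive up to 'budget' packets, bumping 'received' ... */

	if (rq->num < rq->max / 2) {
		/* Atomic attempt first; on failure, defer to the workqueue. */
		if (!demo_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&rq->refill, 0);
	}
	return received;
}

In this sketch the work item would be set up with INIT_DELAYED_WORK(&rq->refill, demo_refill_work) and torn down with cancel_delayed_work_sync(&rq->refill), mirroring the probe/remove changes in the diff below.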
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (vi->big_packets) {
 		for (i = 0; i < MAX_SKB_FRAGS; i++) {
 			skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-			f->page = get_a_page(vi, GFP_ATOMIC);
+			f->page = get_a_page(vi, gfp);
 			if (!f->page)
 				break;
 
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 