net_sched: add the ability to defer skb freeing
qdiscs are changed under RTNL protection, often while BH is blocked and the root qdisc spinlock is held. When lots of skbs need to be dropped, we free them under these locks, causing TX/RX freezes and, more generally, latency spikes. This commit adds rtnl_kfree_skbs(), used to queue skbs for deferred freeing. Actual freeing happens right after RTNL is released, with appropriate scheduling points. rtnl_qdisc_drop() can also be used in place of qdisc_drop() when RTNL is held. qdisc_reset_queue() and __qdisc_reset_queue() get the new behavior, so standard qdiscs like pfifo, pfifo_fast... have their ->reset() method automatically handled. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
35c55c9877
commit
1b5c5493e3
4 changed files with 38 additions and 7 deletions
|
@ -89,8 +89,9 @@ void net_inc_egress_queue(void);
|
|||
void net_dec_egress_queue(void);
|
||||
#endif
|
||||
|
||||
extern void rtnetlink_init(void);
|
||||
extern void __rtnl_unlock(void);
|
||||
void rtnetlink_init(void);
|
||||
void __rtnl_unlock(void);
|
||||
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
|
||||
|
||||
#define ASSERT_RTNL() do { \
|
||||
if (unlikely(!rtnl_is_locked())) { \
|
||||
|
|
|
@ -683,19 +683,21 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
|
|||
return skb;
|
||||
}
|
||||
|
||||
static inline void __qdisc_reset_queue(struct Qdisc *sch,
|
||||
struct sk_buff_head *list)
|
||||
static inline void __qdisc_reset_queue(struct sk_buff_head *list)
|
||||
{
|
||||
/*
|
||||
* We do not know the backlog in bytes of this list, it
|
||||
* is up to the caller to correct it
|
||||
*/
|
||||
__skb_queue_purge(list);
|
||||
if (!skb_queue_empty(list)) {
|
||||
rtnl_kfree_skbs(list->next, list->prev);
|
||||
__skb_queue_head_init(list);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void qdisc_reset_queue(struct Qdisc *sch)
|
||||
{
|
||||
__qdisc_reset_queue(sch, &sch->q);
|
||||
__qdisc_reset_queue(&sch->q);
|
||||
sch->qstats.backlog = 0;
|
||||
}
|
||||
|
||||
|
@ -716,6 +718,12 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
|
|||
return old;
|
||||
}
|
||||
|
||||
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
|
||||
{
|
||||
rtnl_kfree_skbs(skb, skb);
|
||||
qdisc_qstats_drop(sch);
|
||||
}
|
||||
|
||||
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
|
||||
{
|
||||
kfree_skb(skb);
|
||||
|
|
|
@ -71,9 +71,31 @@ void rtnl_lock(void)
|
|||
}
|
||||
EXPORT_SYMBOL(rtnl_lock);
|
||||
|
||||
static struct sk_buff *defer_kfree_skb_list;
|
||||
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
|
||||
{
|
||||
if (head && tail) {
|
||||
tail->next = defer_kfree_skb_list;
|
||||
defer_kfree_skb_list = head;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(rtnl_kfree_skbs);
|
||||
|
||||
void __rtnl_unlock(void)
|
||||
{
|
||||
struct sk_buff *head = defer_kfree_skb_list;
|
||||
|
||||
defer_kfree_skb_list = NULL;
|
||||
|
||||
mutex_unlock(&rtnl_mutex);
|
||||
|
||||
while (head) {
|
||||
struct sk_buff *next = head->next;
|
||||
|
||||
kfree_skb(head);
|
||||
cond_resched();
|
||||
head = next;
|
||||
}
|
||||
}
|
||||
|
||||
void rtnl_unlock(void)
|
||||
|
|
|
@ -493,7 +493,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
|
|||
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
|
||||
|
||||
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
|
||||
__qdisc_reset_queue(qdisc, band2list(priv, prio));
|
||||
__qdisc_reset_queue(band2list(priv, prio));
|
||||
|
||||
priv->bitmap = 0;
|
||||
qdisc->qstats.backlog = 0;
|
||||
|
|
Loading…
Reference in a new issue