net: sched: restrict use of qstats qlen
This removes the use of the qstats->qlen variable from the classifiers and makes it an explicit argument to gnet_stats_copy_queue(). The qlen represents the qdisc queue length and is packed into the qstats at the last moment before passing to user space. By handling it explicitly we avoid, in the percpu stats case, having to figure out which per_cpu variable to put it in.

It would probably be best to remove it from qstats completely, but qstats is a user space ABI and can't be broken. A future patch could make an internal-only qstats structure that would avoid having to allocate an additional u32 variable on the Qdisc struct. This would make the qstats struct 128 bits instead of 128+32.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 25331d6ce4
commit 6401585366
16 changed files with 32 additions and 34 deletions
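As a rough illustration (not part of the patch itself), a classful qdisc's ->dump_class_stats() callback now reads the live queue length and hands it to gnet_stats_copy_queue() directly instead of stashing it in qstats.qlen first. The "foo_" names below are hypothetical placeholders; the gnet_* helpers are the kernel API touched by this patch.

/* Minimal sketch of the new calling convention; foo_class and
 * foo_dump_class_stats() are hypothetical.
 */
static int foo_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct foo_class *cl = (struct foo_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;	/* read the qdisc queue length once */

	/* qlen is passed explicitly; gnet_stats_copy_queue() packs it into
	 * the user-visible qstats at the last moment.
	 */
	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return 0;
}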
@@ -40,7 +40,8 @@ void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
 			     const struct gnet_stats_basic_packed *b,
 			     struct gnet_stats_rate_est64 *r);
-int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+			  struct gnet_stats_queue *q, __u32 len);
 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

 int gnet_stats_finish_copy(struct gnet_dump *d);
@@ -219,6 +219,7 @@ EXPORT_SYMBOL(gnet_stats_copy_rate_est);
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
  * @d: dumping handle
  * @q: queue statistics
+ * @qlen: queue length statistics
  *
  * Appends the queue statistics to the top level TLV created by
  * gnet_stats_start_copy().
@@ -227,8 +228,11 @@ EXPORT_SYMBOL(gnet_stats_copy_rate_est);
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q)
+gnet_stats_copy_queue(struct gnet_dump *d,
+		      struct gnet_stats_queue *q, __u32 qlen)
 {
+	q->qlen = qlen;
+
 	if (d->compat_tc_stats) {
 		d->tc_stats.drops = q->drops;
 		d->tc_stats.qlen = q->qlen;
@@ -623,7 +623,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 	if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
 				     &p->tcfc_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0)
+	    gnet_stats_copy_queue(&d,
+				  &p->tcfc_qstats,
+				  p->tcfc_qstats.qlen) < 0)
 		goto errout;

 	if (gnet_stats_finish_copy(&d) < 0)
@@ -1318,6 +1318,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
+	__u32 qlen;

 	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
@@ -1335,7 +1336,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 		goto nla_put_failure;
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
-	q->qstats.qlen = q->q.qlen;
+	qlen = q->q.qlen;

 	stab = rtnl_dereference(q->stab);
 	if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -1353,7 +1354,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,

 	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
+	    gnet_stats_copy_queue(&d, &q->qstats, qlen) < 0)
 		goto nla_put_failure;

 	if (gnet_stats_finish_copy(&d) < 0)
@@ -637,10 +637,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

-	flow->qstats.qlen = flow->q->q.qlen;
-
 	if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, &flow->qstats) < 0)
+	    gnet_stats_copy_queue(d, &flow->qstats, flow->q->q.qlen) < 0)
 		return -1;

 	return 0;
@@ -1594,7 +1594,6 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;

-	cl->qstats.qlen = cl->q->q.qlen;
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
@@ -1603,7 +1602,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,

 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl->qstats, cl->q->q.qlen) < 0)
 		return -1;

 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -275,17 +275,16 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 				struct gnet_dump *d)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
+	__u32 qlen = cl->qdisc->q.qlen;
 	struct tc_drr_stats xstats;

 	memset(&xstats, 0, sizeof(xstats));
-	if (cl->qdisc->q.qlen) {
+	if (qlen)
 		xstats.deficit = cl->deficit;
-		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
-	}

 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl->qdisc->qstats, qlen) < 0)
 		return -1;

 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -550,7 +550,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, &qs) < 0)
+	if (gnet_stats_copy_queue(d, &qs, 0) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -1370,7 +1370,6 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 	struct tc_hfsc_stats xstats;

-	cl->qstats.qlen = cl->qdisc->q.qlen;
 	cl->qstats.backlog = cl->qdisc->qstats.backlog;
 	xstats.level = cl->level;
 	xstats.period = cl->cl_vtperiod;
@@ -1379,7 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,

 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl->qstats, cl->qdisc->q.qlen) < 0)
 		return -1;

 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -1138,15 +1138,16 @@ static int
 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
+	__u32 qlen = 0;

 	if (!cl->level && cl->un.leaf.q)
-		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
+		qlen = cl->un.leaf.q->q.qlen;
 	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
 	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl->qstats, qlen) < 0)
 		return -1;

 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -112,7 +112,6 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 		sch->q.qlen += qdisc->q.qlen;
 		sch->bstats.bytes += qdisc->bstats.bytes;
 		sch->bstats.packets += qdisc->bstats.packets;
-		sch->qstats.qlen += qdisc->qstats.qlen;
 		sch->qstats.backlog += qdisc->qstats.backlog;
 		sch->qstats.drops += qdisc->qstats.drops;
 		sch->qstats.requeues += qdisc->qstats.requeues;
@@ -200,9 +199,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

 	sch = dev_queue->qdisc_sleeping;
-	sch->qstats.qlen = sch->q.qlen;
 	if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, &sch->qstats) < 0)
+	    gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0)
 		return -1;
 	return 0;
 }
@@ -236,7 +236,6 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 		sch->q.qlen += qdisc->q.qlen;
 		sch->bstats.bytes += qdisc->bstats.bytes;
 		sch->bstats.packets += qdisc->bstats.packets;
-		sch->qstats.qlen += qdisc->qstats.qlen;
 		sch->qstats.backlog += qdisc->qstats.backlog;
 		sch->qstats.drops += qdisc->qstats.drops;
 		sch->qstats.requeues += qdisc->qstats.requeues;
|
@ -327,6 +326,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
|||
|
||||
if (cl <= netdev_get_num_tc(dev)) {
|
||||
int i;
|
||||
__u32 qlen = 0;
|
||||
struct Qdisc *qdisc;
|
||||
struct gnet_stats_queue qstats = {0};
|
||||
struct gnet_stats_basic_packed bstats = {0};
|
||||
|
@@ -344,9 +344,9 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,

 			qdisc = rtnl_dereference(q->qdisc);
 			spin_lock_bh(qdisc_lock(qdisc));
+			qlen += qdisc->q.qlen;
 			bstats.bytes += qdisc->bstats.bytes;
 			bstats.packets += qdisc->bstats.packets;
-			qstats.qlen += qdisc->qstats.qlen;
 			qstats.backlog += qdisc->qstats.backlog;
 			qstats.drops += qdisc->qstats.drops;
 			qstats.requeues += qdisc->qstats.requeues;
@@ -356,15 +356,14 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		/* Reclaim root sleeping lock before completing stats */
 		spin_lock_bh(d->lock);
 		if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
-		    gnet_stats_copy_queue(d, &qstats) < 0)
+		    gnet_stats_copy_queue(d, &qstats, qlen) < 0)
 			return -1;
 	} else {
 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

 		sch = dev_queue->qdisc_sleeping;
-		sch->qstats.qlen = sch->q.qlen;
 		if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
-		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
+		    gnet_stats_copy_queue(d, &sch->qstats, sch->q.qlen) < 0)
 			return -1;
 	}
 	return 0;
@@ -360,9 +360,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct Qdisc *cl_q;

 	cl_q = q->queues[cl - 1];
-	cl_q->qstats.qlen = cl_q->q.qlen;
 	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0)
 		return -1;

 	return 0;
@@ -324,9 +324,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct Qdisc *cl_q;

 	cl_q = q->queues[cl - 1];
-	cl_q->qstats.qlen = cl_q->q.qlen;
 	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl_q->qstats, cl_q->q.qlen) < 0)
 		return -1;

 	return 0;
@@ -664,14 +664,13 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	struct tc_qfq_stats xstats;

 	memset(&xstats, 0, sizeof(xstats));
-	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;

 	xstats.weight = cl->agg->class_weight;
 	xstats.lmax = cl->agg->lmax;

 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+	    gnet_stats_copy_queue(d, &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
 		return -1;

 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -871,7 +871,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.qlen = slot->qlen;
 		qs.backlog = slot->backlog;
 	}
-	if (gnet_stats_copy_queue(d, &qs) < 0)
+	if (gnet_stats_copy_queue(d, &qs, qs.qlen) < 0)
 		return -1;
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
 }