igb: move alloc_failed and csum_err stats into per rx-ring stat
The allocation failed and checksum error stats are currently kept as
global stats. If we end up allocating the queues to multiple netdevs,
the global counters don't make much sense. For this reason I felt it
necessary to move the alloc_rx_buff_failed stat into the rx_stats
portion of the rx_ring.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4c844851d1
commit 04a5fcaaf0
3 changed files with 17 additions and 15 deletions
drivers/net/igb/igb.h
@@ -145,12 +145,15 @@ struct igb_buffer {
 struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
+	u64 restart_queue;
 };
 
 struct igb_rx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
 };
 
 struct igb_q_vector {
@@ -241,7 +244,6 @@ struct igb_adapter {
 
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
-	unsigned int restart_queue;
 	unsigned long tx_queue_len;
 	u32 txd_cmd;
 	u32 gotc;
@@ -255,8 +257,6 @@ struct igb_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	u64 hw_csum_err;
-	u32 alloc_rx_buff_failed;
 	u32 gorc;
 	u64 gorc_old;
 	u32 max_frame_size;
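With the counters moved into igb_tx_queue_stats/igb_rx_queue_stats, an
adapter-wide total is still recoverable by summing over the rings, so
nothing is lost by dropping the global fields. A minimal sketch of such
a rollup (the helper igb_sum_rx_ring_stats is hypothetical, not part of
this patch):

/* Hypothetical helper, not in this patch: sum the per-ring counters
 * whenever a single adapter-wide figure is still wanted. */
static void igb_sum_rx_ring_stats(struct igb_adapter *adapter,
				  u64 *csum_err, u64 *alloc_failed)
{
	int i;

	*csum_err = 0;
	*alloc_failed = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		*csum_err += adapter->rx_ring[i].rx_stats.csum_err;
		*alloc_failed += adapter->rx_ring[i].rx_stats.alloc_failed;
	}
}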
drivers/net/igb/igb_ethtool.c
@@ -84,7 +84,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
 	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
 	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
 	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
 	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -95,9 +94,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
 	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
 	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
 	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
 	{ "tx_smbus", IGB_STAT(stats.mgptc) },
 	{ "rx_smbus", IGB_STAT(stats.mgprc) },
 	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
@@ -2031,6 +2028,8 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
@@ -2039,6 +2038,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_drops", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 /*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
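The new per-queue strings need matching values on the
igb_get_ethtool_stats() side. A sketch of how that side can copy the
ring counters positionally, assuming (as the driver does for the
existing queue stats) that the u64 fields of igb_rx_queue_stats are laid
out in the same order as the strings above; variable names follow the
surrounding code but are illustrative here:

	/* Sketch: copy each ring's counters into the ethtool data array
	 * in the same order the strings were emitted above. */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		u64 *queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
		int k;
		int stat_count = sizeof(struct igb_rx_queue_stats) /
				 sizeof(u64);

		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	/* the tx rings would be walked the same way for tx_stats */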
drivers/net/igb/igb_main.c
@@ -3562,8 +3562,6 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 static int __igb_maybe_stop_tx(struct net_device *netdev,
 			       struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
-
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
 	/* Herbert's original patch had:
@@ -3578,7 +3576,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
@@ -4734,7 +4732,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4801,7 +4799,8 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
+				       struct igb_adapter *adapter,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
@@ -4820,7 +4819,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 	 */
 	if (!((adapter->hw.mac.type == e1000_82576) &&
 	      (skb->len == 60)))
-		adapter->hw_csum_err++;
+		ring->rx_stats.csum_err++;
 	/* let the stack verify checksum errors */
 	return;
 }
@@ -4979,7 +4978,7 @@ send_up:
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -5046,7 +5045,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		if (!buffer_info->page) {
 			buffer_info->page = alloc_page(GFP_ATOMIC);
 			if (!buffer_info->page) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 			buffer_info->page_offset = 0;
@@ -5063,7 +5062,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 		if (!buffer_info->skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 
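With the patch applied, the counters surface per queue in ethtool
output, named by the strings added above. An illustrative session (the
interface name and counter values are made up):

$ ethtool -S eth0 | grep rx_queue_0
     rx_queue_0_packets: 123456
     rx_queue_0_bytes: 987654321
     rx_queue_0_drops: 0
     rx_queue_0_csum_err: 2
     rx_queue_0_alloc_failed: 0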