Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-10-28

This series contains updates to i40e and i40evf only.

Carolyn provides a couple of fixes.  The first resolves a problem in
the client interface that was causing random stack traces in the RDMA
driver, due to a timing-related NULL pointer dereference.  The second
fixes a problem where it could take a very long time to print the
link-down notification, by changing how often we update link info from
firmware.
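
Concretely, the updated check in i40e_update_link_info() (see the
i40e_common.c hunk below) skips the slower PHY capabilities query
exactly when the link has just gone down, so the link-down report is
not delayed behind it:

	/* extra checking needed to ensure link info to user is timely */
	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
		status = i40e_aq_get_phy_capabilities(hw, false, false,
						      &abilities, NULL);
		/* ... */
	}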

Alex provides a number of changes.  First is a rewrite of the busy-wait
loop in the Flow Director transmit function to reduce code size.  He
also cleans up unused code in favor of the same functionality inlined,
drops the Flow Director support for SCTP since we cannot currently
support it, and cleans up redundant code in the receive clean-up path.
Finally, he cleans up the convoluted configuration of how the driver
handles the debug flags contained in msg_level.
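
For the busy-wait rewrite, the before/after shape is sketched below
from the i40e_txrx.c hunk, using the driver's I40E_FD_CLEAN_DELAY
budget and I40E_DESC_UNUSED() check:

	/* before: count a delay up, then re-test after the loop */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	/* after: a single loop counting the budget down to zero */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}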

Filip fixes an incorrect bit mask that was being used to test "get
link status".  He also cleans up a workaround that is no longer needed
on production NICs and was causing frames to pass while disregarding
VLAN tagging.

Mitch brings another fix for the client interface supporting the VF
RDMA driver, allowing clients to recover from a reset by re-opening
existing client instances.
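
In i40e_client_subtask() (see the i40e_client.c hunk below) this
amounts to re-sending an Open request for any instance that exists but
is not yet flagged opened, roughly:

	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
		/* Send an Open request to the client */
		if (client->ops && client->ops->open)
			ret = client->ops->open(&cdev->lan_info, client);
		if (!ret) {
			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
		} else {
			/* open failed; remove the client instance */
			i40e_client_del_instance(pf, client);
		}
	}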

Alan fixes a bug in which a "perfect storm" of events could cause
interrupts to fail to be correctly affinitized.
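
The fix (see the i40e_main.c and i40e_txrx.c hunks below) registers an
affinity notifier per queue vector so the driver's cached mask tracks
runtime changes, and makes the NAPI poll routine stop polling when it
finds itself on the wrong CPU so the interrupt can move.  Registration,
in sketch form (irq_num is the vector's Linux IRQ number):

	/* register for affinity change notifications */
	q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
	q_vector->affinity_notify.release = i40e_irq_affinity_release;
	irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

	/* assign the mask for this irq */
	irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);

When poll exits early for this reason, a forced write-back
(i40e_force_wb()) raises a software interrupt so polling resumes on the
correct CPU.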

Lihong fixes a confusing dmesg message reported when users use the
ethtool -L option.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-10-29 16:19:41 -04:00
commit 6ce40fc541
13 changed files with 232 additions and 261 deletions

drivers/net/ethernet/intel/i40e/i40e.h

@@ -607,6 +607,8 @@ struct i40e_q_vector {
unsigned long hung_detected; /* Set/Reset for hung_detection logic */
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
@@ -728,8 +730,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,

drivers/net/ethernet/intel/i40e/i40e_client.c

@@ -287,6 +287,7 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
}
cdev->client->ops->close(&cdev->lan_info, cdev->client,
reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
}
}
@@ -405,37 +406,6 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
return capable;
}
/**
* i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
* @pf: board private structure
* @type: vsi type
* @start_vsi: a VSI pointer from where to start the search
*
* Returns non NULL on success or NULL for failure
**/
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
enum i40e_vsi_type type,
struct i40e_vsi *start_vsi)
{
struct i40e_vsi *vsi;
int i = 0;
if (start_vsi) {
for (i = 0; i < pf->num_alloc_vsi; i++) {
vsi = pf->vsi[i];
if (vsi == start_vsi)
break;
}
}
for (; i < pf->num_alloc_vsi; i++) {
vsi = pf->vsi[i];
if (vsi && vsi->type == type)
return vsi;
}
return NULL;
}
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
@@ -565,7 +535,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
} else {
dev_warn(&pf->pdev->dev, "This client %s is being instanciated at probe\n",
dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
client->name);
}
@@ -575,29 +545,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
continue;
if (!existing) {
/* Also up the ref_cnt for no. of instances of this
* client.
*/
atomic_inc(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
client->name, pf->hw.pf_id,
pf->hw.bus.device, pf->hw.bus.func);
}
mutex_lock(&i40e_client_instance_mutex);
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
if (client->ops && client->ops->open)
ret = client->ops->open(&cdev->lan_info, client);
atomic_dec(&cdev->ref_cnt);
if (!ret) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
} else {
/* remove client instance */
mutex_unlock(&i40e_client_instance_mutex);
i40e_client_del_instance(pf, client);
atomic_dec(&client->ref_cnt);
continue;
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state)) {
/* Send an Open request to the client */
if (client->ops && client->ops->open)
ret = client->ops->open(&cdev->lan_info,
client);
if (!ret) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
} else {
/* remove client instance */
i40e_client_del_instance(pf, client);
}
}
mutex_unlock(&i40e_client_instance_mutex);
}
@@ -694,10 +660,6 @@ static int i40e_client_release(struct i40e_client *client)
continue;
pf = (struct i40e_pf *)cdev->lan_info.pf;
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (atomic_read(&cdev->ref_cnt) > 0) {
ret = I40E_ERR_NOT_READY;
goto out;
}
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
false);
@@ -710,11 +672,9 @@ static int i40e_client_release(struct i40e_client *client)
}
/* delete the client instance from the list */
list_move(&cdev->list, &cdevs_tmp);
atomic_dec(&client->ref_cnt);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
}
out:
mutex_unlock(&i40e_client_instance_mutex);
/* free the client device and release its vsi */
@@ -1040,17 +1000,10 @@ int i40e_unregister_client(struct i40e_client *client)
ret = -ENODEV;
goto out;
}
if (atomic_read(&client->ref_cnt) == 0) {
clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
list_del(&client->list);
pr_info("i40e: Unregistered client %s with return code %d\n",
client->name, ret);
} else {
ret = I40E_ERR_NOT_READY;
pr_err("i40e: Client %s failed unregister - client has open instances\n",
client->name);
}
clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
list_del(&client->list);
pr_info("i40e: Unregistered client %s with return code %d\n",
client->name, ret);
out:
mutex_unlock(&i40e_client_mutex);
return ret;

drivers/net/ethernet/intel/i40e/i40e_client.h

@@ -203,8 +203,6 @@ struct i40e_client_instance {
struct i40e_info lan_info;
struct i40e_client *client;
unsigned long state;
/* A count of all the in-progress calls to the client */
atomic_t ref_cnt;
};
struct i40e_client {

drivers/net/ethernet/intel/i40e/i40e_common.c

@@ -1849,7 +1849,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
@@ -2494,7 +2494,10 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
/* extra checking needed to ensure link info to user is timely */
if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
!(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)

drivers/net/ethernet/intel/i40e/i40e_debugfs.c

@@ -1210,24 +1210,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"dump debug fwdata <cluster_id> <table_id> <index>\n");
}
} else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
u32 level;
cnt = sscanf(&cmd_buf[10], "%i", &level);
if (cnt) {
if (I40E_DEBUG_USER & level) {
pf->hw.debug_mask = level;
dev_info(&pf->pdev->dev,
"set hw.debug_mask = 0x%08x\n",
pf->hw.debug_mask);
}
pf->msg_enable = level;
dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
pf->msg_enable);
} else {
dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
pf->msg_enable);
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
@@ -1644,7 +1626,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump desc aq\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
dev_info(&pf->pdev->dev, " msg_enable [level]\n");
dev_info(&pf->pdev->dev, " read <reg>\n");
dev_info(&pf->pdev->dev, " write <reg> <value>\n");
dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");

drivers/net/ethernet/intel/i40e/i40e_ethtool.c

@@ -104,7 +104,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev, not on the VMDq or FCoE netdev.
*/
static struct i40e_stats i40e_gstrings_stats[] = {
static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
@@ -978,6 +978,10 @@ static u32 i40e_get_msglevel(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
u32 debug_mask = pf->hw.debug_mask;
if (debug_mask)
netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
return pf->msg_enable;
}
@@ -989,7 +993,8 @@ static void i40e_set_msglevel(struct net_device *netdev, u32 data)
if (I40E_DEBUG_USER & data)
pf->hw.debug_mask = data;
pf->msg_enable = data;
else
pf->msg_enable = data;
}
static int i40e_get_regs_len(struct net_device *netdev)

drivers/net/ethernet/intel/i40e/i40e_main.c

@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 16
#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -93,8 +93,8 @@ MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
@@ -1286,39 +1286,6 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
return -ENOENT;
}
/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
*
* Remove whatever filter the firmware set up so the driver can manage
* its own filtering intelligently.
**/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
return;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
/* Ignore error returns, some firmware does it this way... */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
@@ -3316,6 +3283,33 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED;
}
/**
* i40e_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
*
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
**/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct i40e_q_vector *q_vector =
container_of(notify, struct i40e_q_vector, affinity_notify);
q_vector->affinity_mask = *mask;
}
/**
* i40e_irq_affinity_release - Callback for affinity notifier release
* @ref: internal core kernel usage
*
* This is a callback function used by the irq_set_affinity_notifier function
* to inform the current notification subscriber that they will no longer
* receive notifications.
**/
static void i40e_irq_affinity_release(struct kref *ref) {}
/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
@@ -3331,10 +3325,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
int irq_num;
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
irq_num = pf->msix_entries[base + vector].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
@@ -3349,7 +3346,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
err = request_irq(pf->msix_entries[base + vector].vector,
err = request_irq(irq_num,
vsi->irq_handler,
0,
q_vector->name,
@@ -3359,9 +3356,13 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
"MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* register for affinity change notifications */
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
&q_vector->affinity_mask);
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
vsi->irqs_ready = true;
@@ -3370,10 +3371,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
free_queue_irqs:
while (vector) {
vector--;
irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
NULL);
free_irq(pf->msix_entries[base + vector].vector,
&(vsi->q_vectors[vector]));
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
}
@@ -4012,19 +4013,23 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
int irq_num;
u16 vector;
vector = i + base;
irq_num = pf->msix_entries[vector].vector;
/* free only the irqs that were actually requested */
if (!vsi->q_vectors[i] ||
!vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
synchronize_irq(pf->msix_entries[vector].vector);
free_irq(pf->msix_entries[vector].vector,
vsi->q_vectors[i]);
irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -8362,8 +8367,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
i40e_pf_config_rss(pf);
}
dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
pf->alloc_rss_size, pf->rss_size_max);
dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
vsi->req_queue_pairs, pf->rss_size_max);
return pf->alloc_rss_size;
}
@@ -8506,15 +8511,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
int err = 0;
int size;
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
I40E_DEFAULT_MSG_ENABLE);
}
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
@@ -9180,12 +9176,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
/* The following steps are necessary to prevent reception
* of tagged packets - some older NVM configurations load a
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -9703,8 +9693,6 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
if (vsi->type == I40E_VSI_MAIN)
i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */
ret = i40e_alloc_rings(vsi);
@@ -10828,10 +10816,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
if (debug != -1) {
pf->msg_enable = pf->hw.debug_mask;
pf->msg_enable = debug;
}
pf->msg_enable = netif_msg_init(debug,
NETIF_MSG_DRV |
NETIF_MSG_PROBE |
NETIF_MSG_LINK);
if (debug < -1)
pf->hw.debug_mask = debug;
/* do a special CORER for clearing PXE mode once at init */
if (hw->revision_id == 0 &&
@@ -10973,7 +10963,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */

drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -122,7 +122,6 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
struct device *dev;
dma_addr_t dma;
u32 td_cmd = 0;
u16 delay = 0;
u16 i;
/* find existing FDIR VSI */
@@ -137,15 +136,11 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
dev = tx_ring->dev;
/* we need two descriptors to add/del a filter and we can wait */
do {
if (I40E_DESC_UNUSED(tx_ring) > 1)
break;
for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
if (!i)
return -EAGAIN;
msleep_interruptible(1);
delay++;
} while (delay < I40E_FD_CLEAN_DELAY);
if (!(I40E_DESC_UNUSED(tx_ring) > 1))
return -EAGAIN;
}
dma = dma_map_single(dev, raw_packet,
I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
@@ -335,22 +330,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
return err ? -EOPNOTSUPP : 0;
}
/**
* i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
* a specific flow spec
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
*
* Returns 0 if the filters were successfully added or removed
**/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
struct i40e_fdir_filter *fd_data,
bool add)
{
return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
* i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
@@ -433,12 +412,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case UDP_V4_FLOW:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
case SCTP_V4_FLOW:
ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
break;
case IPV4_FLOW:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
case IP_USER_FLOW:
switch (input->ip4_proto) {
case IPPROTO_TCP:
@@ -447,15 +420,16 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
case IPPROTO_UDP:
ret = i40e_add_del_fdir_udpv4(vsi, input, add);
break;
case IPPROTO_SCTP:
ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
break;
default:
case IPPROTO_IP:
ret = i40e_add_del_fdir_ipv4(vsi, input, add);
break;
default:
/* We cannot support masking based on protocol */
goto unsupported_flow;
}
break;
default:
unsupported_flow:
dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
input->flow_type);
ret = -EINVAL;
@@ -1246,7 +1220,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -1767,7 +1740,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1781,21 +1753,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero
*/
if (!rx_desc->wb.qword1.status_error_len)
if (!i40e_test_staterr(rx_desc,
BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1829,6 +1793,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
/* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -2025,12 +1993,25 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
const cpumask_t *aff_mask = &q_vector->affinity_mask;
int cpu_id = smp_processor_id();
/* It is possible that the interrupt affinity has changed but,
* if the cpu is pegged at 100%, polling will never exit while
* traffic continues and the interrupt will be stuck on this
* cpu. We check to make sure affinity is correct before we
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
tx_only:
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector);
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector);
}
return budget;
}
return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
@@ -2038,11 +2019,18 @@ tx_only:
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
i40e_update_enable_itr(vsi, q_vector);
} else { /* Legacy mode */
/* If we're prematurely stopping polling to fix the interrupt
* affinity we want to make sure polling starts back up so we
* issue a call to i40e_force_wb which triggers a SW interrupt.
*/
if (!clean_complete)
i40e_force_wb(vsi, q_vector);
else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
i40e_irq_dynamic_enable_icr0(vsi->back, false);
}
else
i40e_update_enable_itr(vsi, q_vector);
return 0;
}

drivers/net/ethernet/intel/i40e/i40e_virtchnl.h

@@ -165,6 +165,10 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;

drivers/net/ethernet/intel/i40evf/i40e_txrx.c

@@ -705,7 +705,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -1209,7 +1208,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1223,21 +1221,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero
*/
if (!rx_desc->wb.qword1.status_error_len)
if (!i40e_test_staterr(rx_desc,
BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1271,6 +1261,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
/* populate checksum, VLAN, and protocol */
i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -1461,12 +1455,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
const cpumask_t *aff_mask = &q_vector->affinity_mask;
int cpu_id = smp_processor_id();
/* It is possible that the interrupt affinity has changed but,
* if the cpu is pegged at 100%, polling will never exit while
* traffic continues and the interrupt will be stuck on this
* cpu. We check to make sure affinity is correct before we
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
tx_only:
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector);
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
i40e_enable_wb_on_itr(vsi, q_vector);
}
return budget;
}
return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
@@ -1474,7 +1480,16 @@ tx_only:
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
i40e_update_enable_itr(vsi, q_vector);
/* If we're prematurely stopping polling to fix the interrupt
* affinity we want to make sure polling starts back up so we
* issue a call to i40evf_force_wb which triggers a SW interrupt.
*/
if (!clean_complete)
i40evf_force_wb(vsi, q_vector);
else
i40e_update_enable_itr(vsi, q_vector);
return 0;
}

drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h

@@ -162,6 +162,10 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;

drivers/net/ethernet/intel/i40evf/i40evf.h

@@ -107,7 +107,8 @@ struct i40e_q_vector {
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
cpumask_var_t affinity_mask;
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.

drivers/net/ethernet/intel/i40evf/i40evf_main.c

@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 16
#define DRV_VERSION_BUILD 21
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -495,6 +495,33 @@ static void i40evf_netpoll(struct net_device *netdev)
}
#endif
/**
* i40evf_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
*
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
**/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct i40e_q_vector *q_vector =
container_of(notify, struct i40e_q_vector, affinity_notify);
q_vector->affinity_mask = *mask;
}
/**
* i40evf_irq_affinity_release - Callback for affinity notifier release
* @ref: internal core kernel usage
*
* This is a callback function used by the irq_set_affinity_notifier function
* to inform the current notification subscriber that they will no longer
* receive notifications.
**/
static void i40evf_irq_affinity_release(struct kref *ref) {}
/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -507,6 +534,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
int vector, err, q_vectors;
int rx_int_idx = 0, tx_int_idx = 0;
int irq_num;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -514,6 +542,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -532,21 +561,23 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
/* skip this unused q_vector */
continue;
}
err = request_irq(
adapter->msix_entries[vector + NONQ_VECS].vector,
i40evf_msix_clean_rings,
0,
q_vector->name,
q_vector);
err = request_irq(irq_num,
i40evf_msix_clean_rings,
0,
q_vector->name,
q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* register for affinity change notifications */
q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
q_vector->affinity_notify.release =
i40evf_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
irq_set_affinity_hint(
adapter->msix_entries[vector + NONQ_VECS].vector,
q_vector->affinity_mask);
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
return 0;
@@ -554,11 +585,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
free_queue_irqs:
while (vector) {
vector--;
irq_set_affinity_hint(
adapter->msix_entries[vector + NONQ_VECS].vector,
NULL);
free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
&adapter->q_vectors[vector]);
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
}
@@ -599,16 +629,15 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
**/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
int i;
int q_vectors;
int vector, irq_num, q_vectors;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (i = 0; i < q_vectors; i++) {
irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
NULL);
free_irq(adapter->msix_entries[i+1].vector,
&adapter->q_vectors[i]);
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}