Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to ixgbe. ...

Alexander Duyck (9):
  ixgbe: Use VMDq offset to indicate the default pool
  ixgbe: Fix memory leak when SR-IOV VFs are direct assigned
  ixgbe: Drop references to deprecated pci_ DMA api and instead use dma_ API
  ixgbe: Cleanup configuration of FCoE registers
  ixgbe: Merge all FCoE percpu values into a single structure
  ixgbe: Make FCoE allocation and configuration closer to how rings work
  ixgbe: Correctly set SAN MAC RAR pool to default pool of PF
  ixgbe: Only enable anti-spoof on VF pools
  ixgbe: Enable FCoE FSO and CRC offloads based on CAPABLE instead of ENABLED flag
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e4bce0f288
10 changed files with 348 additions and 252 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -113,7 +113,7 @@
 #define IXGBE_MAX_VFTA_ENTRIES 128
 #define MAX_EMULATION_MAC_ADDRS 16
 #define IXGBE_MAX_PF_MACVLANS 15
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
 #define IXGBE_82599_VF_DEVICE_ID 0x10ED
 #define IXGBE_X540_VF_DEVICE_ID 0x1515
 
@@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
			  union ixgbe_adv_rx_desc *rx_desc,
			  struct sk_buff *skb);
@@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
				 struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
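The VMDQ_P() change above is the heart of the VMDq-offset patch: the PF's default pool is now derived from the VMDq ring-feature offset rather than from num_vfs, so the macro resolves correctly whether the pools ahead of the PF belong to SR-IOV VFs or to VMDq itself. A standalone sketch of the before/after arithmetic, using hypothetical values rather than driver code:

#include <stdio.h>

struct ring_feature { unsigned int offset; };

int main(void)
{
	unsigned int num_vfs = 8;                   /* hypothetical VF count */
	struct ring_feature vmdq = { .offset = 8 }; /* pools reserved ahead of the PF */

	/* old: VMDQ_P(p) == (p) + adapter->num_vfs */
	printf("old PF default pool: %u\n", 0 + num_vfs);

	/* new: VMDQ_P(p) == (p) + adapter->ring_feature[RING_F_VMDQ].offset */
	printf("new PF default pool: %u\n", 0 + vmdq.offset);
	return 0;
}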
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1025,6 +1025,9 @@ mac_reset_top:
 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
 		/* Reserve the last RAR for the SAN MAC address */
 		hw->mac.num_rar_entries--;
 	}
@@ -2106,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_generic,
+	.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq = &ixgbe_clear_vmdq_generic,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2847,6 +2847,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	return 0;
 }
 
+/**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertized and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+	u32 rar = hw->mac.san_mac_rar_index;
+
+	if (vmdq < 32) {
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+	}
+
+	return 0;
+}
+
 /**
  * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
  * @hw: pointer to hardware structure
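Each RAR entry is paired with MPSAR_LO/MPSAR_HI registers that together form a 64-bit pool bitmap, one bit per VMDq pool; ixgbe_set_vmdq_san_mac_generic() above sets exactly one bit so that only the default pool claims the SAN MAC. A standalone sketch of the register/bit selection (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Select the MPSAR half and bit for pool 'vmdq' (0..63). */
static void mpsar_for_pool(uint32_t vmdq, uint32_t *lo, uint32_t *hi)
{
	if (vmdq < 32) {
		*lo = 1u << vmdq;
		*hi = 0;
	} else {
		*lo = 0;
		*hi = 1u << (vmdq - 32);
	}
}

int main(void)
{
	uint32_t lo, hi;

	mpsar_for_pool(40, &lo, &hi); /* pool 40 -> bit 8 of MPSAR_HI */
	printf("MPSAR_LO=0x%08x MPSAR_HI=0x%08x\n", lo, hi);
	return 0;
}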
@@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
-	for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
 
-	/* If not enabling anti-spoofing then done */
-	if (!enable)
-		return;
-
	/*
	 * The PF should be allowed to spoof so that it can support
-	 * emulation mode NICs. Reset the bit assigned to the PF
+	 * emulation mode NICs. Do not set the bits assigned to the PF
	 */
-	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
-	pfvfspoof ^= (1 << pf_target_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+	pfvfspoof &= (1 << pf_target_shift) - 1;
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+	/*
+	 * Remaining pools belong to the PF so they do not need to have
+	 * anti-spoofing enabled.
+	 */
+	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
 }
 
 /**
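In the anti-spoof rework above, pf is an absolute pool index: PFVFSPOOF is an array of eight registers carrying eight MAC anti-spoof enable bits each, so the PF lives at register pf / 8, bit pf % 8 (pf_target_reg and pf_target_shift). The new code enables anti-spoofing only for VF pools below the PF and leaves the PF's bit, and every pool above it, clear. A standalone sketch of that masking with a hypothetical pool index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int pf = 10;                  /* hypothetical: PF owns pool 10 */
	int pf_target_reg = pf >> 3;  /* register 1 */
	int pf_target_shift = pf & 7; /* bit 2 */
	uint32_t pfvfspoof = 0xff;    /* would enable all 8 pools in the register */

	/* keep only the enable bits for VF pools below the PF */
	pfvfspoof &= (1u << pf_target_shift) - 1;
	printf("PFVFSPOOF[%d] = 0x%02x\n", pf_target_reg, pfvfspoof);
	return 0;
}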
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
		udelay(100);
	}
	if (ddp->sgl)
-		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
-		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}
 
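The substitution in this hunk, repeated through the rest of the file, is mechanical: the legacy pci_* DMA helpers are thin wrappers over the dma_* API, with &pdev->dev standing in for the pci_dev argument. The correspondence for the calls touched by this series:

/* Legacy pci_ wrapper                        Equivalent dma_ call
 * pci_map_sg(pdev, sgl, n, dir)          ->  dma_map_sg(&pdev->dev, sgl, n, dir)
 * pci_unmap_sg(pdev, sgl, n, dir)        ->  dma_unmap_sg(&pdev->dev, sgl, n, dir)
 * pci_pool_create(name, pdev, sz, a, b)  ->  dma_pool_create(name, &pdev->dev, sz, a, b)
 * pci_pool_alloc(pool, flags, &dma)      ->  dma_pool_alloc(pool, flags, &dma)
 * pci_pool_free(pool, vaddr, dma)        ->  dma_pool_free(pool, vaddr, dma)
 * pci_pool_destroy(pool)                 ->  dma_pool_destroy(pool)
 */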
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
-	struct pci_pool *pool;
-	unsigned int cpu;
 
	if (!netdev || !sgl)
		return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
		return 0;
 
	fcoe = &adapter->fcoe;
-	if (!fcoe->pool) {
-		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-		return 0;
-	}
-
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	}
	ixgbe_fcoe_clear_ddp(ddp);
 
-	/* setup dma from scsi command sgl */
-	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
-	if (dmacount == 0) {
-		e_err(drv, "xid 0x%x DMA map error\n", xid);
+	if (!fcoe->ddp_pool) {
+		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}
 
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+	if (!ddp_pool->pool) {
+		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+		goto out_noddp;
+	}
+
+	/* setup dma from scsi command sgl */
+	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+	if (dmacount == 0) {
+		e_err(drv, "xid 0x%x DMA map error\n", xid);
+		goto out_noddp;
+	}
+
	/* alloc the udl from per cpu ddp pool */
-	cpu = get_cpu();
-	pool = *per_cpu_ptr(fcoe->pool, cpu);
-	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
-	ddp->pool = pool;
+	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;
 
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	while (len) {
		/* max number of buffers allowed in one DDP context */
		if (j >= IXGBE_BUFFCNT_MAX) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+			ddp_pool->noddp++;
			goto out_noddp_free;
		}
 
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
-			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}
 
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
	return 1;
 
 out_noddp_free:
-	pci_pool_free(pool, ddp->udl, ddp->udp);
+	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
-	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
	put_cpu();
	return 0;
 }
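A structural point in the setup rework above: a per-CPU pointer for the current CPU is only stable while preemption is disabled, so the function now pins itself with get_cpu() before touching its ddp_pool slot, and every exit, including the new out_noddp label, funnels through put_cpu(). A condensed kernel-style sketch of the pattern (illustrative, not the full function):

	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); /* disables preemption */
	if (!ddp_pool->pool)
		goto out_noddp;  /* error paths must still drop the pin */

	/* ... map the scatterlist and build the DDP context ... */

out_noddp:
	put_cpu();               /* re-enables preemption */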
@@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		break;
	/* unmap the sg list when FCPRSP is received */
	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
-		pci_unmap_sg(adapter->pdev, ddp->sgl,
+		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = ddp_err;
		ddp->sgl = NULL;
@@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
	return 0;
 }
 
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
 {
-	unsigned int cpu;
-	struct pci_pool **pool;
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
 
-	for_each_possible_cpu(cpu) {
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		if (*pool)
-			pci_pool_destroy(*pool);
-	}
-	free_percpu(fcoe->pool);
-	fcoe->pool = NULL;
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	if (ddp_pool->pool)
+		dma_pool_destroy(ddp_pool->pool);
+	ddp_pool->pool = NULL;
 }
 
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+				     struct device *dev,
+				     unsigned int cpu)
 {
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	unsigned int cpu;
-	struct pci_pool **pool;
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
+	struct dma_pool *pool;
	char pool_name[32];
 
-	fcoe->pool = alloc_percpu(struct pci_pool *);
-	if (!fcoe->pool)
-		return;
+	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
 
-	/* allocate pci pool for each cpu */
-	for_each_possible_cpu(cpu) {
-		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		*pool = pci_pool_create(pool_name,
-					adapter->pdev, IXGBE_FCPTR_MAX,
-					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
-		if (!*pool) {
-			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
-			ixgbe_fcoe_ddp_pools_free(fcoe);
-			return;
-		}
-	}
+	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+	if (!pool)
+		return -ENOMEM;
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	ddp_pool->pool = pool;
+	ddp_pool->noddp = 0;
+	ddp_pool->noddp_ext_buff = 0;
+
+	return 0;
 }
 
 /**
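For orientation, the two helpers above are one stage of a three-stage lifecycle introduced by this series: ixgbe_fcoe_ddp_enable() allocates the percpu ixgbe_fcoe_ddp_pool array, ixgbe_setup_fcoe_ddp_resources() creates one DMA pool per CPU, and the disable/free paths undo both in reverse. A condensed sketch of the allocation flow (kernel-style, simplified from the functions in the next hunk):

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
	if (!fcoe->ddp_pool)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		if (ixgbe_fcoe_dma_pool_alloc(fcoe, &adapter->pdev->dev, cpu)) {
			ixgbe_free_fcoe_ddp_resources(adapter);
			return -ENOMEM;
		}
	}
	return 0;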
@@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
  */
 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
-	int i, fcoe_q, fcoe_i;
+	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-	unsigned int cpu;
-
-	if (!fcoe->pool) {
-		spin_lock_init(&fcoe->lock);
-
-		ixgbe_fcoe_ddp_pools_alloc(adapter);
-		if (!fcoe->pool) {
-			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
-			return;
-		}
-
-		/* Extra buffer to be shared by all DDPs for HW work around */
-		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-		if (fcoe->extra_ddp_buffer == NULL) {
-			e_err(drv, "failed to allocated extra DDP buffer\n");
-			goto out_ddp_pools;
-		}
-
-		fcoe->extra_ddp_buffer_dma =
-			dma_map_single(&adapter->pdev->dev,
-				       fcoe->extra_ddp_buffer,
-				       IXGBE_FCBUFF_MIN,
-				       DMA_FROM_DEVICE);
-		if (dma_mapping_error(&adapter->pdev->dev,
-				      fcoe->extra_ddp_buffer_dma)) {
-			e_err(drv, "failed to map extra DDP buffer\n");
-			goto out_extra_ddp_buffer;
-		}
-
-		/* Alloc per cpu mem to count the ddp alloc failure number */
-		fcoe->pcpu_noddp = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp) {
-			e_err(drv, "failed to alloc noddp counter\n");
-			goto out_pcpu_noddp_alloc_fail;
-		}
-
-		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp_ext_buff) {
-			e_err(drv, "failed to alloc noddp extra buff cnt\n");
-			goto out_pcpu_noddp_extra_buff_alloc_fail;
-		}
-
-		for_each_possible_cpu(cpu) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
-			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
-		}
-	}
+	int i, fcoe_q, fcoe_i;
+	u32 etqf;
 
-	/* Enable L2 eth type filter for FCoE */
-	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
-			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
-	/* Enable L2 eth type filter for FIP */
-	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
-			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
-	if (adapter->ring_feature[RING_F_FCOE].indices) {
-		/* Use multiple rx queues for FCoE by redirection table */
-		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-			fcoe_i = f->offset + i % f->indices;
-			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
-		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
-	} else {
-		/* Use single rx queue for FCoE */
-		fcoe_i = f->offset;
-		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
-		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
-				IXGBE_ETQS_QUEUE_EN |
-				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
-	}
-	/* send FIP frames to the first FCoE queue */
-	fcoe_i = f->offset;
-	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+	/* Minimal functionality for FCoE requires at least CRC offloads */
+	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+		return;
+
+	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		etqf |= IXGBE_ETQF_POOL_ENABLE;
+		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+
+	/* leave registers un-configured if FCoE is disabled */
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+		return;
+
+	/* Use one or more Rx queues for FCoE by redirection table */
+	for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+		fcoe_i = fcoe->offset + (i % fcoe->indices);
+		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
+
+	/* Enable L2 EtherType filter for FIP */
+	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		etqf |= IXGBE_ETQF_POOL_ENABLE;
+		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+	/* Send FIP frames to the first FCoE queue */
+	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+	/* Configure FCoE Rx control */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-	return;
-out_pcpu_noddp_extra_buff_alloc_fail:
-	free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-	dma_unmap_single(&adapter->pdev->dev,
-			 fcoe->extra_ddp_buffer_dma,
-			 IXGBE_FCBUFF_MIN,
-			 DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
-	kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter : ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns : none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	int cpu, i;
 
-	if (!fcoe->pool)
+	/* do nothing if no DDP pools were allocated */
+	if (!fcoe->ddp_pool)
		return;
 
	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
 
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
-	free_percpu(fcoe->pcpu_noddp);
-	free_percpu(fcoe->pcpu_noddp_ext_buff);
	kfree(fcoe->extra_ddp_buffer);
-	ixgbe_fcoe_ddp_pools_free(fcoe);
+
+	fcoe->extra_ddp_buffer = NULL;
+	fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resouces
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct device *dev = &adapter->pdev->dev;
+	void *buffer;
+	dma_addr_t dma;
+	unsigned int cpu;
+
+	/* do nothing if no DDP pools were allocated */
+	if (!fcoe->ddp_pool)
+		return 0;
+
+	/* Extra buffer to be shared by all DDPs for HW work around */
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	if (!buffer) {
+		e_err(drv, "failed to allocate extra DDP buffer\n");
+		return -ENOMEM;
+	}
+
+	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma)) {
+		e_err(drv, "failed to map extra DDP buffer\n");
+		kfree(buffer);
+		return -ENOMEM;
+	}
+
+	fcoe->extra_ddp_buffer = buffer;
+	fcoe->extra_ddp_buffer_dma = dma;
+
+	/* allocate pci pool for each cpu */
+	for_each_possible_cpu(cpu) {
+		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+		if (!err)
+			continue;
+
+		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+		ixgbe_free_fcoe_ddp_resources(adapter);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+		return -EINVAL;
+
+	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+	if (!fcoe->ddp_pool) {
+		e_err(drv, "failed to allocate percpu DDP resources\n");
+		return -ENOMEM;
+	}
+
+	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+	return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	adapter->netdev->fcoe_ddp_xid = 0;
+
+	if (!fcoe->ddp_pool)
+		return;
+
+	free_percpu(fcoe->ddp_pool);
+	fcoe->ddp_pool = NULL;
 }
 
 /**
|
||||||
*/
|
*/
|
||||||
int ixgbe_fcoe_enable(struct net_device *netdev)
|
int ixgbe_fcoe_enable(struct net_device *netdev)
|
||||||
{
|
{
|
||||||
int rc = -EINVAL;
|
|
||||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||||
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
|
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
|
||||||
|
|
||||||
|
atomic_inc(&fcoe->refcnt);
|
||||||
|
|
||||||
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
|
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
|
||||||
goto out_enable;
|
return -EINVAL;
|
||||||
|
|
||||||
atomic_inc(&fcoe->refcnt);
|
|
||||||
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
|
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
|
||||||
goto out_enable;
|
return -EINVAL;
|
||||||
|
|
||||||
e_info(drv, "Enabling FCoE offload features.\n");
|
e_info(drv, "Enabling FCoE offload features.\n");
|
||||||
if (netif_running(netdev))
|
if (netif_running(netdev))
|
||||||
netdev->netdev_ops->ndo_stop(netdev);
|
netdev->netdev_ops->ndo_stop(netdev);
|
||||||
|
|
||||||
ixgbe_clear_interrupt_scheme(adapter);
|
/* Allocate per CPU memory to track DDP pools */
|
||||||
|
ixgbe_fcoe_ddp_enable(adapter);
|
||||||
|
|
||||||
|
/* enable FCoE and notify stack */
|
||||||
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
|
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
|
||||||
adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
|
|
||||||
netdev->features |= NETIF_F_FCOE_CRC;
|
|
||||||
netdev->features |= NETIF_F_FSO;
|
|
||||||
netdev->features |= NETIF_F_FCOE_MTU;
|
netdev->features |= NETIF_F_FCOE_MTU;
|
||||||
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
|
|
||||||
|
|
||||||
ixgbe_init_interrupt_scheme(adapter);
|
|
||||||
netdev_features_change(netdev);
|
netdev_features_change(netdev);
|
||||||
|
|
||||||
|
/* release existing queues and reallocate them */
|
||||||
|
ixgbe_clear_interrupt_scheme(adapter);
|
||||||
|
ixgbe_init_interrupt_scheme(adapter);
|
||||||
|
|
||||||
if (netif_running(netdev))
|
if (netif_running(netdev))
|
||||||
netdev->netdev_ops->ndo_open(netdev);
|
netdev->netdev_ops->ndo_open(netdev);
|
||||||
rc = 0;
|
|
||||||
|
|
||||||
out_enable:
|
return 0;
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -797,41 +831,35 @@ out_enable:
|
||||||
*/
|
*/
|
||||||
int ixgbe_fcoe_disable(struct net_device *netdev)
|
int ixgbe_fcoe_disable(struct net_device *netdev)
|
||||||
{
|
{
|
||||||
int rc = -EINVAL;
|
|
||||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||||
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
|
|
||||||
|
|
||||||
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
|
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
|
||||||
goto out_disable;
|
return -EINVAL;
|
||||||
|
|
||||||
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
|
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
|
||||||
goto out_disable;
|
return -EINVAL;
|
||||||
|
|
||||||
if (!atomic_dec_and_test(&fcoe->refcnt))
|
|
||||||
goto out_disable;
|
|
||||||
|
|
||||||
e_info(drv, "Disabling FCoE offload features.\n");
|
e_info(drv, "Disabling FCoE offload features.\n");
|
||||||
netdev->features &= ~NETIF_F_FCOE_CRC;
|
|
||||||
netdev->features &= ~NETIF_F_FSO;
|
|
||||||
netdev->features &= ~NETIF_F_FCOE_MTU;
|
|
||||||
netdev->fcoe_ddp_xid = 0;
|
|
||||||
netdev_features_change(netdev);
|
|
||||||
|
|
||||||
if (netif_running(netdev))
|
if (netif_running(netdev))
|
||||||
netdev->netdev_ops->ndo_stop(netdev);
|
netdev->netdev_ops->ndo_stop(netdev);
|
||||||
|
|
||||||
ixgbe_clear_interrupt_scheme(adapter);
|
/* Free per CPU memory to track DDP pools */
|
||||||
|
ixgbe_fcoe_ddp_disable(adapter);
|
||||||
|
|
||||||
|
/* disable FCoE and notify stack */
|
||||||
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
|
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
|
||||||
adapter->ring_feature[RING_F_FCOE].indices = 0;
|
netdev->features &= ~NETIF_F_FCOE_MTU;
|
||||||
ixgbe_cleanup_fcoe(adapter);
|
|
||||||
|
netdev_features_change(netdev);
|
||||||
|
|
||||||
|
/* release existing queues and reallocate them */
|
||||||
|
ixgbe_clear_interrupt_scheme(adapter);
|
||||||
ixgbe_init_interrupt_scheme(adapter);
|
ixgbe_init_interrupt_scheme(adapter);
|
||||||
|
|
||||||
if (netif_running(netdev))
|
if (netif_running(netdev))
|
||||||
netdev->netdev_ops->ndo_open(netdev);
|
netdev->netdev_ops->ndo_open(netdev);
|
||||||
rc = 0;
|
|
||||||
|
|
||||||
out_disable:
|
return 0;
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {
	struct scatterlist *sgl;
	dma_addr_t udp;
	u64 *udl;
-	struct pci_pool *pool;
+	struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+	struct dma_pool *pool;
+	u64 noddp;
+	u64 noddp_ext_buff;
 };
 
 struct ixgbe_fcoe {
-	struct pci_pool **pool;
+	struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
	atomic_t refcnt;
	spinlock_t lock;
	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
-	unsigned char *extra_ddp_buffer;
+	void *extra_ddp_buffer;
	dma_addr_t extra_ddp_buffer_dma;
	unsigned long mode;
-	u64 __percpu *pcpu_noddp;
-	u64 __percpu *pcpu_noddp_ext_buff;
 #ifdef CONFIG_IXGBE_DCB
	u8 up;
 #endif
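With the pool pointer and both allocation-failure counters folded into one per-CPU struct, a single per_cpu_ptr() dereference now reaches all three fields, and totals fall out of a sum over CPUs, as the ixgbe_update_stats() hunk further down does. A condensed kernel-style sketch of the read-back:

	u64 noddp = 0, noddp_ext_buff = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct ixgbe_fcoe_ddp_pool *p = per_cpu_ptr(fcoe->ddp_pool, cpu);

		noddp += p->noddp;
		noddp_ext_buff += p->noddp_ext_buff;
	}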
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3118,7 +3118,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
		psrtype |= 1 << 29;
 
	for (p = 0; p < adapter->num_rx_pools; p++)
-		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
				psrtype);
 }
 
@@ -3135,12 +3135,12 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	vmdctl |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
	vmdctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 
-	vf_shift = adapter->num_vfs % 32;
-	reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+	vf_shift = VMDQ_P(0) % 32;
+	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
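VFRE/VFTE are pool-enable bitmaps split across 32-bit registers, so the PF's pool index VMDQ_P(0) decomposes into a register selector and a bit position, which is exactly what reg_offset and vf_shift compute above. A standalone sketch with a hypothetical pool index:

#include <stdio.h>

int main(void)
{
	unsigned int pf_pool = 40;                         /* hypothetical VMDQ_P(0) */
	unsigned int vf_shift = pf_pool % 32;              /* bit within the register */
	unsigned int reg_offset = (pf_pool >= 32) ? 1 : 0; /* which 32-bit register */

	printf("IXGBE_VFRE(%u) |= ~0 << %u\n", reg_offset, vf_shift);
	return 0;
}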
@@ -3150,7 +3150,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
-	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
 
	/*
	 * Set up VF register offsets for selected VT Mode,
@@ -3310,10 +3310,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
-	int pool_ndx = adapter->num_vfs;
 
	/* add VID to filter table */
-	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
	set_bit(vid, adapter->active_vlans);
 
	return 0;
@@ -3323,10 +3322,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
-	int pool_ndx = adapter->num_vfs;
 
	/* remove VID from filter table */
-	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
	clear_bit(vid, adapter->active_vlans);
 
	return 0;
@@ -3444,7 +3442,6 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
 {
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
-	unsigned int vfn = adapter->num_vfs;
	unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
	int count = 0;
 
@@ -3462,7 +3459,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
			if (!rar_entries)
				break;
			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-					    vfn, IXGBE_RAH_AV);
+					    VMDQ_P(0), IXGBE_RAH_AV);
			count++;
		}
	}
@@ -3536,12 +3533,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
			vmolr |= IXGBE_VMOLR_ROPE;
	}
 
-	if (adapter->num_vfs) {
+	if (adapter->num_vfs)
		ixgbe_restore_vf_multicasts(adapter);
-		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
-		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
	}
 
	/* This is useful for sniffing bad packets. */
@@ -3808,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
	ixgbe_set_rx_mode(adapter->netdev);
	ixgbe_restore_vlan(adapter);
 
-#ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
@@ -3843,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 
	ixgbe_configure_virtualization(adapter);
 
+#ifdef IXGBE_FCOE
+	/* configure FCoE L2 filters, redirection table, and Rx control */
+	ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
 }
@@ -4120,8 +4118,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 
	/* reprogram the RAR[0] in case user changed it. */
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-			    IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+
+	/* update SAN MAC vmdq pool selection */
+	if (hw->mac.san_mac_rar_index)
+		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
 /**
@@ -4436,6 +4437,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
		break;
	}
 
+#ifdef IXGBE_FCOE
+	/* FCoE support exists, always init the FCoE lock */
+	spin_lock_init(&adapter->fcoe.lock);
+
+#endif
	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);
 
@@ -4664,7 +4670,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
		goto err_setup_rx;
	}
 
-	return 0;
+#ifdef IXGBE_FCOE
+	err = ixgbe_setup_fcoe_ddp_resources(adapter);
+	if (!err)
+#endif
+		return 0;
 err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
@@ -4743,6 +4753,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
	int i;
 
+#ifdef IXGBE_FCOE
+	ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -5054,11 +5068,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	unsigned int cpu;
-	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
 
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5189,17 +5198,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
		/* Add up per cpu counters for total ddp aloc fail */
-		if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+		if (adapter->fcoe.ddp_pool) {
+			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+			struct ixgbe_fcoe_ddp_pool *ddp_pool;
+			unsigned int cpu;
+			u64 noddp = 0, noddp_ext_buff = 0;
			for_each_possible_cpu(cpu) {
-				fcoe_noddp_counts_sum +=
-					*per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-				fcoe_noddp_ext_buff_counts_sum +=
-					*per_cpu_ptr(fcoe->
-						pcpu_noddp_ext_buff, cpu);
+				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+				noddp += ddp_pool->noddp;
+				noddp_ext_buff += ddp_pool->noddp_ext_buff;
			}
+			hwstats->fcoe_noddp = noddp;
+			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
		}
-		hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-		hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
		break;
	default:
@@ -6371,7 +6382,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
-	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;
@@ -6445,8 +6456,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
-			    IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
 
	return 0;
 }
@@ -6503,12 +6513,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
 {
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (is_valid_ether_addr(mac->san_addr)) {
+	if (is_valid_ether_addr(hw->mac.san_addr)) {
		rtnl_lock();
-		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
+
+		/* update SAN MAC vmdq pool selection */
+		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
	}
	return err;
 }
@@ -7241,11 +7254,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
-	}
-	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-		netdev->vlan_features |= NETIF_F_FCOE_CRC;
-		netdev->vlan_features |= NETIF_F_FSO;
-		netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+		netdev->features |= NETIF_F_FSO |
+				    NETIF_F_FCOE_CRC;
+
+		netdev->vlan_features |= NETIF_F_FSO |
+					 NETIF_F_FCOE_CRC |
+					 NETIF_F_FCOE_MTU;
	}
 #endif /* IXGBE_FCOE */
	if (pci_using_dac) {
@@ -7442,12 +7459,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
	ixgbe_sysfs_exit(adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 
-#ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -208,6 +208,17 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
	u32 vmdctl;
	int i;
 
+	/* set num VFs to 0 to prevent access to vfinfo */
+	adapter->num_vfs = 0;
+
+	/* free VF control structures */
+	kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	/* free macvlan list */
+	kfree(adapter->mv_list);
+	adapter->mv_list = NULL;
+
 #ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
@@ -225,6 +236,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
	IXGBE_WRITE_FLUSH(hw);
 
+	/* Disable VMDq flag so device will be set in VM mode */
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
	/* take a breather then clean up driver data */
	msleep(100);
 
@@ -233,11 +249,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
		if (adapter->vfinfo[i].vfdev)
			pci_dev_put(adapter->vfinfo[i].vfdev);
	}
-	kfree(adapter->vfinfo);
-	kfree(adapter->mv_list);
-	adapter->vfinfo = NULL;
 
-	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1449,6 +1449,7 @@ enum {
 #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
 #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
 
 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
 #define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@@ -2843,6 +2844,7 @@ struct ixgbe_mac_operations {
	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
	s32 (*clear_rar)(struct ixgbe_hw *, u32);
	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
	s32 (*init_rx_addrs)(struct ixgbe_hw *);
	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
@@ -2918,6 +2920,7 @@ struct ixgbe_mac_info {
	bool orig_link_settings_stored;
	bool autotry_restart;
	u8 flags;
+	u8 san_mac_rar_index;
	struct ixgbe_thermal_sensor_data thermal_sensor_data;
 };
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -156,6 +156,9 @@ mac_reset_top:
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
 
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}
@@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
	.set_rar = &ixgbe_set_rar_generic,
	.clear_rar = &ixgbe_clear_rar_generic,
	.set_vmdq = &ixgbe_set_vmdq_generic,
+	.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq = &ixgbe_clear_vmdq_generic,
	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,