Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) mlx4 doesn't fully check for a supported, valid RSS hash function, fix from Amir Vadai

 2) Off by one in ibmveth_change_mtu(), from David Gibson

 3) Prevent altera chip from reporting false error interrupts in some circumstances, from Chee Nouk Phoon

 4) Get rid of that stupid endless loop trying to allocate a FIN packet in TCP, and in the process kill deadlocks. From Eric Dumazet

 5) Fix get_rps_cpus() crash due to wrong invalid-cpu value, also from Eric Dumazet

 6) Fix two bugs in async rhashtable resizing, from Thomas Graf

 7) Fix topology server listener socket namespace bug in TIPC, from Ying Xue

 8) Add some missing HAS_DMA kconfig dependencies, from Geert Uytterhoeven

 9) bgmac driver intends to force re-polling but does so by returning the wrong value from its ->poll() handler. Fix from Rafał Miłecki

10) When the creator of an rhashtable configures a max size for it, don't bark in the logs and drop insertions when that is exceeded. Fix from Johannes Berg

11) Recover from out of order packets in ppp mppe properly, from Sylvain Rochet

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (41 commits)
  bnx2x: really disable TPA if 'disable_tpa' option is set
  net:treewide: Fix typo in drivers/net
  net/mlx4_en: Prevent setting invalid RSS hash function
  mdio-mux-gpio: use new gpiod_get_array and gpiod_put_array functions
  netfilter; Add some missing default cases to switch statements in nft_reject.
  ppp: mppe: discard late packet in stateless mode
  ppp: mppe: sanity error path rework
  net/bonding: Make DRV macros private
  net: rfs: fix crash in get_rps_cpus()
  altera tse: add support for fixed-links.
  pxa168: fix double deallocation of managed resources
  net: fix crash in build_skb()
  net: eth: altera: Resolve false errors from MSGDMA to TSE
  ehea: Fix memory hook reference counting crashes
  net/tg3: Release IRQs on permanent error
  net: mdio-gpio: support access that may sleep
  inet: fix possible panic in reqsk_queue_unlink()
  rhashtable: don't attempt to grow when at max_size
  bgmac: fix requests for extra polling calls from NAPI
  tcp: avoid looping in tcp_send_fin()
  ...
commit 2decb2682f
53 changed files with 528 additions and 318 deletions

@@ -18,3 +18,12 @@ platform_labels - INTEGER
	Possible values: 0 - 1048575
	Default: 0

conf/<interface>/input - BOOL
	Control whether packets can be input on this interface.

	If disabled, packets will be discarded without further
	processing.

	0 - disabled (default)
	not 0 - enabled
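As a quick illustration of the per-interface knob documented above: once the sysctl exists, userspace can flip it by writing to the corresponding file under /proc/sys. This is only a hedged sketch, not part of the patch set; the interface name "eth0" and the minimal error handling are illustrative. The path layout matches the "net/mpls/conf/%s" registration done later in af_mpls.c.

#include <stdio.h>

int main(void)
{
	/* Enable MPLS input on eth0 (example interface name). */
	FILE *f = fopen("/proc/sys/net/mpls/conf/eth0/input", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* "not 0 - enabled" per the documentation */
	return fclose(f) ? 1 : 0;
}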
@@ -282,7 +282,7 @@ following is true:

- The current CPU's queue head counter >= the recorded tail counter
  value in rps_dev_flow[i]
- The current CPU is unset (equal to RPS_NO_CPU)
- The current CPU is unset (>= nr_cpu_ids)
- The current CPU is offline

After this check, the packet is sent to the (possibly updated) current
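The condition described in this documentation hunk maps directly onto the get_rps_cpu() change further down in the diff. A simplified sketch of the switch test (names abbreviated here; this is not the exact kernel code) reads:

#include <linux/cpumask.h>	/* nr_cpu_ids, cpu_online() */

/* Switch the flow to next_cpu when the recorded CPU is stale:
 * unset (>= nr_cpu_ids), offline, or its queue head has advanced
 * past the tail recorded for this flow table entry. */
static bool rps_should_switch(unsigned int tcpu, unsigned int next_cpu,
			      unsigned int queue_head, unsigned int last_qtail)
{
	return tcpu != next_cpu &&
	       (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		(int)(queue_head - last_qtail) >= 0);
}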
@@ -82,6 +82,8 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -4,6 +4,7 @@
#include <net/netns/generic.h>
#include <net/bonding.h>

#include "bonding_priv.h"

static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
drivers/net/bonding/bonding_priv.h (new file, 25 lines)

@@ -0,0 +1,25 @@
/*
 * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
 *
 * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * BUT, I'm the one who modified it for ethernet, so:
 * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
 *
 * This software may be used and distributed according to the terms
 * of the GNU Public License, incorporated herein by reference.
 *
 */

#ifndef _BONDING_PRIV_H
#define _BONDING_PRIV_H

#define DRV_VERSION	"3.7.1"
#define DRV_RELDATE	"April 27, 2011"
#define DRV_NAME	"bonding"
#define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"

#define bond_version	DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"

#endif
@@ -112,7 +112,7 @@ config PCH_CAN

config CAN_GRCAN
	tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
	depends on OF
	depends on OF && HAS_DMA
	---help---
	  Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
	  Note that the driver supports little endian, even though little
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,

	if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
					 MSG_FLAG_NERR)) {
		netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
		netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
			   msg->u.rx_can_header.flag);

		stats->rx_errors++;
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
	char *s;

	if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
		printk(KERN_ERR "%s: unable to read podule description string\n",
		printk(KERN_ERR "%s: unable to read module description string\n",
		       dev_name(&ec->dev));
		goto no_addr;
	}
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
/* Tx buffer control flags
 */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
@ -777,6 +777,8 @@ static int init_phy(struct net_device *dev)
|
|||
struct altera_tse_private *priv = netdev_priv(dev);
|
||||
struct phy_device *phydev;
|
||||
struct device_node *phynode;
|
||||
bool fixed_link = false;
|
||||
int rc = 0;
|
||||
|
||||
/* Avoid init phy in case of no phy present */
|
||||
if (!priv->phy_iface)
|
||||
|
@ -789,13 +791,32 @@ static int init_phy(struct net_device *dev)
|
|||
phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
|
||||
|
||||
if (!phynode) {
|
||||
netdev_dbg(dev, "no phy-handle found\n");
|
||||
if (!priv->mdio) {
|
||||
netdev_err(dev,
|
||||
"No phy-handle nor local mdio specified\n");
|
||||
return -ENODEV;
|
||||
/* check if a fixed-link is defined in device-tree */
|
||||
if (of_phy_is_fixed_link(priv->device->of_node)) {
|
||||
rc = of_phy_register_fixed_link(priv->device->of_node);
|
||||
if (rc < 0) {
|
||||
netdev_err(dev, "cannot register fixed PHY\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* In the case of a fixed PHY, the DT node associated
|
||||
* to the PHY is the Ethernet MAC DT node.
|
||||
*/
|
||||
phynode = of_node_get(priv->device->of_node);
|
||||
fixed_link = true;
|
||||
|
||||
netdev_dbg(dev, "fixed-link detected\n");
|
||||
phydev = of_phy_connect(dev, phynode,
|
||||
&altera_tse_adjust_link,
|
||||
0, priv->phy_iface);
|
||||
} else {
|
||||
netdev_dbg(dev, "no phy-handle found\n");
|
||||
if (!priv->mdio) {
|
||||
netdev_err(dev, "No phy-handle nor local mdio specified\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
phydev = connect_local_phy(dev);
|
||||
}
|
||||
phydev = connect_local_phy(dev);
|
||||
} else {
|
||||
netdev_dbg(dev, "phy-handle found\n");
|
||||
phydev = of_phy_connect(dev, phynode,
|
||||
|
@ -819,10 +840,10 @@ static int init_phy(struct net_device *dev)
|
|||
/* Broken HW is sometimes missing the pull-up resistor on the
|
||||
* MDIO line, which results in reads to non-existent devices returning
|
||||
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
|
||||
* device as well.
|
||||
* device as well. If a fixed-link is used the phy_id is always 0.
|
||||
* Note: phydev->phy_id is the result of reading the UID PHY registers.
|
||||
*/
|
||||
if (phydev->phy_id == 0) {
|
||||
if ((phydev->phy_id == 0) && !fixed_link) {
|
||||
netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
|
||||
phy_disconnect(phydev);
|
||||
return -ENODEV;
|
||||
|
|
|
@@ -179,7 +179,7 @@ config SUNLANCE

config AMD_XGBE
	tristate "AMD 10GbE Ethernet driver"
	depends on (OF_NET || ACPI) && HAS_IOMEM
	depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
	select PHYLIB
	select AMD_XGBE_PHY
	select BITREVERSE
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
	tristate "ARC EMAC support"
	select ARC_EMAC_CORE
	depends on OF_IRQ
	depends on OF_NET
	depends on OF_IRQ && OF_NET && HAS_DMA
	---help---
	  On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
	  non-standard on-chip ethernet device ARC EMAC 10/100 is used.

@@ -35,7 +34,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
	tristate "Rockchip EMAC support"
	select ARC_EMAC_CORE
	depends on OF_IRQ && OF_NET && REGULATOR
	depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
	---help---
	  Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
	  This selects Rockchip SoC glue layer support for the
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return handled;
		return weight;

	if (handled < weight) {
		napi_complete(napi);
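For context on why the bgmac hunk above returns the full weight: under the NAPI contract, a ->poll() handler that returns its entire budget is scheduled again, while completing NAPI with a smaller return value stops polling until the next interrupt. A rough, generic sketch of that shape (the variables stand in for driver-specific RX/TX processing and are not the real bgmac helpers):

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;		/* would be filled in by RX/TX processing */
	bool more_pending = false;	/* would re-read the interrupt status register */

	if (more_pending)
		return budget;	/* returning the full budget forces another poll round */

	if (work_done < budget)
		napi_complete(napi);	/* done for now; interrupts get re-enabled */
	return work_done;
}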
@ -2485,8 +2485,10 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
|
|||
else if (bp->flags & GRO_ENABLE_FLAG)
|
||||
fp->mode = TPA_MODE_GRO;
|
||||
|
||||
/* We don't want TPA on an FCoE L2 ring */
|
||||
if (IS_FCOE_FP(fp))
|
||||
/* We don't want TPA if it's disabled in bp
|
||||
* or if this is an FCoE L2 ring.
|
||||
*/
|
||||
if (bp->disable_tpa || IS_FCOE_FP(fp))
|
||||
fp->disable_tpa = 1;
|
||||
}
|
||||
|
||||
|
@ -4809,6 +4811,23 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
|
|||
{
|
||||
struct bnx2x *bp = netdev_priv(dev);
|
||||
|
||||
if (pci_num_vf(bp->pdev)) {
|
||||
netdev_features_t changed = dev->features ^ features;
|
||||
|
||||
/* Revert the requested changes in features if they
|
||||
* would require internal reload of PF in bnx2x_set_features().
|
||||
*/
|
||||
if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
|
||||
features &= ~NETIF_F_RXCSUM;
|
||||
features |= dev->features & NETIF_F_RXCSUM;
|
||||
}
|
||||
|
||||
if (changed & NETIF_F_LOOPBACK) {
|
||||
features &= ~NETIF_F_LOOPBACK;
|
||||
features |= dev->features & NETIF_F_LOOPBACK;
|
||||
}
|
||||
}
|
||||
|
||||
/* TPA requires Rx CSUM offloading */
|
||||
if (!(features & NETIF_F_RXCSUM)) {
|
||||
features &= ~NETIF_F_LRO;
|
||||
|
@ -4839,15 +4858,18 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
|
|||
else
|
||||
flags &= ~GRO_ENABLE_FLAG;
|
||||
|
||||
if (features & NETIF_F_LOOPBACK) {
|
||||
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
|
||||
bp->link_params.loopback_mode = LOOPBACK_BMAC;
|
||||
bnx2x_reload = true;
|
||||
}
|
||||
} else {
|
||||
if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
|
||||
bp->link_params.loopback_mode = LOOPBACK_NONE;
|
||||
bnx2x_reload = true;
|
||||
/* VFs or non SRIOV PFs should be able to change loopback feature */
|
||||
if (!pci_num_vf(bp->pdev)) {
|
||||
if (features & NETIF_F_LOOPBACK) {
|
||||
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
|
||||
bp->link_params.loopback_mode = LOOPBACK_BMAC;
|
||||
bnx2x_reload = true;
|
||||
}
|
||||
} else {
|
||||
if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
|
||||
bp->link_params.loopback_mode = LOOPBACK_NONE;
|
||||
bnx2x_reload = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4931,6 +4953,11 @@ int bnx2x_resume(struct pci_dev *pdev)
|
|||
}
|
||||
bp = netdev_priv(dev);
|
||||
|
||||
if (pci_num_vf(bp->pdev)) {
|
||||
DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
|
||||
BNX2X_ERR("Handling parity error recovery. Try again later\n");
|
||||
return -EAGAIN;
|
||||
|
|
|
@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
|
|||
"set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
|
||||
ering->rx_pending, ering->tx_pending);
|
||||
|
||||
if (pci_num_vf(bp->pdev)) {
|
||||
DP(BNX2X_MSG_IOV,
|
||||
"VFs are enabled, can not change ring parameters\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
|
||||
DP(BNX2X_MSG_ETHTOOL,
|
||||
"Handling parity error recovery. Try again later\n");
|
||||
|
@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
|
|||
u8 is_serdes, link_up;
|
||||
int rc, cnt = 0;
|
||||
|
||||
if (pci_num_vf(bp->pdev)) {
|
||||
DP(BNX2X_MSG_IOV,
|
||||
"VFs are enabled, can not perform self test\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
|
||||
netdev_err(bp->dev,
|
||||
"Handling parity error recovery. Try again later\n");
|
||||
|
@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
|
|||
channels->rx_count, channels->tx_count, channels->other_count,
|
||||
channels->combined_count);
|
||||
|
||||
if (pci_num_vf(bp->pdev)) {
|
||||
DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
/* We don't support separate rx / tx channels.
|
||||
* We don't allow setting 'other' channels.
|
||||
*/
|
||||
|
|
|
@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
|
|||
|
||||
rtnl_lock();
|
||||
|
||||
tp->pcierr_recovery = true;
|
||||
/* We needn't recover from permanent error */
|
||||
if (state == pci_channel_io_frozen)
|
||||
tp->pcierr_recovery = true;
|
||||
|
||||
/* We probably don't have netdev yet */
|
||||
if (!netdev || !netif_running(netdev))
|
||||
|
|
|
@ -1473,9 +1473,9 @@ static void macb_init_rings(struct macb *bp)
|
|||
for (i = 0; i < TX_RING_SIZE; i++) {
|
||||
bp->queues[0].tx_ring[i].addr = 0;
|
||||
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
|
||||
bp->queues[0].tx_head = 0;
|
||||
bp->queues[0].tx_tail = 0;
|
||||
}
|
||||
bp->queues[0].tx_head = 0;
|
||||
bp->queues[0].tx_tail = 0;
|
||||
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
|
||||
|
||||
bp->rx_tail = 0;
|
||||
|
|
|
@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
if (atomic_inc_and_test(&ehea_memory_hooks_registered))
|
||||
if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
|
||||
return 0;
|
||||
|
||||
ret = ehea_create_busmap();
|
||||
|
@ -3381,12 +3381,14 @@ out3:
|
|||
out2:
|
||||
unregister_reboot_notifier(&ehea_reboot_nb);
|
||||
out:
|
||||
atomic_dec(&ehea_memory_hooks_registered);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ehea_unregister_memory_hooks(void)
|
||||
{
|
||||
if (atomic_read(&ehea_memory_hooks_registered))
|
||||
/* Only remove the hooks if we've registered them */
|
||||
if (atomic_read(&ehea_memory_hooks_registered) == 0)
|
||||
return;
|
||||
|
||||
unregister_reboot_notifier(&ehea_reboot_nb);
|
||||
|
|
|
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)

@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
|
@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
|
|||
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
|
||||
if (!np) {
|
||||
dev_err(&pdev->dev, "missing phy-handle\n");
|
||||
return -EINVAL;
|
||||
err = -EINVAL;
|
||||
goto err_netdev;
|
||||
}
|
||||
of_property_read_u32(np, "reg", &pep->phy_addr);
|
||||
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
|
||||
|
@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
|
|||
pep->smi_bus = mdiobus_alloc();
|
||||
if (pep->smi_bus == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto err_base;
|
||||
goto err_netdev;
|
||||
}
|
||||
pep->smi_bus->priv = pep;
|
||||
pep->smi_bus->name = "pxa168_eth smi";
|
||||
|
@ -1551,13 +1552,10 @@ err_mdiobus:
|
|||
mdiobus_unregister(pep->smi_bus);
|
||||
err_free_mdio:
|
||||
mdiobus_free(pep->smi_bus);
|
||||
err_base:
|
||||
iounmap(pep->base);
|
||||
err_netdev:
|
||||
free_netdev(dev);
|
||||
err_clk:
|
||||
clk_disable(clk);
|
||||
clk_put(clk);
|
||||
clk_disable_unprepare(clk);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
|
|||
if (pep->phy)
|
||||
phy_disconnect(pep->phy);
|
||||
if (pep->clk) {
|
||||
clk_disable(pep->clk);
|
||||
clk_put(pep->clk);
|
||||
pep->clk = NULL;
|
||||
clk_disable_unprepare(pep->clk);
|
||||
}
|
||||
|
||||
iounmap(pep->base);
|
||||
pep->base = NULL;
|
||||
mdiobus_unregister(pep->smi_bus);
|
||||
mdiobus_free(pep->smi_bus);
|
||||
unregister_netdev(dev);
|
||||
|
|
|
@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
|
|||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
|
||||
/* check if requested function is supported by the device */
|
||||
if ((hfunc == ETH_RSS_HASH_TOP &&
|
||||
!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
|
||||
(hfunc == ETH_RSS_HASH_XOR &&
|
||||
!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
|
||||
return -EINVAL;
|
||||
if (hfunc == ETH_RSS_HASH_TOP) {
|
||||
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
|
||||
return -EINVAL;
|
||||
if (!(dev->features & NETIF_F_RXHASH))
|
||||
en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
|
||||
return 0;
|
||||
} else if (hfunc == ETH_RSS_HASH_XOR) {
|
||||
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
|
||||
return -EINVAL;
|
||||
if (dev->features & NETIF_F_RXHASH)
|
||||
en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
priv->rss_hash_fn = hfunc;
|
||||
if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
|
||||
en_warn(priv,
|
||||
"Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
|
||||
if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
|
||||
en_warn(priv,
|
||||
"Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
|
||||
return 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
|
||||
|
@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
|
|||
priv->prof->rss_rings = rss_rings;
|
||||
if (key)
|
||||
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
|
||||
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
|
||||
priv->rss_hash_fn = hfunc;
|
||||
|
||||
if (port_up) {
|
||||
err = mlx4_en_start_port(dev);
|
||||
|
|
|
@ -69,11 +69,7 @@
|
|||
#include <net/ip.h>
|
||||
#include <net/tcp.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/processor.h>
|
||||
#ifdef CONFIG_MTRR
|
||||
#include <asm/mtrr.h>
|
||||
#endif
|
||||
#include <net/busy_poll.h>
|
||||
|
||||
#include "myri10ge_mcp.h"
|
||||
|
@ -242,8 +238,7 @@ struct myri10ge_priv {
|
|||
unsigned int rdma_tags_available;
|
||||
int intr_coal_delay;
|
||||
__be32 __iomem *intr_coal_delay_ptr;
|
||||
int mtrr;
|
||||
int wc_enabled;
|
||||
int wc_cookie;
|
||||
int down_cnt;
|
||||
wait_queue_head_t down_wq;
|
||||
struct work_struct watchdog_work;
|
||||
|
@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
|
|||
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
|
||||
"tx_heartbeat_errors", "tx_window_errors",
|
||||
/* device-specific stats */
|
||||
"tx_boundary", "WC", "irq", "MSI", "MSIX",
|
||||
"tx_boundary", "irq", "MSI", "MSIX",
|
||||
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
|
||||
"serial_number", "watchdog_resets",
|
||||
#ifdef CONFIG_MYRI10GE_DCA
|
||||
|
@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
|
|||
data[i] = ((u64 *)&link_stats)[i];
|
||||
|
||||
data[i++] = (unsigned int)mgp->tx_boundary;
|
||||
data[i++] = (unsigned int)mgp->wc_enabled;
|
||||
data[i++] = (unsigned int)mgp->pdev->irq;
|
||||
data[i++] = (unsigned int)mgp->msi_enabled;
|
||||
data[i++] = (unsigned int)mgp->msix_enabled;
|
||||
|
@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
|
||||
mgp->board_span = pci_resource_len(pdev, 0);
|
||||
mgp->iomem_base = pci_resource_start(pdev, 0);
|
||||
mgp->mtrr = -1;
|
||||
mgp->wc_enabled = 0;
|
||||
#ifdef CONFIG_MTRR
|
||||
mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
|
||||
MTRR_TYPE_WRCOMB, 1);
|
||||
if (mgp->mtrr >= 0)
|
||||
mgp->wc_enabled = 1;
|
||||
#endif
|
||||
mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
|
||||
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
|
||||
if (mgp->sram == NULL) {
|
||||
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
|
||||
|
@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
goto abort_with_state;
|
||||
}
|
||||
if (mgp->msix_enabled)
|
||||
dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
|
||||
dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
|
||||
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
|
||||
(mgp->wc_enabled ? "Enabled" : "Disabled"));
|
||||
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
|
||||
else
|
||||
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
|
||||
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
|
||||
mgp->msi_enabled ? "MSI" : "xPIC",
|
||||
pdev->irq, mgp->tx_boundary, mgp->fw_name,
|
||||
(mgp->wc_enabled ? "Enabled" : "Disabled"));
|
||||
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
|
||||
|
||||
board_number++;
|
||||
return 0;
|
||||
|
@ -4175,10 +4162,7 @@ abort_with_ioremap:
|
|||
iounmap(mgp->sram);
|
||||
|
||||
abort_with_mtrr:
|
||||
#ifdef CONFIG_MTRR
|
||||
if (mgp->mtrr >= 0)
|
||||
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
|
||||
#endif
|
||||
arch_phys_wc_del(mgp->wc_cookie);
|
||||
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
|
||||
mgp->cmd, mgp->cmd_bus);
|
||||
|
||||
|
@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
|
|||
pci_restore_state(pdev);
|
||||
|
||||
iounmap(mgp->sram);
|
||||
|
||||
#ifdef CONFIG_MTRR
|
||||
if (mgp->mtrr >= 0)
|
||||
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
|
||||
#endif
|
||||
arch_phys_wc_del(mgp->wc_cookie);
|
||||
myri10ge_free_slices(mgp);
|
||||
kfree(mgp->msix_vectors);
|
||||
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
|
||||
|
|
|
@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
|
|||
* assume the pin serves as pull-up. If direction is
|
||||
* output, the default value is high.
|
||||
*/
|
||||
gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
|
||||
gpio_set_value_cansleep(bitbang->mdo,
|
||||
1 ^ bitbang->mdo_active_low);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
|
|||
struct mdio_gpio_info *bitbang =
|
||||
container_of(ctrl, struct mdio_gpio_info, ctrl);
|
||||
|
||||
return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
|
||||
return gpio_get_value_cansleep(bitbang->mdio) ^
|
||||
bitbang->mdio_active_low;
|
||||
}
|
||||
|
||||
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
|
||||
|
@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
|
|||
container_of(ctrl, struct mdio_gpio_info, ctrl);
|
||||
|
||||
if (bitbang->mdo)
|
||||
gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
|
||||
gpio_set_value_cansleep(bitbang->mdo,
|
||||
what ^ bitbang->mdo_active_low);
|
||||
else
|
||||
gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
|
||||
gpio_set_value_cansleep(bitbang->mdio,
|
||||
what ^ bitbang->mdio_active_low);
|
||||
}
|
||||
|
||||
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
|
||||
|
@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
|
|||
struct mdio_gpio_info *bitbang =
|
||||
container_of(ctrl, struct mdio_gpio_info, ctrl);
|
||||
|
||||
gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
|
||||
gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
|
||||
}
|
||||
|
||||
static struct mdiobb_ops mdio_gpio_ops = {
|
||||
|
|
|
@ -12,33 +12,30 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/phy.h>
|
||||
#include <linux/mdio-mux.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
|
||||
#define DRV_VERSION "1.1"
|
||||
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
|
||||
|
||||
#define MDIO_MUX_GPIO_MAX_BITS 8
|
||||
|
||||
struct mdio_mux_gpio_state {
|
||||
struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
|
||||
unsigned int num_gpios;
|
||||
struct gpio_descs *gpios;
|
||||
void *mux_handle;
|
||||
};
|
||||
|
||||
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
|
||||
void *data)
|
||||
{
|
||||
int values[MDIO_MUX_GPIO_MAX_BITS];
|
||||
unsigned int n;
|
||||
struct mdio_mux_gpio_state *s = data;
|
||||
int values[s->gpios->ndescs];
|
||||
unsigned int n;
|
||||
|
||||
if (current_child == desired_child)
|
||||
return 0;
|
||||
|
||||
for (n = 0; n < s->num_gpios; n++) {
|
||||
for (n = 0; n < s->gpios->ndescs; n++)
|
||||
values[n] = (desired_child >> n) & 1;
|
||||
}
|
||||
gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
|
||||
|
||||
gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
|
|||
static int mdio_mux_gpio_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct mdio_mux_gpio_state *s;
|
||||
int num_gpios;
|
||||
unsigned int n;
|
||||
int r;
|
||||
|
||||
if (!pdev->dev.of_node)
|
||||
return -ENODEV;
|
||||
|
||||
num_gpios = of_gpio_count(pdev->dev.of_node);
|
||||
if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
|
||||
return -ENODEV;
|
||||
|
||||
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
|
||||
if (!s)
|
||||
return -ENOMEM;
|
||||
|
||||
s->num_gpios = num_gpios;
|
||||
|
||||
for (n = 0; n < num_gpios; ) {
|
||||
struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(gpio)) {
|
||||
r = PTR_ERR(gpio);
|
||||
goto err;
|
||||
}
|
||||
s->gpio[n] = gpio;
|
||||
n++;
|
||||
}
|
||||
s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
|
||||
if (IS_ERR(s->gpios))
|
||||
return PTR_ERR(s->gpios);
|
||||
|
||||
r = mdio_mux_init(&pdev->dev,
|
||||
mdio_mux_gpio_switch_fn, &s->mux_handle, s);
|
||||
|
||||
if (r == 0) {
|
||||
pdev->dev.platform_data = s;
|
||||
return 0;
|
||||
if (r != 0) {
|
||||
gpiod_put_array(s->gpios);
|
||||
return r;
|
||||
}
|
||||
err:
|
||||
while (n) {
|
||||
n--;
|
||||
gpiod_put(s->gpio[n]);
|
||||
}
|
||||
return r;
|
||||
|
||||
pdev->dev.platform_data = s;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mdio_mux_gpio_remove(struct platform_device *pdev)
|
||||
{
|
||||
unsigned int n;
|
||||
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
|
||||
mdio_mux_uninit(s->mux_handle);
|
||||
for (n = 0; n < s->num_gpios; n++)
|
||||
gpiod_put(s->gpio[n]);
|
||||
gpiod_put_array(s->gpios);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
|
|||
struct blkcipher_desc desc = { .tfm = state->arc4 };
|
||||
unsigned ccount;
|
||||
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
|
||||
int sanity = 0;
|
||||
struct scatterlist sg_in[1], sg_out[1];
|
||||
|
||||
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
|
||||
|
@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
|
|||
"mppe_decompress[%d]: ENCRYPTED bit not set!\n",
|
||||
state->unit);
|
||||
state->sanity_errors += 100;
|
||||
sanity = 1;
|
||||
goto sanity_error;
|
||||
}
|
||||
if (!state->stateful && !flushed) {
|
||||
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
|
||||
"stateless mode!\n", state->unit);
|
||||
state->sanity_errors += 100;
|
||||
sanity = 1;
|
||||
goto sanity_error;
|
||||
}
|
||||
if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
|
||||
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
|
||||
"flag packet!\n", state->unit);
|
||||
state->sanity_errors += 100;
|
||||
sanity = 1;
|
||||
}
|
||||
|
||||
if (sanity) {
|
||||
if (state->sanity_errors < SANITY_MAX)
|
||||
return DECOMP_ERROR;
|
||||
else
|
||||
/*
|
||||
* Take LCP down if the peer is sending too many bogons.
|
||||
* We don't want to do this for a single or just a few
|
||||
* instances since it could just be due to packet corruption.
|
||||
*/
|
||||
return DECOMP_FATALERROR;
|
||||
goto sanity_error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
|
|||
*/
|
||||
|
||||
if (!state->stateful) {
|
||||
/* Discard late packet */
|
||||
if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
|
||||
> MPPE_CCOUNT_SPACE / 2) {
|
||||
state->sanity_errors++;
|
||||
goto sanity_error;
|
||||
}
|
||||
|
||||
/* RFC 3078, sec 8.1. Rekey for every packet. */
|
||||
while (state->ccount != ccount) {
|
||||
mppe_rekey(state, 0);
|
||||
|
@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
|
|||
state->sanity_errors >>= 1;
|
||||
|
||||
return osize;
|
||||
|
||||
sanity_error:
|
||||
if (state->sanity_errors < SANITY_MAX)
|
||||
return DECOMP_ERROR;
|
||||
else
|
||||
/* Take LCP down if the peer is sending too many bogons.
|
||||
* We don't want to do this for a single or just a few
|
||||
* instances since it could just be due to packet corruption.
|
||||
*/
|
||||
return DECOMP_FATALERROR;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
|
|||
/* Only change unicasts */
|
||||
if (!(is_multicast_ether_addr(f->eth_addr) ||
|
||||
is_zero_ether_addr(f->eth_addr))) {
|
||||
int rc = vxlan_fdb_replace(f, ip, port, vni,
|
||||
notify |= vxlan_fdb_replace(f, ip, port, vni,
|
||||
ifindex);
|
||||
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
notify |= rc;
|
||||
} else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
|
|
@ -60,6 +60,7 @@ struct phy_device;
|
|||
struct wireless_dev;
|
||||
/* 802.15.4 specific */
|
||||
struct wpan_dev;
|
||||
struct mpls_dev;
|
||||
|
||||
void netdev_set_default_ethtool_ops(struct net_device *dev,
|
||||
const struct ethtool_ops *ops);
|
||||
|
@ -1627,6 +1628,9 @@ struct net_device {
|
|||
void *ax25_ptr;
|
||||
struct wireless_dev *ieee80211_ptr;
|
||||
struct wpan_dev *ieee802154_ptr;
|
||||
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
|
||||
struct mpls_dev __rcu *mpls_ptr;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Cache lines mostly used on receive path (including eth_type_trans())
|
||||
|
@ -2021,10 +2025,10 @@ struct pcpu_sw_netstats {
|
|||
({ \
|
||||
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
|
||||
if (pcpu_stats) { \
|
||||
int i; \
|
||||
for_each_possible_cpu(i) { \
|
||||
int __cpu; \
|
||||
for_each_possible_cpu(__cpu) { \
|
||||
typeof(type) *stat; \
|
||||
stat = per_cpu_ptr(pcpu_stats, i); \
|
||||
stat = per_cpu_ptr(pcpu_stats, __cpu); \
|
||||
u64_stats_init(&stat->syncp); \
|
||||
} \
|
||||
} \
|
||||
|
|
|
@@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size;
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/* The bucket lock is selected based on the hash and protects mutations
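To see where max_size in this check comes from: it is a field of struct rhashtable_params chosen by the table's creator. A hedged illustration of a caller opting into a size cap follows; the struct and values are made up for the example and are not from the patch set.

#include <linux/types.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

/* With the fix above, once the table reaches max_size buckets,
 * rht_grow_above_100() stops requesting further growth instead of
 * letting insertions warn in the logs and get dropped. */
static const struct rhashtable_params my_params = {
	.head_offset	= offsetof(struct my_obj, node),
	.key_offset	= offsetof(struct my_obj, key),
	.key_len	= sizeof(u32),
	.max_size	= 1024,
};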
|
|
@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
|
|||
|
||||
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
|
||||
int node);
|
||||
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
|
||||
struct sk_buff *build_skb(void *data, unsigned int frag_size);
|
||||
static inline struct sk_buff *alloc_skb(unsigned int size,
|
||||
gfp_t priority)
|
||||
|
|
|
@ -30,13 +30,6 @@
|
|||
#include <net/bond_alb.h>
|
||||
#include <net/bond_options.h>
|
||||
|
||||
#define DRV_VERSION "3.7.1"
|
||||
#define DRV_RELDATE "April 27, 2011"
|
||||
#define DRV_NAME "bonding"
|
||||
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
|
||||
|
||||
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
|
||||
|
||||
#define BOND_MAX_ARP_TARGETS 16
|
||||
|
||||
#define BOND_DEFAULT_MIIMON 100
|
||||
|
|
|
@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
|
|||
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
|
||||
unsigned long timeout);
|
||||
|
||||
static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
|
||||
struct request_sock *req)
|
||||
{
|
||||
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
|
||||
}
|
||||
|
||||
static inline void inet_csk_reqsk_queue_added(struct sock *sk,
|
||||
const unsigned long timeout)
|
||||
{
|
||||
|
@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
|
|||
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
|
||||
}
|
||||
|
||||
static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
|
||||
struct request_sock *req)
|
||||
{
|
||||
reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
|
||||
}
|
||||
|
||||
static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
|
||||
struct request_sock *req)
|
||||
{
|
||||
inet_csk_reqsk_queue_unlink(sk, req);
|
||||
inet_csk_reqsk_queue_removed(sk, req);
|
||||
reqsk_put(req);
|
||||
}
|
||||
void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
|
||||
|
||||
void inet_csk_destroy_sock(struct sock *sk);
|
||||
void inet_csk_prepare_forced_close(struct sock *sk);
|
||||
|
|
|
@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
|
|||
return queue->rskq_accept_head == NULL;
|
||||
}
|
||||
|
||||
static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
|
||||
struct request_sock *req)
|
||||
{
|
||||
struct listen_sock *lopt = queue->listen_opt;
|
||||
struct request_sock **prev;
|
||||
|
||||
spin_lock(&queue->syn_wait_lock);
|
||||
|
||||
prev = &lopt->syn_table[req->rsk_hash];
|
||||
while (*prev != req)
|
||||
prev = &(*prev)->dl_next;
|
||||
*prev = req->dl_next;
|
||||
|
||||
spin_unlock(&queue->syn_wait_lock);
|
||||
if (del_timer(&req->rsk_timer))
|
||||
reqsk_put(req);
|
||||
}
|
||||
|
||||
static inline void reqsk_queue_add(struct request_sock_queue *queue,
|
||||
struct request_sock *req,
|
||||
struct sock *parent,
|
||||
|
|
|
@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
|
|||
|
||||
if (rht_grow_above_75(ht, tbl))
|
||||
size *= 2;
|
||||
/* More than two rehashes (not resizes) detected. */
|
||||
else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
|
||||
/* Do not schedule more than one rehash */
|
||||
else if (old_tbl != tbl)
|
||||
return -EBUSY;
|
||||
|
||||
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
|
||||
if (new_tbl == NULL)
|
||||
if (new_tbl == NULL) {
|
||||
/* Schedule async resize/rehash to try allocation
|
||||
* non-atomic context.
|
||||
*/
|
||||
schedule_work(&ht->run_work);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = rhashtable_rehash_attach(ht, tbl, new_tbl);
|
||||
if (err) {
|
||||
|
|
|
@@ -3079,7 +3079,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;

@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	 * If the desired CPU (where last recvmsg was done) is
	 * different from current CPU (one in the rx-queue flow
	 * table entry), switch if one of the following holds:
	 *   - Current CPU is unset (equal to RPS_NO_CPU).
	 *   - Current CPU is unset (>= nr_cpu_ids).
	 *   - Current CPU is offline.
	 *   - The current CPU's queue tail has advanced beyond the
	 *     last packet that was enqueued using this table entry.

@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	 * have been dequeued, thus preserving in order delivery.
	 */
	if (unlikely(tcpu != next_cpu) &&
	    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
	    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
	     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
	      rflow->last_qtail)) >= 0)) {
		tcpu = next_cpu;
		rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
	}

	if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
	if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
		*rflowp = rflow;
		cpu = tcpu;
		goto done;

@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
|
@ -280,13 +280,14 @@ nodata:
|
|||
EXPORT_SYMBOL(__alloc_skb);
|
||||
|
||||
/**
|
||||
* build_skb - build a network buffer
|
||||
* __build_skb - build a network buffer
|
||||
* @data: data buffer provided by caller
|
||||
* @frag_size: size of fragment, or 0 if head was kmalloced
|
||||
* @frag_size: size of data, or 0 if head was kmalloced
|
||||
*
|
||||
* Allocate a new &sk_buff. Caller provides space holding head and
|
||||
* skb_shared_info. @data must have been allocated by kmalloc() only if
|
||||
* @frag_size is 0, otherwise data should come from the page allocator.
|
||||
* @frag_size is 0, otherwise data should come from the page allocator
|
||||
* or vmalloc()
|
||||
* The return is the new skb buffer.
|
||||
* On a failure the return is %NULL, and @data is not freed.
|
||||
* Notes :
|
||||
|
@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
|
|||
* before giving packet to stack.
|
||||
* RX rings only contains data buffers, not full skbs.
|
||||
*/
|
||||
struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
||||
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
|
||||
{
|
||||
struct skb_shared_info *shinfo;
|
||||
struct sk_buff *skb;
|
||||
|
@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|||
|
||||
memset(skb, 0, offsetof(struct sk_buff, tail));
|
||||
skb->truesize = SKB_TRUESIZE(size);
|
||||
skb->head_frag = frag_size != 0;
|
||||
atomic_set(&skb->users, 1);
|
||||
skb->head = data;
|
||||
skb->data = data;
|
||||
|
@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
|||
|
||||
return skb;
|
||||
}
|
||||
|
||||
/* build_skb() is wrapper over __build_skb(), that specifically
|
||||
* takes care of skb->head and skb->pfmemalloc
|
||||
* This means that if @frag_size is not zero, then @data must be backed
|
||||
* by a page fragment, not kmalloc() or vmalloc()
|
||||
*/
|
||||
struct sk_buff *build_skb(void *data, unsigned int frag_size)
|
||||
{
|
||||
struct sk_buff *skb = __build_skb(data, frag_size);
|
||||
|
||||
if (skb && frag_size) {
|
||||
skb->head_frag = 1;
|
||||
if (virt_to_head_page(data)->pfmemalloc)
|
||||
skb->pfmemalloc = 1;
|
||||
}
|
||||
return skb;
|
||||
}
|
||||
EXPORT_SYMBOL(build_skb);
|
||||
|
||||
struct netdev_alloc_cache {
|
||||
|
@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
|
|||
gfp_t gfp = gfp_mask;
|
||||
|
||||
if (order) {
|
||||
gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
|
||||
gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
|
||||
__GFP_NOMEMALLOC;
|
||||
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
|
||||
nc->frag.size = PAGE_SIZE << (page ? order : 0);
|
||||
}
|
||||
|
|
|
@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
|
|||
iph->saddr, iph->daddr);
|
||||
if (req) {
|
||||
nsk = dccp_check_req(sk, skb, req);
|
||||
reqsk_put(req);
|
||||
if (!nsk)
|
||||
reqsk_put(req);
|
||||
return nsk;
|
||||
}
|
||||
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
|
||||
|
|
|
@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
|
|||
&iph->daddr, inet6_iif(skb));
|
||||
if (req) {
|
||||
nsk = dccp_check_req(sk, skb, req);
|
||||
reqsk_put(req);
|
||||
if (!nsk)
|
||||
reqsk_put(req);
|
||||
return nsk;
|
||||
}
|
||||
nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
|
||||
|
|
|
@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
|
|||
if (child == NULL)
|
||||
goto listen_overflow;
|
||||
|
||||
inet_csk_reqsk_queue_unlink(sk, req);
|
||||
inet_csk_reqsk_queue_removed(sk, req);
|
||||
inet_csk_reqsk_queue_drop(sk, req);
|
||||
inet_csk_reqsk_queue_add(sk, req, child);
|
||||
out:
|
||||
return child;
|
||||
|
|
|
@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
|
|||
}
|
||||
EXPORT_SYMBOL(inet_rtx_syn_ack);
|
||||
|
||||
/* return true if req was found in the syn_table[] */
|
||||
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
|
||||
struct request_sock *req)
|
||||
{
|
||||
struct listen_sock *lopt = queue->listen_opt;
|
||||
struct request_sock **prev;
|
||||
bool found = false;
|
||||
|
||||
spin_lock(&queue->syn_wait_lock);
|
||||
|
||||
for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
|
||||
prev = &(*prev)->dl_next) {
|
||||
if (*prev == req) {
|
||||
*prev = req->dl_next;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&queue->syn_wait_lock);
|
||||
if (del_timer(&req->rsk_timer))
|
||||
reqsk_put(req);
|
||||
return found;
|
||||
}
|
||||
|
||||
void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
|
||||
{
|
||||
if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
|
||||
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
|
||||
reqsk_put(req);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
|
||||
|
||||
static void reqsk_timer_handler(unsigned long data)
|
||||
{
|
||||
struct request_sock *req = (struct request_sock *)data;
|
||||
|
|
|
@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
|
|||
req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
|
||||
if (req) {
|
||||
nsk = tcp_check_req(sk, skb, req, false);
|
||||
reqsk_put(req);
|
||||
if (!nsk)
|
||||
reqsk_put(req);
|
||||
return nsk;
|
||||
}
|
||||
|
||||
|
|
|
@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
|
|||
if (!child)
|
||||
goto listen_overflow;
|
||||
|
||||
inet_csk_reqsk_queue_unlink(sk, req);
|
||||
inet_csk_reqsk_queue_removed(sk, req);
|
||||
|
||||
inet_csk_reqsk_queue_drop(sk, req);
|
||||
inet_csk_reqsk_queue_add(sk, req, child);
|
||||
/* Warning: caller must not call reqsk_put(req);
|
||||
* child stole last reference on it.
|
||||
*/
|
||||
return child;
|
||||
|
||||
listen_overflow:
|
||||
|
|
|
@ -2812,39 +2812,65 @@ begin_fwd:
|
|||
}
|
||||
}
|
||||
|
||||
/* Send a fin. The caller locks the socket for us. This cannot be
|
||||
* allowed to fail queueing a FIN frame under any circumstances.
|
||||
/* We allow to exceed memory limits for FIN packets to expedite
|
||||
* connection tear down and (memory) recovery.
|
||||
* Otherwise tcp_send_fin() could be tempted to either delay FIN
|
||||
* or even be forced to close flow without any FIN.
|
||||
*/
|
||||
static void sk_forced_wmem_schedule(struct sock *sk, int size)
|
||||
{
|
||||
int amt, status;
|
||||
|
||||
if (size <= sk->sk_forward_alloc)
|
||||
return;
|
||||
amt = sk_mem_pages(size);
|
||||
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
|
||||
sk_memory_allocated_add(sk, amt, &status);
|
||||
}
|
||||
|
||||
/* Send a FIN. The caller locks the socket for us.
|
||||
* We should try to send a FIN packet really hard, but eventually give up.
|
||||
*/
|
||||
void tcp_send_fin(struct sock *sk)
|
||||
{
|
||||
struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct sk_buff *skb = tcp_write_queue_tail(sk);
|
||||
int mss_now;
|
||||
|
||||
/* Optimization, tack on the FIN if we have a queue of
|
||||
* unsent frames. But be careful about outgoing SACKS
|
||||
* and IP options.
|
||||
/* Optimization, tack on the FIN if we have one skb in write queue and
|
||||
* this skb was not yet sent, or we are under memory pressure.
|
||||
* Note: in the latter case, FIN packet will be sent after a timeout,
|
||||
* as TCP stack thinks it has already been transmitted.
|
||||
*/
|
||||
mss_now = tcp_current_mss(sk);
|
||||
|
||||
if (tcp_send_head(sk)) {
|
||||
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
|
||||
TCP_SKB_CB(skb)->end_seq++;
|
||||
if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
|
||||
coalesce:
|
||||
TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
|
||||
TCP_SKB_CB(tskb)->end_seq++;
|
||||
tp->write_seq++;
|
||||
} else {
|
||||
/* Socket is locked, keep trying until memory is available. */
|
||||
for (;;) {
|
||||
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
|
||||
if (skb)
|
||||
break;
|
||||
yield();
|
||||
if (!tcp_send_head(sk)) {
|
||||
/* This means tskb was already sent.
|
||||
* Pretend we included the FIN on previous transmit.
|
||||
* We need to set tp->snd_nxt to the value it would have
|
||||
* if FIN had been sent. This is because retransmit path
|
||||
* does not change tp->snd_nxt.
|
||||
*/
|
||||
tp->snd_nxt++;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
|
||||
if (unlikely(!skb)) {
|
||||
if (tskb)
|
||||
goto coalesce;
|
||||
return;
|
||||
}
|
||||
skb_reserve(skb, MAX_TCP_HEADER);
|
||||
sk_forced_wmem_schedule(sk, skb->truesize);
|
||||
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
|
||||
tcp_init_nondata_skb(skb, tp->write_seq,
|
||||
TCPHDR_ACK | TCPHDR_FIN);
|
||||
tcp_queue_skb(sk, skb);
|
||||
}
|
||||
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
|
||||
__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
|
||||
}
|
||||
|
||||
/* We get here when a process closes a file descriptor (either due to
|
||||
|
|
|
@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
|
|||
static int ip6gre_tunnel_init(struct net_device *dev)
|
||||
{
|
||||
struct ip6_tnl *tunnel;
|
||||
int i;
|
||||
|
||||
tunnel = netdev_priv(dev);
|
||||
|
||||
|
@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev)
|
|||
if (ipv6_addr_any(&tunnel->parms.raddr))
|
||||
dev->header_ops = &ip6gre_header_ops;
|
||||
|
||||
dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
|
||||
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
if (!dev->tstats)
|
||||
return -ENOMEM;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct pcpu_sw_netstats *ip6gre_tunnel_stats;
|
||||
ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
|
||||
u64_stats_init(&ip6gre_tunnel_stats->syncp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
|
|||
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
|
||||
if (req) {
|
||||
nsk = tcp_check_req(sk, skb, req, false);
|
||||
reqsk_put(req);
|
||||
if (!nsk)
|
||||
reqsk_put(req);
|
||||
return nsk;
|
||||
}
|
||||
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
|
||||
|
|
|
@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
|
|||
return rt;
|
||||
}
|
||||
|
||||
static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
|
||||
{
|
||||
return rcu_dereference_rtnl(dev->mpls_ptr);
|
||||
}
|
||||
|
||||
static bool mpls_output_possible(const struct net_device *dev)
|
||||
{
|
||||
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
|
||||
|
@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
|
|||
struct mpls_route *rt;
|
||||
struct mpls_entry_decoded dec;
|
||||
struct net_device *out_dev;
|
||||
struct mpls_dev *mdev;
|
||||
unsigned int hh_len;
|
||||
unsigned int new_header_size;
|
||||
unsigned int mtu;
|
||||
|
@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
|
|||
|
||||
/* Careful this entire function runs inside of an rcu critical section */
|
||||
|
||||
mdev = mpls_dev_get(dev);
|
||||
if (!mdev || !mdev->input_enabled)
|
||||
goto drop;
|
||||
|
||||
if (skb->pkt_type != PACKET_HOST)
|
||||
goto drop;
|
||||
|
||||
|
@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg)
|
|||
if (!dev)
|
||||
goto errout;
|
||||
|
||||
/* For now just support ethernet devices */
|
||||
/* Ensure this is a supported device */
|
||||
err = -EINVAL;
|
||||
if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
|
||||
if (!mpls_dev_get(dev))
|
||||
goto errout;
|
||||
|
||||
err = -EINVAL;
|
||||
|
@ -428,10 +438,89 @@ errout:
|
|||
return err;
|
||||
}
|
||||
|
||||
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
|
||||
(&((struct mpls_dev *)0)->field)
|
||||
|
||||
static const struct ctl_table mpls_dev_table[] = {
|
||||
{
|
||||
.procname = "input",
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
.data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
static int mpls_dev_sysctl_register(struct net_device *dev,
|
||||
struct mpls_dev *mdev)
|
||||
{
|
||||
char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
|
||||
struct ctl_table *table;
|
||||
int i;
|
||||
|
||||
table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
|
||||
if (!table)
|
||||
goto out;
|
||||
|
||||
/* Table data contains only offsets relative to the base of
|
||||
* the mdev at this point, so make them absolute.
|
||||
*/
|
||||
for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
|
||||
table[i].data = (char *)mdev + (uintptr_t)table[i].data;
|
||||
|
||||
snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
|
||||
|
||||
mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
|
||||
if (!mdev->sysctl)
|
||||
goto free;
|
||||
|
||||
return 0;
|
||||
|
||||
free:
|
||||
kfree(table);
|
||||
out:
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
|
||||
{
|
||||
struct ctl_table *table;
|
||||
|
||||
table = mdev->sysctl->ctl_table_arg;
|
||||
unregister_net_sysctl_table(mdev->sysctl);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
static struct mpls_dev *mpls_add_dev(struct net_device *dev)
|
||||
{
|
||||
struct mpls_dev *mdev;
|
||||
int err = -ENOMEM;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
|
||||
if (!mdev)
|
||||
return ERR_PTR(err);
|
||||
|
||||
err = mpls_dev_sysctl_register(dev, mdev);
|
||||
if (err)
|
||||
goto free;
|
||||
|
||||
rcu_assign_pointer(dev->mpls_ptr, mdev);
|
||||
|
||||
return mdev;
|
||||
|
||||
free:
|
||||
kfree(mdev);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static void mpls_ifdown(struct net_device *dev)
|
||||
{
|
||||
struct mpls_route __rcu **platform_label;
|
||||
struct net *net = dev_net(dev);
|
||||
struct mpls_dev *mdev;
|
||||
unsigned index;
|
||||
|
||||
platform_label = rtnl_dereference(net->mpls.platform_label);
|
||||
|
@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev)
|
|||
continue;
|
||||
rt->rt_dev = NULL;
|
||||
}
|
||||
|
||||
mdev = mpls_dev_get(dev);
|
||||
if (!mdev)
|
||||
return;
|
||||
|
||||
mpls_dev_sysctl_unregister(mdev);
|
||||
|
||||
RCU_INIT_POINTER(dev->mpls_ptr, NULL);
|
||||
|
||||
kfree(mdev);
|
||||
}
|
||||
|
||||
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
|
||||
void *ptr)
|
||||
{
|
||||
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
||||
struct mpls_dev *mdev;
|
||||
|
||||
switch(event) {
|
||||
case NETDEV_REGISTER:
|
||||
/* For now just support ethernet devices */
|
||||
if ((dev->type == ARPHRD_ETHER) ||
|
||||
(dev->type == ARPHRD_LOOPBACK)) {
|
||||
mdev = mpls_add_dev(dev);
|
||||
if (IS_ERR(mdev))
|
||||
return notifier_from_errno(PTR_ERR(mdev));
|
||||
}
|
||||
break;
|
||||
|
||||
case NETDEV_UNREGISTER:
|
||||
mpls_ifdown(dev);
|
||||
break;
|
||||
|
@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla,
|
|||
if ((dec.bos != bos) || dec.ttl || dec.tc)
|
||||
return -EINVAL;
|
||||
|
||||
switch (dec.label) {
|
||||
case LABEL_IMPLICIT_NULL:
|
||||
/* RFC3032: This is a label that an LSR may
|
||||
* assign and distribute, but which never
|
||||
* actually appears in the encapsulation.
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
label[i] = dec.label;
|
||||
}
|
||||
*labels = nla_labels;
|
||||
|
@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct ctl_table mpls_table[] = {
|
||||
static const struct ctl_table mpls_table[] = {
|
||||
{
|
||||
.procname = "platform_labels",
|
||||
.data = NULL,
|
||||
|
|
|
@ -22,6 +22,12 @@ struct mpls_entry_decoded {
|
|||
u8 bos;
|
||||
};
|
||||
|
||||
struct mpls_dev {
|
||||
int input_enabled;
|
||||
|
||||
struct ctl_table_header *sysctl;
|
||||
};
|
||||
|
||||
struct sk_buff;
|
||||
|
||||
static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
|
||||
|
|
|
@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
|||
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
|
||||
goto nla_put_failure;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb,
		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
			goto nla_put_failure;
		break;
	default:
		break;
	}

	return 0;

@@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
	if (data == NULL)
		return NULL;

	skb = build_skb(data, size);
	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else {
		skb->head_frag = 0;
	else
		skb->destructor = netlink_skb_destructor;
	}

	return skb;
}

@@ -2143,7 +2143,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			tipc_node_put(node);
			if (err)
				goto out;

@@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref)
	}
	saddr->scope = -TIPC_NODE_SCOPE;
	kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
	sk_release_kernel(sk);
	sock_release(sock);
	con->sock = NULL;
}

@@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
	if (ret < 0)
		return NULL;

	sk_change_net(sock->sk, s->net);

	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)

@@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sk_release_kernel(sock->sk);
	sock_release(sock);
	return NULL;
}

@@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	u32 dnode, dport = 0;
	int err = -TIPC_ERR_NO_PORT;
	int err;
	struct sk_buff *skb;
	struct tipc_sock *tsk;
	struct tipc_net *tn;
	struct sock *sk;

	while (skb_queue_len(inputq)) {
		err = -TIPC_ERR_NO_PORT;
		skb = NULL;
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;


struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/*
	 * Socket ?
	 */
	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 * PF_UNIX ?
		 */
		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 * Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);

@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp)
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;

@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/*
		 * Do we have file descriptors ?
		 */
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 * Process the descriptors of this socket
			 */
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/*
				 * Get the socket the fd matches
				 * if it indeed does so
				 */
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}

@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	else {
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));

@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk)
static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */

@@ -263,8 +255,7 @@ static bool gc_in_progress;

void wait_for_unix_gc(void)
{
	/*
	 * If number of inflight sockets is insane,
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)

@@ -288,8 +279,7 @@ void unix_gc(void)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection. Only
	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *

@@ -320,15 +310,13 @@ void unix_gc(void)
		}
	}

	/*
	 * Now remove all internal in-flight reference to children of
	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates,
	/* Restore the references for children of all candidates,
	 * which have remaining references. Do this recursively, so
	 * only those remain, which form cyclic references.
	 *

@@ -350,8 +338,7 @@ void unix_gc(void)
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up a
	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {

@@ -360,8 +347,7 @@ void unix_gc(void)
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage. Restore original
	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */