Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix crash in ipvs tot_stats estimator, from Julian Anastasov.
 2) Fix OOPS in nf_nat on netns removal, from Florian Westphal.
 3) Really really really fix locking issues in slip and slcan tty write
    wakeups, from Tyler Hall.
 4) Fix checksum offloading in fec driver, from Fugang Duan.
 5) Off by one in BPF instruction limit test, from Kees Cook.
 6) Need to clear all TSO capability flags when doing software TSO in
    tg3 driver, from Prashant Sreedharan.
 7) Fix memory leak in vlan_reorder_header() error path, from Li RongQing.
 8) Fix various bugs in xen-netfront and xen-netback multiqueue support,
    from David Vrabel and Wei Liu.
 9) Fix deadlock in cxgb4 driver, from Li RongQing.
10) Prevent double free of no-cache DST entries, from Eric Dumazet.
11) Bad csum_start handling in skb_segment() leads to crashes when
    forwarding, from Tom Herbert.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  net: fix setting csum_start in skb_segment()
  ipv4: fix dst race in sk_dst_get()
  net: filter: Use kcalloc/kmalloc_array to allocate arrays
  trivial: net: filter: Change kerneldoc parameter order
  trivial: net: filter: Fix typo in comment
  net: allwinner: emac: Add missing free_irq
  cxgb4: use dev_port to identify ports
  xen-netback: bookkeep number of active queues in our own module
  tg3: Change nvram command timeout value to 50ms
  cxgb4: Not need to hold the adap_rcu_lock lock when read adap_rcu_list
  be2net: fix qnq mode detection on VFs
  of: mdio: fixup of_phy_register_fixed_link parsing of new bindings
  at86rf230: fix irq setup
  net: phy: at803x: fix coccinelle warnings
  net/mlx4_core: Fix the error flow when probing with invalid VF configuration
  tulip: Poll link status more frequently for Comet chips
  net: huawei_cdc_ncm: increase command buffer size
  drivers: net: cpsw: fix dual EMAC stall when connected to same switch
  xen-netfront: recreate queues correctly when reconnecting
  xen-netfront: fix oops when disconnected from backend
  ...

This commit is contained in commit f40ede392d.

68 changed files with 749 additions and 374 deletions

@@ -17,6 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
+#define _GNU_SOURCE
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
@@ -46,12 +47,14 @@
 #define CLOCK_INVALID -1
 #endif
 
-/* When glibc offers the syscall, this will go away. */
+/* clock_adjtime is not available in GLIBC < 2.14 */
+#if !__GLIBC_PREREQ(2, 14)
 #include <sys/syscall.h>
 static int clock_adjtime(clockid_t id, struct timex *tx)
 {
	return syscall(__NR_clock_adjtime, id, tx);
 }
+#endif
 
 static clockid_t get_clockid(int fd)
 {

MAINTAINERS

@@ -3189,14 +3189,6 @@ L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/eata_pio.*
 
-EBTABLES
-L:	netfilter-devel@vger.kernel.org
-W:	http://ebtables.sourceforge.net/
-S:	Orphan
-F:	include/linux/netfilter_bridge/ebt_*.h
-F:	include/uapi/linux/netfilter_bridge/ebt_*.h
-F:	net/bridge/netfilter/ebt*.c
-
 EC100 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
@@ -6105,12 +6097,11 @@ F:	Documentation/networking/s2io.txt
 F:	Documentation/networking/vxge.txt
 F:	drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES
+NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
 M:	Pablo Neira Ayuso <pablo@netfilter.org>
 M:	Patrick McHardy <kaber@trash.net>
 M:	Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:	netfilter-devel@vger.kernel.org
 L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/

@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
	  also to the configuration option of the driver for your particular
	  card, below.
 
-if ISDN_DRV_HISAX!=n
+if ISDN_DRV_HISAX
 
 comment "D-channel protocol features"
 
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
	  This enables HiSax support for the Formula-n enter:now PCI
	  ISDN card.
 
-endif
-
-if ISDN_DRV_HISAX
-
 config HISAX_DEBUG
	bool "HiSax debugging"
	help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
	  (the latter also needs you to select "ISA Plug and Play support"
	  from the menu "Plug and Play configuration")
 
-config HISAX_AVM_A1_PCMCIA
-	bool
-	depends on HISAX_AVM_A1_CS
-	default y
-
 endif
 
 endmenu

@@ -1025,10 +1025,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
+#define BOND_ENC_FEATURES	(NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
+				 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+
 static void bond_compute_features(struct bonding *bond)
 {
	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+	netdev_features_t enc_features  = BOND_ENC_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
@@ -1044,6 +1048,9 @@ static void bond_compute_features(struct bonding *bond)
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
+		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);
		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;
@@ -1054,6 +1061,7 @@ static void bond_compute_features(struct bonding *bond)
 
 done:
	bond_dev->vlan_features = vlan_features;
+	bond_dev->hw_enc_features = enc_features;
	bond_dev->hard_header_len = max_hard_header_len;
	bond_dev->gso_max_segs = gso_max_segs;
	netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -3975,6 +3983,7 @@ void bond_setup(struct net_device *bond_dev)
				NETIF_F_HW_VLAN_CTAG_FILTER;
 
	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
+	bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	bond_dev->features |= bond_dev->hw_features;
 }
 

@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/skb.h>
 
@@ -85,6 +86,7 @@ struct slcan {
	struct tty_struct	*tty;		/* ptr to TTY structure	     */
	struct net_device	*dev;		/* easy for intr handling    */
	spinlock_t		lock;
+	struct work_struct	tx_work;	/* Flushes transmit buffer   */
 
	/* These are pointers to the malloc()ed frame buffers. */
	unsigned char		rbuff[SLC_MTU];	/* receiver buffer	     */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
	sl->dev->stats.tx_bytes += cf->can_dlc;
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
 {
+	struct slcan *sl = container_of(work, struct slcan, tx_work);
	int actual;
-	struct slcan *sl = (struct slcan *) tty->disc_data;
 
-	/* First make sure we're connected. */
-	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+	spin_lock_bh(&sl->lock);
+	/* First make sure we're connected. */
+	if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
+		spin_unlock_bh(&sl->lock);
		return;
+	}
 
-	spin_lock_bh(&sl->lock);
	if (sl->xleft <= 0)  {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		spin_unlock_bh(&sl->lock);
		netif_wake_queue(sl->dev);
		return;
	}
 
-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
	spin_unlock_bh(&sl->lock);
 }
 
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+	struct slcan *sl = tty->disc_data;
+
+	schedule_work(&sl->tx_work);
+}
+
 /* Send a can_frame to a TTY queue. */
 static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
	sl->magic = SLCAN_MAGIC;
	sl->dev	= dev;
	spin_lock_init(&sl->lock);
+	INIT_WORK(&sl->tx_work, slcan_transmit);
	slcan_devs[i] = dev;
 
	return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
	if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
		return;
 
+	spin_lock_bh(&sl->lock);
	tty->disc_data = NULL;
	sl->tty = NULL;
+	spin_unlock_bh(&sl->lock);
+
+	flush_work(&sl->tx_work);
 
	/* Flush network side */
	unregister_netdev(sl->dev);
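
The slcan change above (mirrored in slip.c further down) moves the actual tty write out of the write_wakeup callback, which the tty layer may invoke with its own locks held, and into a work item running in process context. That breaks the lock-order inversion between the driver spinlock and the tty locks. A minimal sketch of the pattern, using illustrative names rather than the real slcan symbols:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct demo_chan {
	spinlock_t lock;		/* protects the transmit buffer */
	struct work_struct tx_work;	/* performs the real tty write */
};

/* Called by the tty layer, possibly with tty locks already held:
 * only schedule the work here, never take demo_chan.lock or write,
 * so the tty-lock -> driver-lock and driver-lock -> tty-lock
 * orderings can no longer deadlock against each other. */
static void demo_write_wakeup(struct demo_chan *ch)
{
	schedule_work(&ch->tx_work);	/* safe from any context */
}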

@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
 
	ret = emac_mdio_probe(dev);
	if (ret < 0) {
+		free_irq(dev->irq, dev);
		netdev_err(dev, "cannot probe MDIO bus\n");
		return ret;
	}

@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
	return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 100
+#define NVRAM_CMD_TIMEOUT 5000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 
	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
-		udelay(10);
+		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
		netif_wake_queue(tp->dev);
	}
 
-	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
-	if (IS_ERR(segs))
+	segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;
 
	do {

@@ -4057,22 +4057,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 EXPORT_SYMBOL(cxgb4_unregister_uld);
 
 /* Check if netdev on which event is occured belongs to us or not. Return
- * suceess (1) if it belongs otherwise failure (0).
+ * success (true) if it belongs otherwise failure (false).
+ * Called with rcu_read_lock() held.
  */
-static int cxgb4_netdev(struct net_device *netdev)
+static bool cxgb4_netdev(const struct net_device *netdev)
 {
	struct adapter *adap;
	int i;
 
-	spin_lock(&adap_rcu_lock);
	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
		for (i = 0; i < MAX_NPORTS; i++)
-			if (adap->port[i] == netdev) {
-				spin_unlock(&adap_rcu_lock);
-				return 1;
-			}
-	spin_unlock(&adap_rcu_lock);
-	return 0;
+			if (adap->port[i] == netdev)
+				return true;
+	return false;
 }
 
 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -6396,6 +6393,7 @@ static void remove_one(struct pci_dev *pdev)
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
+		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);

@@ -3962,6 +3962,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+		adap->port[i]->dev_port = j;
 
		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?

@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
 {
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
-	int next_tick = 60*HZ;
+	int next_tick = 2*HZ;
 
	if (tulip_debug > 1)
		netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",

@@ -557,9 +557,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
 #define be_pvid_tagging_enabled(adapter)	(adapter->pvid)
 
 /* Is BE in QNQ multi-channel mode */
-#define be_is_qnq_mode(adapter)		(adapter->mc_type == FLEX10 || \
-					 adapter->mc_type == vNIC1 || \
-					 adapter->mc_type == UFP)
+#define be_is_qnq_mode(adapter)		(adapter->function_mode & QNQ_MODE)
 
 #define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

@@ -1091,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {
  * based on the skew/IPL.
  */
 #define RDMA_ENABLED			0x4
-#define FLEX10_MODE			0x400
+#define QNQ_MODE			0x400
 #define VNIC_MODE			0x20000
 #define UMC_ENABLED			0x1000000
 struct be_cmd_req_query_fw_cfg {

@@ -3254,9 +3254,9 @@ err:
 
 static u8 be_convert_mc_type(u32 function_mode)
 {
-	if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
-	else if (function_mode & FLEX10_MODE)
+	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;

@@ -320,6 +320,11 @@ static void *swap_buffer(void *bufaddr, int len)
	return bufaddr;
 }
 
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
 static int
 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -330,7 +335,8 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;
 
-	ip_hdr(skb)->check = 0;
+	if (is_ipv4_pkt(skb))
+		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
 
	return 0;

@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
			DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
		},
	},
+	{
+		.ident = "FUJITSU SIEMENS A8NE-FM",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+		},
+	},
	{}
 };
 

@@ -2439,7 +2439,8 @@ slave_start:
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-			goto err_close;
+			err = -EINVAL;
+			goto err_master_mfunc;
		}
		for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
			unsigned j;

@@ -1212,7 +1212,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
	for_each_slave(priv, cpsw_slave_open, priv);
 
	/* Add default VLAN */
-	cpsw_add_default_vlan(priv);
+	if (!priv->data.dual_emac)
+		cpsw_add_default_vlan(priv);
+	else
+		cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+				  ALE_ALL_PORTS << priv->host_port,
+				  ALE_ALL_PORTS << priv->host_port, 0, 0);
 
	if (!cpsw_common_res_usage_state(priv)) {
		/* setup tx dma to fixed prio and zero offset */

@@ -2191,7 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
 static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
	int ret;
-	int i;
	struct net_device *dev;
	struct tile_net_priv *priv;
 

@@ -189,7 +189,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
-		net_device->recv_buf_gpadl_handle = 0;
+		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the receive buffer */

@@ -1137,6 +1137,8 @@ static int at86rf230_probe(struct spi_device *spi)
	dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
	irq_type = irq_get_trigger_type(spi->irq);
+	if (!irq_type)
+		irq_type = IRQF_TRIGGER_RISING;
	if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
		irq_worker = at86rf230_irqwork;
		irq_handler = at86rf230_isr;
@@ -1168,7 +1170,8 @@ static int at86rf230_probe(struct spi_device *spi)
	if (rc)
		goto err_hw_init;
 
-	rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+	rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
+			      IRQF_SHARED | irq_type,
			      dev_name(&spi->dev), lp);
	if (rc)
		goto err_hw_init;

@@ -16,9 +16,13 @@
 #include <linux/string.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #define AT803X_INTR_ENABLE			0x12
 #define AT803X_INTR_STATUS			0x13
+#define AT803X_SMART_SPEED			0x14
+#define AT803X_LED_CONTROL			0x18
 #define AT803X_WOL_ENABLE			0x01
 #define AT803X_DEVICE_ADDR			0x03
 #define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
@@ -35,10 +39,52 @@
 #define AT803X_DEBUG_SYSTEM_MODE_CTRL		0x05
 #define AT803X_DEBUG_RGMII_TX_CLK_DLY		BIT(8)
 
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8035_PHY_ID 0x004dd072
+
 MODULE_DESCRIPTION("Atheros 803x PHY driver");
 MODULE_AUTHOR("Matus Ujhelyi");
 MODULE_LICENSE("GPL");
 
+struct at803x_priv {
+	bool phy_reset:1;
+	struct gpio_desc *gpiod_reset;
+};
+
+struct at803x_context {
+	u16 bmcr;
+	u16 advertise;
+	u16 control1000;
+	u16 int_enable;
+	u16 smart_speed;
+	u16 led_control;
+};
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+				struct at803x_context *context)
+{
+	context->bmcr = phy_read(phydev, MII_BMCR);
+	context->advertise = phy_read(phydev, MII_ADVERTISE);
+	context->control1000 = phy_read(phydev, MII_CTRL1000);
+	context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+	context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+	context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+				   const struct at803x_context *context)
+{
+	phy_write(phydev, MII_BMCR, context->bmcr);
+	phy_write(phydev, MII_ADVERTISE, context->advertise);
+	phy_write(phydev, MII_CTRL1000, context->control1000);
+	phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+	phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+	phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
 static int at803x_set_wol(struct phy_device *phydev,
			  struct ethtool_wolinfo *wol)
 {
@@ -142,6 +188,26 @@ static int at803x_resume(struct phy_device *phydev)
	return 0;
 }
 
+static int at803x_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->dev;
+	struct at803x_priv *priv;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->gpiod_reset = devm_gpiod_get(dev, "reset");
+	if (IS_ERR(priv->gpiod_reset))
+		priv->gpiod_reset = NULL;
+	else
+		gpiod_direction_output(priv->gpiod_reset, 1);
+
+	phydev->priv = priv;
+
+	return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
	int ret;
@@ -189,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
	return err;
 }
 
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+	struct at803x_priv *priv = phydev->priv;
+
+	/*
+	 * Conduct a hardware reset for AT8030 every time a link loss is
+	 * signalled. This is necessary to circumvent a hardware bug that
+	 * occurs when the cable is unplugged while TX packets are pending
+	 * in the FIFO. In such cases, the FIFO enters an error mode it
+	 * cannot recover from by software.
+	 */
+	if (phydev->drv->phy_id == ATH8030_PHY_ID) {
+		if (phydev->state == PHY_NOLINK) {
+			if (priv->gpiod_reset && !priv->phy_reset) {
+				struct at803x_context context;
+
+				at803x_context_save(phydev, &context);
+
+				gpiod_set_value(priv->gpiod_reset, 0);
+				msleep(1);
+				gpiod_set_value(priv->gpiod_reset, 1);
+				msleep(1);
+
+				at803x_context_restore(phydev, &context);
+
+				dev_dbg(&phydev->dev, "%s(): phy was reset\n",
+					__func__);
+				priv->phy_reset = true;
+			}
+		} else {
+			priv->phy_reset = false;
+		}
+	}
+}
+
 static struct phy_driver at803x_driver[] = {
 {
	/* ATHEROS 8035 */
-	.phy_id		= 0x004dd072,
-	.name		= "Atheros 8035 ethernet",
-	.phy_id_mask	= 0xffffffef,
-	.config_init	= at803x_config_init,
-	.set_wol	= at803x_set_wol,
-	.get_wol	= at803x_get_wol,
-	.suspend	= at803x_suspend,
-	.resume		= at803x_resume,
-	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
-	.driver		= {
+	.phy_id			= ATH8035_PHY_ID,
+	.name			= "Atheros 8035 ethernet",
+	.phy_id_mask		= 0xffffffef,
+	.probe			= at803x_probe,
+	.config_init		= at803x_config_init,
+	.link_change_notify	= at803x_link_change_notify,
+	.set_wol		= at803x_set_wol,
+	.get_wol		= at803x_get_wol,
+	.suspend		= at803x_suspend,
+	.resume			= at803x_resume,
+	.features		= PHY_GBIT_FEATURES,
+	.flags			= PHY_HAS_INTERRUPT,
+	.config_aneg		= genphy_config_aneg,
+	.read_status		= genphy_read_status,
+	.driver			= {
		.owner = THIS_MODULE,
	},
 }, {
	/* ATHEROS 8030 */
-	.phy_id		= 0x004dd076,
-	.name		= "Atheros 8030 ethernet",
-	.phy_id_mask	= 0xffffffef,
-	.config_init	= at803x_config_init,
-	.set_wol	= at803x_set_wol,
-	.get_wol	= at803x_get_wol,
-	.suspend	= at803x_suspend,
-	.resume		= at803x_resume,
-	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
-	.driver		= {
+	.phy_id			= ATH8030_PHY_ID,
+	.name			= "Atheros 8030 ethernet",
+	.phy_id_mask		= 0xffffffef,
+	.probe			= at803x_probe,
+	.config_init		= at803x_config_init,
+	.link_change_notify	= at803x_link_change_notify,
+	.set_wol		= at803x_set_wol,
+	.get_wol		= at803x_get_wol,
+	.suspend		= at803x_suspend,
+	.resume			= at803x_resume,
+	.features		= PHY_GBIT_FEATURES,
+	.flags			= PHY_HAS_INTERRUPT,
+	.config_aneg		= genphy_config_aneg,
+	.read_status		= genphy_read_status,
+	.driver			= {
		.owner = THIS_MODULE,
	},
 }, {
	/* ATHEROS 8031 */
-	.phy_id		= 0x004dd074,
-	.name		= "Atheros 8031 ethernet",
-	.phy_id_mask	= 0xffffffef,
-	.config_init	= at803x_config_init,
-	.set_wol	= at803x_set_wol,
-	.get_wol	= at803x_get_wol,
-	.suspend	= at803x_suspend,
-	.resume		= at803x_resume,
-	.features	= PHY_GBIT_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
-	.ack_interrupt	= &at803x_ack_interrupt,
-	.config_intr	= &at803x_config_intr,
-	.driver		= {
+	.phy_id			= ATH8031_PHY_ID,
+	.name			= "Atheros 8031 ethernet",
+	.phy_id_mask		= 0xffffffef,
+	.probe			= at803x_probe,
+	.config_init		= at803x_config_init,
+	.link_change_notify	= at803x_link_change_notify,
+	.set_wol		= at803x_set_wol,
+	.get_wol		= at803x_get_wol,
+	.suspend		= at803x_suspend,
+	.resume			= at803x_resume,
+	.features		= PHY_GBIT_FEATURES,
+	.flags			= PHY_HAS_INTERRUPT,
+	.config_aneg		= genphy_config_aneg,
+	.read_status		= genphy_read_status,
+	.ack_interrupt		= &at803x_ack_interrupt,
+	.config_intr		= &at803x_config_intr,
+	.driver			= {
		.owner = THIS_MODULE,
	},
 } };
@@ -260,9 +367,9 @@ module_init(atheros_init);
 module_exit(atheros_exit);
 
 static struct mdio_device_id __maybe_unused atheros_tbl[] = {
-	{ 0x004dd076, 0xffffffef },
-	{ 0x004dd074, 0xffffffef },
-	{ 0x004dd072, 0xffffffef },
+	{ ATH8030_PHY_ID, 0xffffffef },
+	{ ATH8031_PHY_ID, 0xffffffef },
+	{ ATH8035_PHY_ID, 0xffffffef },
	{ }
 };
 

@@ -720,6 +720,9 @@ void phy_state_machine(struct work_struct *work)
 
	mutex_lock(&phydev->lock);
 
+	if (phydev->drv->link_change_notify)
+		phydev->drv->link_change_notify(phydev);
+
	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:

@@ -83,6 +83,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include "slip.h"
 #ifdef CONFIG_INET
 #include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
 #endif
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slip_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slip_transmit(struct work_struct *work)
 {
+	struct slip *sl = container_of(work, struct slip, tx_work);
	int actual;
-	struct slip *sl = tty->disc_data;
 
-	/* First make sure we're connected. */
-	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
+	spin_lock_bh(&sl->lock);
+	/* First make sure we're connected. */
+	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
+		spin_unlock_bh(&sl->lock);
		return;
+	}
 
	if (sl->xleft <= 0)  {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
-		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+		spin_unlock_bh(&sl->lock);
		sl_unlock(sl);
		return;
	}
 
-	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
+	spin_unlock_bh(&sl->lock);
+}
+
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slip_write_wakeup(struct tty_struct *tty)
+{
+	struct slip *sl = tty->disc_data;
+
+	schedule_work(&sl->tx_work);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
 {
	struct slip *sl = netdev_priv(dev);
 
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
	sl->magic       = SLIP_MAGIC;
	sl->dev	      	= dev;
	spin_lock_init(&sl->lock);
+	INIT_WORK(&sl->tx_work, slip_transmit);
	sl->mode        = SL_MODE_DEFAULT;
 #ifdef CONFIG_SLIP_SMART
	/* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
		return;
 
+	spin_lock_bh(&sl->lock);
	tty->disc_data = NULL;
	sl->tty = NULL;
+	spin_unlock_bh(&sl->lock);
+
+	flush_work(&sl->tx_work);
 
	/* VSV = very important to remove timers */
 #ifdef CONFIG_SLIP_SMART

@@ -53,6 +53,7 @@ struct slip {
  struct tty_struct	*tty;		/* ptr to TTY structure		*/
  struct net_device	*dev;		/* easy for intr handling	*/
  spinlock_t		lock;
+  struct work_struct	tx_work;	/* Flushes transmit buffer	*/
 
 #ifdef SL_INCLUDE_CSLIP
  struct slcompress	*slcomp;	/* for header compression 	*/

@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
	ctx = drvstate->ctx;
 
	if (usbnet_dev->status)
-		/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
-		 * decimal (0x100)"
+		/* The wMaxCommand buffer must be big enough to hold
+		 * any message from the modem. Experience has shown
+		 * that some replies are more than 256 bytes long
		 */
		subdriver = usb_cdc_wdm_register(ctx->control,
						 &usbnet_dev->status->desc,
-						 256, /* wMaxCommand */
+						 1024, /* wMaxCommand */
						 huawei_cdc_ncm_wdm_manage_power);
	if (IS_ERR(subdriver)) {
		ret = PTR_ERR(subdriver);

@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);
 
-	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
-				    VMXNET3_DEF_RX_RING_SIZE,
+	err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+				    adapter->rx_ring_size,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
	adapter->netdev = netdev;
	adapter->pdev = pdev;
 
+	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+
	spin_lock_init(&adapter->cmd_lock);
	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
					     sizeof(struct vmxnet3_adapter),

@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
	param->rx_mini_max_pending = 0;
	param->rx_jumbo_max_pending = 0;
 
-	param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
-	param->tx_pending = adapter->tx_queue[0].tx_ring.size;
+	param->rx_pending = adapter->rx_ring_size;
+	param->tx_pending = adapter->tx_ring_size;
	param->rx_mini_pending = 0;
	param->rx_jumbo_pending = 0;
 }
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
			 * size */
			netdev_err(netdev, "failed to apply new sizes, "
				   "try the default ones\n");
+			new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+			new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
			err = vmxnet3_create_queues(adapter,
-						    VMXNET3_DEF_TX_RING_SIZE,
-						    VMXNET3_DEF_RX_RING_SIZE,
+						    new_tx_ring_size,
+						    new_rx_ring_size,
						    VMXNET3_DEF_RX_RING_SIZE);
			if (err) {
				netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
			netdev_err(netdev, "failed to re-activate, error %d."
				   " Closing it\n", err);
	}
+	adapter->tx_ring_size = new_tx_ring_size;
+	adapter->rx_ring_size = new_rx_ring_size;
 
 out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
	u32 link_speed; /* in mbps */
 
	u64     tx_timeout_count;
+
+	/* Ring sizes */
+	u32 tx_ring_size;
+	u32 rx_ring_size;
+
	struct work_struct work;
 
	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

@@ -36,7 +36,7 @@ config B43_SSB
 choice
	prompt "Supported bus types"
	depends on B43
-	default B43_BCMA_AND_SSB
+	default B43_BUSES_BCMA_AND_SSB
 
 config B43_BUSES_BCMA_AND_SSB
	bool "BCMA and SSB"

@@ -5221,6 +5221,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
	/* We don't support 5 GHz on some PHYs yet */
	switch (dev->phy.type) {
	case B43_PHYTYPE_A:
+	case B43_PHYTYPE_G:
	case B43_PHYTYPE_N:
	case B43_PHYTYPE_LP:
	case B43_PHYTYPE_HT:

@@ -811,9 +811,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
		break;
	case B43_PHYTYPE_G:
		status.band = IEEE80211_BAND_2GHZ;
-		/* chanid is the radio channel cookie value as used
-		 * to tune the radio. */
-		status.freq = chanid + 2400;
+		/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
+		 * has been modified to be compatible with N-PHY and others.
+		 */
+		if (dev->fw.rev >= 508)
+			status.freq = ieee80211_channel_to_frequency(chanid, status.band);
+		else
+			status.freq = chanid + 2400;
		break;
	case B43_PHYTYPE_N:
	case B43_PHYTYPE_LP:

@@ -50,7 +50,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
		return -1;
	}
	mapping.len = size;
-	memcpy(skb->cb, &mapping, sizeof(mapping));
+	mwifiex_store_mapping(skb, &mapping);
	return 0;
 }
 
@@ -60,7 +60,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
	struct pcie_service_card *card = adapter->card;
	struct mwifiex_dma_mapping mapping;
 
-	MWIFIEX_SKB_PACB(skb, &mapping);
+	mwifiex_get_mapping(skb, &mapping);
	pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
 }
 

@@ -20,32 +20,55 @@
 #ifndef _MWIFIEX_UTIL_H_
 #define _MWIFIEX_UTIL_H_
 
-static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
-{
-	return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t));
-}
-
-static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
-{
-	return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
-}
-
 struct mwifiex_dma_mapping {
	dma_addr_t addr;
	size_t len;
 };
 
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
-				    struct mwifiex_dma_mapping *mapping)
+struct mwifiex_cb {
+	struct mwifiex_dma_mapping dma_mapping;
+	union {
+		struct mwifiex_rxinfo rx_info;
+		struct mwifiex_txinfo tx_info;
+	};
+};
+
+static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
 {
-	memcpy(mapping, skb->cb, sizeof(*mapping));
+	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+	BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
+	return &cb->rx_info;
+}
+
+static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
+{
+	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+	return &cb->tx_info;
+}
+
+static inline void mwifiex_store_mapping(struct sk_buff *skb,
+					 struct mwifiex_dma_mapping *mapping)
+{
+	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+	memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
+}
+
+static inline void mwifiex_get_mapping(struct sk_buff *skb,
+				       struct mwifiex_dma_mapping *mapping)
+{
+	struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+	memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
 }
 
 static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
 {
	struct mwifiex_dma_mapping mapping;
 
-	MWIFIEX_SKB_PACB(skb, &mapping);
+	mwifiex_get_mapping(skb, &mapping);
 
	return mapping.addr;
 }
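
The mwifiex change above replaces hand-computed byte offsets into skb->cb with one struct overlaid on the buffer, a union for the mutually exclusive rx/tx metadata, and a BUILD_BUG_ON size guard. A sketch of that overlay pattern under assumed, illustrative names (demo_cb is not a mwifiex type):

#include <linux/skbuff.h>
#include <linux/bug.h>

struct demo_cb {
	dma_addr_t dma_addr;	/* fixed slot, always valid */
	union {			/* rx and tx metadata never coexist */
		u32 rx_flags;
		u32 tx_flags;
	};
};

static inline struct demo_cb *demo_skb_cb(struct sk_buff *skb)
{
	/* fail the build if the overlay ever outgrows skb->cb (48 bytes) */
	BUILD_BUG_ON(sizeof(struct demo_cb) > sizeof(skb->cb));
	return (struct demo_cb *)skb->cb;
}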

@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
	/*
	 * Detect if this device has an hardware controlled radio.
	 */
-	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
+	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
+		/*
+		 * On this device RFKILL initialized during probe does not work.
+		 */
+		__set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
+	}
 
	/*
	 * Check if the BBP tuning should be enabled.

@@ -229,6 +229,27 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
 /*
  * Firmware functions
  */
+static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
+{
+	__le32 reg;
+	u32 fw_mode;
+
+	/* cannot use rt2x00usb_register_read here as it uses different
+	 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
+	 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
+	 * returned value would be invalid.
+	 */
+	rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+				 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
+				 &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
+	fw_mode = le32_to_cpu(reg);
+
+	if ((fw_mode & 0x00000003) == 2)
+		return 1;
+
+	return 0;
+}
+
 static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
 {
	return FIRMWARE_RT2870;
@@ -257,8 +278,13 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
	/*
	 * Write firmware to device.
	 */
-	rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
-				      data + offset, length);
+	if (rt2800usb_autorun_detect(rt2x00dev)) {
+		rt2x00_info(rt2x00dev,
+			    "Firmware loading not required - NIC in AutoRun mode\n");
+	} else {
+		rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+					      data + offset, length);
+	}
 
	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
	rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -735,11 +761,18 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 /*
  * Device probe functions.
  */
+static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+	if (rt2800usb_autorun_detect(rt2x00dev))
+		return 1;
+	return rt2800_efuse_detect(rt2x00dev);
+}
+
 static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
	int retval;
 
-	if (rt2800_efuse_detect(rt2x00dev))
+	if (rt2800usb_efuse_detect(rt2x00dev))
		retval = rt2800_read_eeprom_efuse(rt2x00dev);
	else
		retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,

@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
	REQUIRE_SW_SEQNO,
	REQUIRE_HT_TX_DESC,
	REQUIRE_PS_AUTOWAKE,
+	REQUIRE_DELAYED_RFKILL,
 
	/*
	 * Capabilities

@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
		return;
 
	/*
-	 * Unregister extra components.
+	 * Stop rfkill polling.
	 */
-	rt2x00rfkill_unregister(rt2x00dev);
+	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+		rt2x00rfkill_unregister(rt2x00dev);
 
	/*
	 * Allow the HW to uninitialize.
@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
 
+	/*
+	 * Start rfkill polling.
+	 */
+	if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+		rt2x00rfkill_register(rt2x00dev);
+
	return 0;
 }
 
@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);
-	rt2x00rfkill_register(rt2x00dev);
+
+	/*
+	 * Start rfkill polling.
+	 */
+	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+		rt2x00rfkill_register(rt2x00dev);
 
	return 0;
 
@@ -1390,6 +1402,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 {
	clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 
+	/*
+	 * Stop rfkill polling.
+	 */
+	if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+		rt2x00rfkill_unregister(rt2x00dev);
+
	/*
	 * Disable radio.
	 */
@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
crypto.cipher = rt2x00crypto_key_to_cipher(key);
|
||||
if (crypto.cipher == CIPHER_NONE)
|
||||
return -EOPNOTSUPP;
|
||||
if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
crypto.cmd = cmd;
|
||||
|
||||
|
|

@@ -93,6 +93,7 @@ enum rt2x00usb_mode_offset {
	USB_MODE_SLEEP = 7,	/* RT73USB */
	USB_MODE_FIRMWARE = 8,	/* RT73USB */
	USB_MODE_WAKEUP = 9,	/* RT73USB */
+	USB_MODE_AUTORUN = 17,	/* RT2800USB */
 };
 
 /**

@@ -222,6 +222,7 @@ struct xenvif {
 
	/* Queues */
	struct xenvif_queue *queues;
+	unsigned int num_queues; /* active queues, resource allocated */
 
	/* Miscellaneous private stuff. */
	struct net_device *dev;

@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
	}
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
-{
-	unsigned int num_queues = dev->real_num_tx_queues;
-	u32 hash;
-	u16 queue_index;
-
-	/* First, check if there is only one queue to optimise the
-	 * single-queue or old frontend scenario.
-	 */
-	if (num_queues == 1) {
-		queue_index = 0;
-	} else {
-		/* Use skb_get_hash to obtain an L4 hash if available */
-		hash = skb_get_hash(skb);
-		queue_index = hash % num_queues;
-	}
-
-	return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	u16 index;
	int min_slots_needed;
 
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
 
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
 
	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 * data)
 {
	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;
	struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
-	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
-	 * via netif_set_real_num_tx_queues().
+	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
			      xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
	vif->dev = dev;
	vif->disabled = false;
 
-	/* Start out with no queues. The call below does not require
-	 * rtnl_lock() as it happens before register_netdev().
-	 */
+	/* Start out with no queues. */
	vif->queues = NULL;
-	netif_set_real_num_tx_queues(dev, 0);
+	vif->num_queues = 0;
 
	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
 
	if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
		xenvif_deinit_queue(queue);
	}
 
-	/* Free the array of queues. The call below does not require
-	 * rtnl_lock() because it happens after unregister_netdev().
-	 */
-	netif_set_real_num_tx_queues(vif->dev, 0);
	vfree(vif->queues);
	vif->queues = NULL;
+	vif->num_queues = 0;
 
	free_netdev(vif->dev);
 

@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(requested_num_queues *
				  sizeof(struct xenvif_queue));
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
-	rtnl_unlock();
+	be->vif->num_queues = requested_num_queues;
 
	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
			goto err;
		}
 
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
			 * and also clean up any previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
			goto err;
		}
	}
 
+	/* Initialisation completed, tell core driver the number of
+	 * active queues.
+	 */
+	rtnl_lock();
+	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+	rtnl_unlock();
+
	xenvif_carrier_on(be->vif);
 
	unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
	return;
 
 err:
-	if (be->vif->dev->real_num_tx_queues > 0)
+	if (be->vif->num_queues > 0)
		xenvif_disconnect(be->vif); /* Clean up existing queues */
	vfree(be->vif->queues);
	be->vif->queues = NULL;
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, 0);
-	rtnl_unlock();
+	be->vif->num_queues = 0;
	return;
 }
 
@@ -596,7 +596,7 @@ err:
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 {
	struct xenbus_device *dev = be->dev;
-	unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
+	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;

@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 
	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
-			napi_schedule(&queue->napi);
+		napi_schedule(&queue->napi);
 
	return IRQ_HANDLED;
 }
@@ -1437,10 +1437,11 @@ static void xennet_end_access(int ref, void *page)
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
	unsigned int i = 0;
-	struct netfront_queue *queue = NULL;
	unsigned int num_queues = info->netdev->real_num_tx_queues;
 
	for (i = 0; i < num_queues; ++i) {
+		struct netfront_queue *queue = &info->queues[i];
+
		/* Stop old i/f to prevent errors whilst we rebuild the state. */
		spin_lock_bh(&queue->rx_lock);
		spin_lock_irq(&queue->tx_lock);
@@ -1698,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
		goto exit_free_tx;
	}
 
-	netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
	return 0;
 
 exit_free_tx:
@@ -1790,6 +1789,70 @@ error:
	return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	rtnl_unlock();
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+				unsigned int num_queues)
+{
+	unsigned int i;
+	int ret;
+
+	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+			       GFP_KERNEL);
+	if (!info->queues)
+		return -ENOMEM;
+
+	rtnl_lock();
+
+	for (i = 0; i < num_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		queue->id = i;
+		queue->info = info;
+
+		ret = xennet_init_queue(queue);
+		if (ret < 0) {
+			dev_warn(&info->netdev->dev, "only created %d queues\n",
+				 num_queues);
+			num_queues = i;
+			break;
+		}
+
+		netif_napi_add(queue->info->netdev, &queue->napi,
+			       xennet_poll, 64);
+		if (netif_running(info->netdev))
+			napi_enable(&queue->napi);
+	}
+
+	netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+	rtnl_unlock();
+
+	if (num_queues == 0) {
+		dev_err(&info->netdev->dev, "no queues\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
@@ -1826,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
		goto out;
	}
 
-	/* Allocate array of queues */
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-	if (!info->queues) {
-		err = -ENOMEM;
-		goto out;
-	}
-	rtnl_lock();
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
-	rtnl_unlock();
+	if (info->queues)
+		xennet_destroy_queues(info);
+
+	err = xennet_create_queues(info, num_queues);
+	if (err < 0)
+		goto destroy_ring;
 
	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
-		queue->id = i;
-		queue->info = info;
-		err = xennet_init_queue(queue);
-		if (err) {
-			/* xennet_init_queue() cleans up after itself on failure,
-			 * but we still have to clean up any previously initialised
-			 * queues. If i > 0, set num_queues to i, then goto
-			 * destroy_ring, which calls xennet_disconnect_backend()
-			 * to tidy up.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
-			/* As for xennet_init_queue(), setup_netfront() will tidy
-			 * up the current queue on error, but we need to clean up
+			/* setup_netfront() will tidy up the current
+			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			if (i > 0) {
@ -323,11 +323,13 @@ int of_phy_register_fixed_link(struct device_node *np)
|
|||
fixed_link_node = of_get_child_by_name(np, "fixed-link");
|
||||
if (fixed_link_node) {
|
||||
status.link = 1;
|
||||
status.duplex = of_property_read_bool(np, "full-duplex");
|
||||
status.duplex = of_property_read_bool(fixed_link_node,
|
||||
"full-duplex");
|
||||
if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
|
||||
return -EINVAL;
|
||||
status.pause = of_property_read_bool(np, "pause");
|
||||
status.asym_pause = of_property_read_bool(np, "asym-pause");
|
||||
status.pause = of_property_read_bool(fixed_link_node, "pause");
|
||||
status.asym_pause = of_property_read_bool(fixed_link_node,
|
||||
"asym-pause");
|
||||
of_node_put(fixed_link_node);
|
||||
return fixed_phy_register(PHY_POLL, &status, np);
|
||||
}
|
||||
|
|
|
@ -74,7 +74,7 @@ config DP83640_PHY
|
|||
|
||||
config PTP_1588_CLOCK_PCH
|
||||
tristate "Intel PCH EG20T as PTP clock"
|
||||
depends on X86 || COMPILE_TEST
|
||||
depends on X86_32 || COMPILE_TEST
|
||||
depends on HAS_IOMEM && NET
|
||||
select PTP_1588_CLOCK
|
||||
help
|
||||
|
|

@@ -536,6 +536,15 @@ struct phy_driver {
	/* See set_wol, but for checking whether Wake on LAN is enabled. */
	void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
 
+	/*
+	 * Called to inform a PHY device driver when the core is about to
+	 * change the link state. This callback is supposed to be used as
+	 * fixup hook for drivers that need to take action when the link
+	 * state changes. Drivers are by no means allowed to mess with the
+	 * PHY device structure in their implementations.
+	 */
+	void (*link_change_notify)(struct phy_device *dev);
+
	struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
|
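The new link_change_notify member follows the usual optional-callback convention for ops structures: the core invokes the hook only when the driver has filled it in. A small standalone sketch of that convention, with illustrative names only:

#include <stdio.h>

struct device { const char *name; };

struct drv_ops {
	int (*probe)(struct device *dev);               /* mandatory */
	void (*link_change_notify)(struct device *dev); /* optional, may be NULL */
};

static void core_link_changed(struct device *dev, const struct drv_ops *ops)
{
	if (ops->link_change_notify)    /* guard: the hook is optional */
		ops->link_change_notify(dev);
}

static int my_probe(struct device *dev) { (void)dev; return 0; }
static void my_notify(struct device *dev) { printf("%s: link change\n", dev->name); }

int main(void)
{
	struct device dev = { "phy0" };
	struct drv_ops with_hook = { my_probe, my_notify };
	struct drv_ops without_hook = { my_probe, NULL };

	core_link_changed(&dev, &with_hook);    /* prints */
	core_link_changed(&dev, &without_hook); /* safely does nothing */
	return 0;
}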
@@ -503,9 +503,9 @@ enum nft_chain_flags {
  * @net: net namespace that this chain belongs to
  * @table: table that this chain belongs to
  * @handle: chain handle
- * @flags: bitmask of enum nft_chain_flags
  * @use: number of jump references to this chain
  * @level: length of longest path to this chain
+ * @flags: bitmask of enum nft_chain_flags
  * @name: name of the chain
  */
 struct nft_chain {
@@ -514,9 +514,9 @@ struct nft_chain {
 	struct net *net;
 	struct nft_table *table;
 	u64 handle;
-	u8 flags;
-	u16 use;
+	u32 use;
 	u16 level;
+	u8 flags;
 	char name[NFT_CHAIN_MAXNAMELEN];
 };
 
@@ -1730,8 +1730,8 @@ sk_dst_get(struct sock *sk)
 
 	rcu_read_lock();
 	dst = rcu_dereference(sk->sk_dst_cache);
-	if (dst)
-		dst_hold(dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	rcu_read_unlock();
 	return dst;
 }
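The replacement of dst_hold() closes a race: the refcount may already have dropped to zero while the pointer was still visible under RCU, and taking a reference then would resurrect a dying object. Below is a userspace sketch of the inc-not-zero idiom using C11 atomics; it is an illustration, not the kernel's implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);
	while (old != 0) {
		/* CAS retries if another thread changed the count meanwhile */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;	/* object already hit zero: do not resurrect it */
}

int main(void)
{
	atomic_int live = 1, dying = 0;
	printf("live: %d\n", inc_not_zero(&live));   /* 1: reference taken */
	printf("dying: %d\n", inc_not_zero(&dying)); /* 0: caller treats it as NULL */
	return 0;
}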
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-	if (skb_cow(skb, skb_headroom(skb)) < 0)
+	if (skb_cow(skb, skb_headroom(skb)) < 0) {
+		kfree_skb(skb);
 		return NULL;
+	}
+
 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;
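The vlan fix is a classic error-path ownership contract: a function that takes ownership of a buffer must release it on every exit, including failures, so the caller never ends up with a half-owned object. A hedged standalone sketch of that contract, with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; };

/* Returns the (possibly reworked) buffer, or NULL after freeing it. */
static struct buf *reorder_header(struct buf *b)
{
	char *copy = malloc(b->len);
	if (!copy) {
		free(b->data);	/* without this, the error path would leak */
		free(b);
		return NULL;
	}
	memcpy(copy, b->data, b->len);
	free(b->data);
	b->data = copy;
	return b;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	b->len = 4;
	b->data = malloc(b->len);
	b = reorder_header(b);	/* either valid or NULL, never half-owned */
	if (b) { free(b->data); free(b); }
	return 0;
}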
@@ -610,11 +610,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
 	if (hci_update_random_address(req, false, &own_addr_type))
 		return;
 
-	/* Save the address type used for this connnection attempt so we able
-	 * to retrieve this information if we need it.
-	 */
-	conn->src_type = own_addr_type;
-
 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
 	bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +889,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		/* If we're already encrypted set the REAUTH_PEND flag,
 		 * otherwise set the ENCRYPT_PEND.
 		 */
-		if (conn->key_type != 0xff)
+		if (conn->link_mode & HCI_LM_ENCRYPT)
 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 		else
 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+	hci_dev_lock(hdev);
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	hci_dev_unlock(hdev);
+
 	hci_conn_check_pending(hdev);
 }
 
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		cp.authentication = conn->auth_type;
 
 		/* Request MITM protection if our IO caps allow it
-		 * except for the no-bonding case
+		 * except for the no-bonding case.
+		 * conn->auth_type is not updated here since
+		 * that might cause the user confirmation to be
+		 * rejected in case the remote doesn't have the
+		 * IO capabilities for MITM.
 		 */
 		if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
 		    cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
-		 * confirm_hint set to 1). */
-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+		 * confirm_hint set to 1). The exception is if neither
+		 * side had MITM in which case we do auto-accept.
+		 */
+		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+		    (loc_mitm || rem_mitm)) {
 			BT_DBG("Confirming auto-accept as acceptor");
 			confirm_hint = 1;
 			goto confirm;
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree_skb(conn->rx_skb);
 
 	skb_queue_purge(&conn->pending_rx);
-	flush_work(&conn->pending_rx_work);
+
+	/* We can not call flush_work(&conn->pending_rx_work) here since we
+	 * might block if we are running on a worker from the same workqueue
+	 * pending_rx_work is waiting on.
+	 */
+	if (work_pending(&conn->pending_rx_work))
+		cancel_work_sync(&conn->pending_rx_work);
 
 	l2cap_unregister_all_users(conn);
 
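The comment above names the deadlock class: flushing a work item from the very workqueue that runs it is a self-wait that can never finish. A rough userspace analogy is a thread joining itself, which glibc's pthreads reports as EDEADLK; the sketch below is only an analogy, not the workqueue mechanism.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *worker(void *arg)
{
	(void)arg;
	/* Joining the current thread is a self-wait, like flush_work() run
	 * from the same workqueue: it could never complete, so it fails. */
	int err = pthread_join(pthread_self(), NULL);
	printf("self-join: %s\n", err ? strerror(err) : "ok (unexpected)");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);	/* joining *another* thread is fine */
	return 0;
}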
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
 		/*change security for LE channels */
 		if (chan->scid == L2CAP_CID_ATT) {
-			if (!conn->hcon->out) {
-				err = -EINVAL;
-				break;
-			}
-
 			if (smp_conn_security(conn->hcon, sec.level))
 				break;
 			sk->sk_state = BT_CONFIG;
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
 	}
 }
 
+static void hci_stop_discovery(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_remote_name_req_cancel cp;
+	struct inquiry_entry *e;
+
+	switch (hdev->discovery.state) {
+	case DISCOVERY_FINDING:
+		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+		} else {
+			cancel_delayed_work(&hdev->le_scan_disable);
+			hci_req_add_le_scan_disable(req);
+		}
+
+		break;
+
+	case DISCOVERY_RESOLVING:
+		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+						     NAME_PENDING);
+		if (!e)
+			return;
+
+		bacpy(&cp.bdaddr, &e->data.bdaddr);
+		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+			    &cp);
+
+		break;
+
+	default:
+		/* Passive scanning */
+		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+			hci_req_add_le_scan_disable(req);
+		break;
+	}
+}
+
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
 	struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		disable_advertising(&req);
 
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
-		hci_req_add_le_scan_disable(&req);
-	}
+	hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
 		struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP */
+		/* Continue with pairing via SMP. The hdev lock must be
+		 * released as SMP may try to recquire it for crypto
+		 * purposes.
+		 */
+		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+		hci_dev_lock(hdev);
 
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 {
 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
 	struct pending_cmd *cmd;
-	struct hci_cp_remote_name_req_cancel cp;
-	struct inquiry_entry *e;
 	struct hci_request req;
 	int err;
 
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_req_init(&req, hdev);
 
-	switch (hdev->discovery.state) {
-	case DISCOVERY_FINDING:
-		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-		} else {
-			cancel_delayed_work(&hdev->le_scan_disable);
-			hci_req_add_le_scan_disable(&req);
-		}
-
-		break;
-
-	case DISCOVERY_RESOLVING:
-		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
-		if (!e) {
-			mgmt_pending_remove(cmd);
-			err = cmd_complete(sk, hdev->id,
-					   MGMT_OP_STOP_DISCOVERY, 0,
-					   &mgmt_cp->type,
-					   sizeof(mgmt_cp->type));
-			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-			goto unlock;
-		}
-
-		bacpy(&cp.bdaddr, &e->data.bdaddr);
-		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
-			    &cp);
-
-		break;
-
-	default:
-		BT_DBG("unknown discovery state %u", hdev->discovery.state);
-
-		mgmt_pending_remove(cmd);
-		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-				   MGMT_STATUS_FAILED, &mgmt_cp->type,
-				   sizeof(mgmt_cp->type));
+	hci_stop_discovery(&req);
+
+	err = hci_req_run(&req, stop_discovery_complete);
+	if (!err) {
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
 		goto unlock;
 	}
 
-	err = hci_req_run(&req, stop_discovery_complete);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-	else
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+	mgmt_pending_remove(cmd);
+
+	/* If no HCI commands were sent we're done */
+	if (err == -ENODATA) {
+		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+				   &mgmt_cp->type, sizeof(mgmt_cp->type));
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	}
 
 unlock:
 	hci_dev_unlock(hdev);
@@ -544,7 +544,7 @@ static u8 smp_random(struct smp_chan *smp)
 		hci_le_start_enc(hcon, ediv, rand, stk);
 		hcon->enc_key_size = smp->enc_key_size;
 	} else {
-		u8 stk[16];
+		u8 stk[16], auth;
 		__le64 rand = 0;
 		__le16 ediv = 0;
 
@@ -556,8 +556,13 @@ static u8 smp_random(struct smp_chan *smp)
 		memset(stk + smp->enc_key_size, 0,
 		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
+		if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+			auth = 1;
+		else
+			auth = 0;
+
 		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-			    HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
+			    HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
 			    ediv, rand);
 	}
 
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
-		}
+		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
 }
 EXPORT_SYMBOL(dst_release);
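call_rcu() works here because the rcu_head is embedded in the object itself and container_of() recovers the enclosing dst_entry inside the callback, so the free path needs no extra allocation. Below is a userspace sketch of that embedded-head pattern; the grace-period machinery is deliberately elided and the callback is invoked directly, so this only illustrates the data layout, not RCU itself.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct cb_head {
	void (*func)(struct cb_head *head);
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry {
	int refcnt;
	struct cb_head rcu;	/* embedded, so no allocation on the free path */
};

static void entry_destroy_cb(struct cb_head *head)
{
	struct entry *e = container_of(head, struct entry, rcu);
	printf("freeing entry with refcnt %d\n", e->refcnt);
	free(e);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));
	e->refcnt = 0;
	e->rcu.func = entry_destroy_cb;
	/* A real call_rcu() would run this only after a grace period. */
	e->rcu.func(&e->rcu);
	return 0;
}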
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
 	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-	if (len <= 0 || len >= BPF_MAXINSNS)
+	if (len <= 0 || len > BPF_MAXINSNS)
 		return -EINVAL;
 
 	if (new_prog) {
-		addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
 		if (!addrs)
 			return -ENOMEM;
 	}
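Two separate fixes land in this hunk: the bounds test now accepts programs of exactly BPF_MAXINSNS instructions (">" instead of ">="), and the allocation moves to kcalloc so the element-count multiplication is overflow-checked inside the allocator rather than wrapping silently in the caller. A small userspace sketch, with an illustrative MAXINSNS constant rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define MAXINSNS 4096	/* illustrative stand-in for BPF_MAXINSNS */

static int check_len(int len)
{
	return (len <= 0 || len > MAXINSNS) ? -1 : 0;	/* inclusive upper bound */
}

int main(void)
{
	size_t n = ((size_t)-1 / sizeof(int)) + 2;	/* n * sizeof(int) wraps */

	printf("len=4096 ok? %d\n", check_len(4096));	/* 0: allowed */
	printf("len=4097 ok? %d\n", check_len(4097));	/* -1: rejected */

	/* calloc detects the multiplication overflow and fails cleanly,
	 * whereas malloc(n * sizeof(int)) would see a tiny wrapped size and
	 * "succeed", handing back a buffer far smaller than intended. */
	printf("calloc: %p\n", calloc(n, sizeof(int)));
	return 0;
}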
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 
 	BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return -ENOMEM;
 
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (fp_new) {
 		*fp_new = *fp;
-		/* As we're kepping orig_prog in fp_new along,
+		/* As we're keeping orig_prog in fp_new along,
 		 * we need to make sure we're not evicting it
 		 * from the old fp.
 		 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 
 /**
  *	sk_unattached_filter_create - create an unattached filter
- *	@fprog: the filter program
  *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
  *
  *	Create a filter independent of any socket. We first run some
  *	sanity checks on it to make sure it does not explode on us later.
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 							    skb_put(nskb, len),
 							    len, 0);
 			SKB_GSO_CB(nskb)->csum_start =
-			    skb_headroom(nskb) + offset;
+			    skb_headroom(nskb) + doffset;
 			continue;
 		}
 
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
 	struct dst_entry *old_dst;
 
-	if (dst) {
-		if (dst->flags & DST_NOCACHE)
-			dst = NULL;
-		else
-			dst_clone(dst);
-	}
+	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	if (dst) {
 		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-			rcu_read_unlock();
 			tunnel_dst_reset(t);
-			return NULL;
+			dst_release(dst);
+			dst = NULL;
 		}
-		dst_hold(dst);
 	}
 	rcu_read_unlock();
 	return (struct rtable *)dst;
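__tunnel_dst_set() publishes the new cached dst with xchg(), so there is never a window where the slot holds a torn or stale value, and the reference the cache held on the old entry is dropped afterwards. A userspace analogue of that publish-and-release idiom using C11 atomic_exchange, reduced to a single-slot cache with illustrative names:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { int id; };

static _Atomic(struct obj *) cache;

static void cache_set(struct obj *newp)
{
	/* atomic swap: readers see either the old or the new pointer, never
	 * a half-updated slot */
	struct obj *oldp = atomic_exchange(&cache, newp);
	free(oldp);	/* stands in for dst_release(old_dst) */
}

int main(void)
{
	struct obj *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
	a->id = 1;
	b->id = 2;
	cache_set(a);
	cache_set(b);		/* frees a */
	cache_set(NULL);	/* frees b, empties the cache */
	return 0;
}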
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 				      struct dst_entry *dst,
 				      struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
 
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 		unsigned int new_len = (pkt_len / mss) * mss;
 		if (!in_sack && new_len < pkt_len) {
 			new_len += mss;
-			if (new_len > skb->len)
+			if (new_len >= skb->len)
 				return 0;
 		}
 		pkt_len = new_len;
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	ip_vs_trash_cleanup(net);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
@@ -596,6 +596,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 #endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       + ctnetlink_label_size(ct)
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-	struct nf_conn *ct, *last = NULL;
+	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 	if (cb->args[2])
 		return 0;
 
-	if (cb->args[0] == nr_cpu_ids)
-		return 0;
+	last = (struct nf_conn *)cb->args[1];
 
 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
 		struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 
 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 		spin_lock_bh(&pcpu->lock);
-		last = (struct nf_conn *)cb->args[1];
 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
 						  ct);
 			rcu_read_unlock();
 			if (res < 0) {
-				nf_conntrack_get(&ct->ct_general);
+				if (!atomic_inc_not_zero(&ct->ct_general.use))
+					continue;
 				cb->args[0] = cpu;
 				cb->args[1] = (unsigned long)ct;
 				spin_unlock_bh(&pcpu->lock);
 				goto out;
@@ -1202,10 +1205,10 @@ restart:
 		if (cb->args[1]) {
 			cb->args[1] = 0;
 			goto restart;
-		} else
-			cb->args[2] = 1;
+		}
 		spin_unlock_bh(&pcpu->lock);
 	}
+	cb->args[2] = 1;
 out:
 	if (last)
 		nf_ct_put(last);
@@ -2039,6 +2042,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 #endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       ;
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	if (nf_nat_proto_remove(ct, data))
+		return 1;
+
+	if (!nat || !nat->ct)
+		return 0;
+
+	/* This netns is being destroyed, and conntrack has nat null binding.
+	 * Remove it from bysource hash, as the table will be freed soon.
+	 *
+	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
+	 * will delete entry from already-freed table.
+	 */
+	if (!del_timer(&ct->timeout))
+		return 1;
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&nat->bysource);
+	ct->status &= ~IPS_NAT_DONE_MASK;
+	nat->ct = NULL;
+	spin_unlock_bh(&nf_nat_lock);
+
+	add_timer(&ct->timeout);
+
+	/* don't delete conntrack. Although that would make things a lot
+	 * simpler, we'd end up flushing all conntracks on nat rmmod.
+	 */
+	return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
 	struct nf_nat_proto_clean clean = {
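The del_timer() test above doubles as an exclusivity claim: only the caller that actually stopped the pending timeout may unhash the entry, and anyone who loses the race returns early. A loose userspace analogy is trylock-guarded cleanup; this sketch is an analogy only, not the kernel mechanism.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t teardown = PTHREAD_MUTEX_INITIALIZER;

static int try_cleanup(const char *who)
{
	if (pthread_mutex_trylock(&teardown) != 0) {
		printf("%s: someone else owns teardown, backing off\n", who);
		return 1;	/* mirrors "if (!del_timer(...)) return 1;" */
	}
	printf("%s: claimed teardown, unhashing entry\n", who);
	/* ... unhash, reset state ... */
	pthread_mutex_unlock(&teardown);	/* like re-arming with add_timer() */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&teardown);	/* simulate a concurrent owner... */
	try_cleanup("netns-exit");	/* ...so this caller backs off */
	pthread_mutex_unlock(&teardown);
	try_cleanup("netns-exit");	/* now wins the claim */
	return 0;
}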
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
+	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 		if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EINVAL;
 		handle = nf_tables_alloc_handle(table);
+
+		if (chain->use == UINT_MAX)
+			return -EOVERFLOW;
 	}
 
 	if (nla[NFTA_RULE_POSITION]) {
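The new check refuses to add a rule once the chain's use counter reaches UINT_MAX, since a wrapped counter would later let a chain that still has references be destroyed. A minimal sketch of such a checked increment:

#include <limits.h>
#include <stdio.h>

static int take_ref(unsigned int *use)
{
	if (*use == UINT_MAX)
		return -1;	/* -EOVERFLOW in the kernel code */
	(*use)++;
	return 0;
}

int main(void)
{
	unsigned int use = UINT_MAX - 1;
	printf("%d (use=%u)\n", take_ref(&use), use);	/* ok, now saturated */
	printf("%d (use=%u)\n", take_ref(&use), use);	/* refused at UINT_MAX */
	return 0;
}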
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
 		if (nft_rule_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
						   old_rule);
			if (trans == NULL) {
				err = -ENOMEM;
				goto err2;
			}
			nft_rule_disactivate_next(net, old_rule);
-			list_add_tail(&rule->list, &old_rule->list);
+			chain->use--;
+			list_add_tail_rcu(&rule->list, &old_rule->list);
		} else {
			err = -ENOENT;
			goto err2;
@@ -1826,6 +1830,7 @@ err3:
 		list_del_rcu(&nft_trans_rule(trans)->list);
 		nft_rule_clear(net, nft_trans_rule(trans));
 		nft_trans_destroy(trans);
+		chain->use++;
 	}
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = NFPROTO_UNSPEC;
+	nfmsg->nfgen_family = ctx.afi->family;
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
@@ -195,6 +195,15 @@ static void
 nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_target *target = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_tgdtor_param par;
+
+	par.net = ctx->net;
+	par.target = target;
+	par.targinfo = info;
+	par.family = ctx->afi->family;
+	if (par.target->destroy != NULL)
+		par.target->destroy(&par);
 
 	module_put(target->me);
 }
@@ -382,6 +391,15 @@ static void
 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_match *match = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_mtdtor_param par;
+
+	par.net = ctx->net;
+	par.match = match;
+	par.matchinfo = info;
+	par.family = ctx->afi->family;
+	if (par.match->destroy != NULL)
+		par.match->destroy(&par);
 
 	module_put(match->me);
 }
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	if (nla_put_be32(skb,
 			 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-		goto nla_put_failure;
+	if (priv->sreg_proto_min) {
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+				 htonl(priv->sreg_proto_min)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+				 htonl(priv->sreg_proto_max)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
 			     loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	char tmp[8];
 	struct ctl_table tbl;
-	int ret;
-	int changed = 0;
+	bool changed = false;
 	char *none = "none";
+	char tmp[8];
+	int ret;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 
 	if (write) {
 		tbl.data = tmp;
-		tbl.maxlen = 8;
+		tbl.maxlen = sizeof(tmp);
 	} else {
 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
 		tbl.maxlen = strlen(tbl.data);
 	}
-	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+	if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
 		if (!strncmp(tmp, "md5", 3)) {
 			net->sctp.sctp_hmac_alg = "md5";
-			changed = 1;
+			changed = true;
 		}
 #endif
 #ifdef CONFIG_CRYPTO_SHA1
 		if (!strncmp(tmp, "sha1", 4)) {
 			net->sctp.sctp_hmac_alg = "sha1";
-			changed = 1;
+			changed = true;
 		}
 #endif
 		if (!strncmp(tmp, "none", 4)) {
 			net->sctp.sctp_hmac_alg = NULL;
-			changed = 1;
+			changed = true;
 		}
 
 		if (!changed)
 			ret = -EINVAL;
 	}
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_min;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_min = new_value;
 	}
+
 	return ret;
 }
 
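The corrected sysctl handlers above all share one shape: run the generic parser, and only on the write path and only when the parser returned success, range-check the parsed value before committing it. A standalone C sketch of that shape follows; the helper and its parsing are hypothetical, not the procfs API.

#include <stdio.h>
#include <stdlib.h>

static int store_rto(const char *input, unsigned int min, unsigned int max,
		     unsigned int *rto)
{
	char *end;
	long v = strtol(input, &end, 10);
	int ret = (end != input && *end == '\0') ? 0 : -1;

	if (ret == 0) {			/* only validate after a clean parse */
		if (v < (long)min || v > (long)max)
			return -1;	/* -EINVAL: reject, keep the old value */
		*rto = (unsigned int)v;
	}
	return ret;
}

int main(void)
{
	unsigned int rto = 200;
	printf("%d rto=%u\n", store_rto("500", 1, 1000, &rto));  /* commits */
	printf("%d rto=%u\n", store_rto("9999", 1, 1000, &rto)); /* rejected */
	printf("%d rto=%u\n", store_rto("abc", 1, 1000, &rto));  /* parse error */
	return 0;
}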
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_max;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_max = new_value;
 	}
+
 	return ret;
 }
 
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 		tbl.data = &net->sctp.auth_enable;
 
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	if (write && ret == 0) {
 		struct sock *sk = net->sctp.ctl_sock;
 
 		net->sctp.auth_enable = new_value;