Merge git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

Conflicts:
	drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
	drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
	drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
	drivers/net/wireless/rtlwifi/rtl8192de/phy.h
	drivers/net/wireless/rtlwifi/rtl8723ae/phy.h

Just some minor conflicts between the wireless-next changes
and Joe Perches's "extern" removal from function prototypes
in header files.
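
For context, the kind of hunk involved looks roughly like this (the
function name is made up for illustration; since "extern" is implicit
on function declarations, dropping it changes nothing semantically, so
the conflicts were purely textual):

	-extern u32 example_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
	+u32 example_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);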

John W. Linville says:

====================
Regarding the Bluetooth bits, Gustavo says:

"The big work here is from Marcel and Johan. They did a lot of work
in the L2CAP, HCI and MGMT layers. The most important ones are the
addition of a new MGMT command to enable/disable LE advertisement
and the introduction of the HCI user channel to allow applications
to get direct and exclusive access to Bluetooth devices."
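
A rough sketch of how an application might claim such exclusive access
through the user channel (this assumes the usual AF_BLUETOOTH raw socket
API; HCI_CHANNEL_USER is defined locally in case the libbluetooth headers
in use predate it, matching the kernel's value of 1, and error handling
is trimmed):

	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/hci.h>

	#ifndef HCI_CHANNEL_USER
	#define HCI_CHANNEL_USER 1
	#endif

	/* Bind a raw HCI socket to the user channel of hci<dev_id>. The
	 * device must be down and the caller needs CAP_NET_ADMIN; while
	 * the socket is open the kernel stack leaves the device alone. */
	static int open_user_channel(int dev_id)
	{
		struct sockaddr_hci addr = {
			.hci_family  = AF_BLUETOOTH,
			.hci_dev     = dev_id,
			.hci_channel = HCI_CHANNEL_USER,
		};
		int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return -1;

		return fd;	/* read()/write() now carry raw HCI packets */
	}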

As to the ath10k bits, Kalle says:

"Bartosz dropped support for qca98xx hw1.0 hardware from ath10k, it's
just too much to support it. Michal added support for the new firmware
interface. Marek fixed WEP in AP and IBSS mode. Rest of the changes are
minor fixes or cleanups."

And also:

"Major changes are:

* throughput improvements, including aligning the RX frames correctly and
  optimising the HTT layer (Michal)

* remove qca98xx hw1.0 support (Bartosz)

* add support for firmware version 999.999.0.636 (Michal)

* firmware HTT statistics support (Kalle)

* fix WEP in AP and IBSS mode (Marek)

* fix a mutex unlock imbalance in a debugfs file (Shafi)

And of course there are a lot of smaller fixes and cleanups."

For the wl12xx bits, Luca says:

"Here are some patches intended for 3.13.  Eliad is upstreaming a bunch
of patches that have been pending in the internal tree.  Mostly bugfixes
and other small improvements."

Along with that...

Arend and friends bring us a batch of brcmfmac updates, Larry Finger
offers some rtlwifi refactoring, and Sujith sends the usual batch of
ath9k updates.  As usual, there are a number of other small updates
from a variety of players as well.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2013-10-07 15:40:44 -04:00
commit 7009deab19
162 changed files with 4855 additions and 3228 deletions


@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__


@ -23,6 +23,8 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
@ -41,6 +43,8 @@ struct btmrvl_thread {
struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
struct device *dev;
const char *cal_data;
u8 dev_type;
@ -91,6 +95,7 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_CONFIG 0x59
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
#define BT_CMD_LOAD_CONFIG_DATA 0x61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
@ -116,11 +121,8 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
struct btmrvl_cmd {
__le16 ocf_ogf;
u8 length;
u8 data[4];
} __packed;
#define BT_CMD_DATA_SIZE 32
#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
u8 ec; /* event counter */


@ -57,8 +57,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
ocf = hci_opcode_ocf(opcode);
ogf = hci_opcode_ogf(opcode);
if (ocf == BT_CMD_MODULE_CFG_REQ &&
priv->btmrvl_dev.sendcmdflag) {
if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
@ -116,7 +115,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
adapter->ps_state = PS_SLEEP;
wake_up_interruptible(&adapter->cmd_wait_q);
BT_DBG("HS ACTIVATED!");
} else {
BT_DBG("HS Enable failed");
@ -168,22 +166,24 @@ exit:
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
const void *param, u8 len)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
int ret = 0;
struct hci_command_hdr *hdr;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
cmd->length = 1;
cmd->data[0] = subcmd;
hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
hdr->plen = len;
if (len)
memcpy(skb_put(skb, len), param, len);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
@ -194,19 +194,23 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
priv->adapter->cmd_complete = false;
BT_DBG("Queue module cfg Command");
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->cmd_complete,
msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
ret = -ETIMEDOUT;
BT_ERR("module_cfg_cmd(%x): timeout: %d",
subcmd, priv->btmrvl_dev.sendcmdflag);
}
msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
return -ETIMEDOUT;
BT_DBG("module cfg Command done");
return 0;
}
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
{
int ret;
ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
if (ret)
BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
return ret;
}
@ -214,61 +218,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
int ret;
u8 param[2];
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (!skb) {
BT_ERR("No free skb");
return -ENOMEM;
}
param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
BT_CMD_HOST_SLEEP_CONFIG));
cmd->length = 2;
cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
param[0], param[1]);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
if (ret)
BT_ERR("HSCFG command failed\n");
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
cmd->data[1]);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
BT_CMD_AUTO_SLEEP_MODE));
cmd->length = 1;
int ret;
u8 param;
if (priv->btmrvl_dev.psmode)
cmd->data[0] = BT_PS_ENABLE;
param = BT_PS_ENABLE;
else
cmd->data[0] = BT_PS_DISABLE;
param = BT_PS_DISABLE;
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
if (ret)
BT_ERR("PSMODE command failed\n");
return 0;
}
@ -276,37 +255,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
int ret = 0;
int ret;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
cmd->length = 0;
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue hs enable Command");
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->hs_state,
msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
ret = -ETIMEDOUT;
BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
priv->adapter->ps_state,
priv->adapter->wakeup_tries);
}
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
if (ret)
BT_ERR("Host sleep enable command failed\n");
return ret;
}
@ -479,6 +432,137 @@ static int btmrvl_open(struct hci_dev *hdev)
return 0;
}
/*
* This function parses the provided calibration data input. It should
* contain hex bytes separated by space or newline characters. Here is an
* example.
* 00 1C 01 37 FF FF FF FF 02 04 7F 01
* CE BA 00 00 00 2D C6 C0 00 00 00 00
* 00 F0 00 00
*/
static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
{
const u8 *s = src;
u8 *d = dst;
int ret;
u8 tmp[3];
tmp[2] = '\0';
while ((s - src) <= len - 2) {
if (isspace(*s)) {
s++;
continue;
}
if (isxdigit(*s)) {
if ((d - dst) >= dst_size) {
BT_ERR("calibration data file too big!!!");
return -EINVAL;
}
memcpy(tmp, s, 2);
ret = kstrtou8(tmp, 16, d++);
if (ret < 0)
return ret;
s += 2;
} else {
return -EINVAL;
}
}
if (d == dst)
return -EINVAL;
return 0;
}
static int btmrvl_load_cal_data(struct btmrvl_private *priv,
u8 *config_data)
{
int i, ret;
u8 data[BT_CMD_DATA_SIZE];
data[0] = 0x00;
data[1] = 0x00;
data[2] = 0x00;
data[3] = BT_CMD_DATA_SIZE - 4;
/* Swap cal-data bytes. Each four bytes are swapped. Considering 4
* byte SDIO header offset, mapping of input and output bytes will be
* {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
* {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
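/* Illustrative check of the index math above (not part of the patch):
 * for i = 4..7 the expression (i / 4) * 8 - 1 - i evaluates to 3, 2, 1, 0
 * and for i = 8..11 to 7, 6, 5, 4, so data[4..7] receive config_data[3..0]
 * and data[8..11] receive config_data[7..4], i.e. each 4-byte group is
 * byte-reversed and shifted past the 4-byte SDIO header. */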
for (i = 4; i < BT_CMD_DATA_SIZE; i++)
data[i] = config_data[(i / 4) * 8 - 1 - i];
print_hex_dump_bytes("Calibration data: ",
DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
BT_CMD_DATA_SIZE);
if (ret)
BT_ERR("Failed to download caibration data\n");
return 0;
}
static int
btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
{
u8 cal_data[BT_CAL_DATA_SIZE];
int ret;
ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
if (ret)
return ret;
ret = btmrvl_load_cal_data(priv, cal_data);
if (ret) {
BT_ERR("Fail to load calibrate data");
return ret;
}
return 0;
}
static int btmrvl_cal_data_config(struct btmrvl_private *priv)
{
const struct firmware *cfg;
int ret;
const char *cal_data = priv->btmrvl_dev.cal_data;
if (!cal_data)
return 0;
ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
if (ret < 0) {
BT_DBG("Failed to get %s file, skipping cal data download",
cal_data);
return 0;
}
ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
release_firmware(cfg);
return ret;
}
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
if (btmrvl_cal_data_config(priv))
BT_ERR("Set cal data failed");
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
}
/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
@ -572,8 +656,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->flush = btmrvl_flush;
hdev->send = btmrvl_send_frame;
hdev->ioctl = btmrvl_ioctl;
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
hdev->setup = btmrvl_setup;
hdev->dev_type = priv->btmrvl_dev.dev_type;


@ -18,7 +18,6 @@
* this warranty disclaimer.
**/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mmc/sdio_ids.h>
@ -102,6 +101,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
@ -109,6 +109,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@ -116,6 +117,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
.cal_data = "mrvl/sd8797_caldata.conf",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@ -123,6 +125,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
@ -1006,6 +1009,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
struct btmrvl_sdio_device *data = (void *) id->driver_data;
card->helper = data->helper;
card->firmware = data->firmware;
card->cal_data = data->cal_data;
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
}
@ -1034,6 +1038,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
}
card->priv = priv;
priv->btmrvl_dev.dev = &card->func->dev;
priv->btmrvl_dev.cal_data = card->cal_data;
/* Initialize the interface specific function pointers */
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@ -1046,12 +1052,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
goto disable_host_int;
}
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
disable_host_int:
@ -1222,4 +1222,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");


@ -85,6 +85,7 @@ struct btmrvl_sdio_card {
u32 ioport;
const char *helper;
const char *firmware;
const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
u8 rx_unit;
@ -94,6 +95,7 @@ struct btmrvl_sdio_card {
struct btmrvl_sdio_device {
const char *helper;
const char *firmware;
const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
};


@ -24,6 +24,7 @@
*/
#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
@ -39,17 +40,17 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define VERSION "1.3"
#define VERSION "1.4"
static bool amp;
struct vhci_data {
struct hci_dev *hdev;
unsigned long flags;
wait_queue_head_t read_wait;
struct sk_buff_head readq;
struct delayed_work open_timeout;
};
static int vhci_open_dev(struct hci_dev *hdev)
@ -99,16 +100,62 @@ static int vhci_send_frame(struct sk_buff *skb)
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
return 0;
}
static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
{
struct hci_dev *hdev;
struct sk_buff *skb;
skb = bt_skb_alloc(4, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdev = hci_alloc_dev();
if (!hdev) {
kfree_skb(skb);
return -ENOMEM;
}
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
hdev->dev_type = dev_type;
hci_set_drvdata(hdev, data);
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
data->hdev = NULL;
kfree_skb(skb);
return -EBUSY;
}
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
*skb_put(skb, 1) = 0xff;
*skb_put(skb, 1) = dev_type;
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
return 0;
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
const char __user *buf, size_t count)
const char __user *buf, size_t count)
{
struct sk_buff *skb;
__u8 pkt_type, dev_type;
int ret;
if (count > HCI_MAX_FRAME_SIZE)
if (count < 2 || count > HCI_MAX_FRAME_SIZE)
return -EINVAL;
skb = bt_skb_alloc(count, GFP_KERNEL);
@ -120,27 +167,70 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EFAULT;
}
skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
pkt_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
hci_recv_frame(skb);
switch (pkt_type) {
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
if (!data->hdev) {
kfree_skb(skb);
return -ENODEV;
}
return count;
skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = pkt_type;
ret = hci_recv_frame(skb);
break;
case HCI_VENDOR_PKT:
if (data->hdev) {
kfree_skb(skb);
return -EBADFD;
}
cancel_delayed_work_sync(&data->open_timeout);
dev_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
if (skb->len > 0) {
kfree_skb(skb);
return -EINVAL;
}
kfree_skb(skb);
if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
return -EINVAL;
ret = vhci_create_device(data, dev_type);
break;
default:
kfree_skb(skb);
return -EINVAL;
}
return (ret < 0) ? ret : count;
}
static inline ssize_t vhci_put_user(struct vhci_data *data,
struct sk_buff *skb, char __user *buf, int count)
struct sk_buff *skb,
char __user *buf, int count)
{
char __user *ptr = buf;
int len, total = 0;
int len;
len = min_t(unsigned int, skb->len, count);
if (copy_to_user(ptr, skb->data, len))
return -EFAULT;
total += len;
if (!data->hdev)
return len;
data->hdev->stat.byte_tx += len;
@ -148,21 +238,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
case HCI_COMMAND_PKT:
data->hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
data->hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
data->hdev->stat.sco_tx++;
break;
}
return total;
return len;
}
static ssize_t vhci_read(struct file *file,
char __user *buf, size_t count, loff_t *pos)
char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
@ -185,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
}
ret = wait_event_interruptible(data->read_wait,
!skb_queue_empty(&data->readq));
!skb_queue_empty(&data->readq));
if (ret < 0)
break;
}
@ -194,7 +282,7 @@ static ssize_t vhci_read(struct file *file,
}
static ssize_t vhci_write(struct file *file,
const char __user *buf, size_t count, loff_t *pos)
const char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
@ -213,10 +301,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
return POLLOUT | POLLWRNORM;
}
static void vhci_open_timeout(struct work_struct *work)
{
struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work);
vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
}
static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
struct hci_dev *hdev;
data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
if (!data)
@ -225,35 +320,13 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
hdev = hci_alloc_dev();
if (!hdev) {
kfree(data);
return -ENOMEM;
}
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
hci_set_drvdata(hdev, data);
if (amp)
hdev->dev_type = HCI_AMP;
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
kfree(data);
hci_free_dev(hdev);
return -EBUSY;
}
INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
file->private_data = data;
nonseekable_open(inode, file);
schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
return 0;
}
@ -262,8 +335,12 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
hci_unregister_dev(hdev);
hci_free_dev(hdev);
cancel_delayed_work_sync(&data->open_timeout);
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
file->private_data = NULL;
kfree(data);
@ -309,3 +386,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
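
A minimal usage sketch for the new vhci control handshake (illustrative
only; it assumes the constants keep their kernel values, HCI_VENDOR_PKT =
0xff and HCI_BREDR = 0x00, and error handling is trimmed). User space
writes the vendor packet selecting the controller type before the
one-second open timeout fires, then reads back the four-byte announcement
carrying the assigned index:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint8_t req[2] = { 0xff /* HCI_VENDOR_PKT */, 0x00 /* HCI_BREDR */ };
		uint8_t rsp[4];
		int fd = open("/dev/vhci", O_RDWR);

		if (fd < 0 || write(fd, req, sizeof(req)) != sizeof(req))
			return 1;
		if (read(fd, rsp, sizeof(rsp)) != sizeof(rsp))
			return 1;

		/* rsp = { 0xff, dev_type, index as le16 }, per vhci_create_device() */
		printf("created hci%d (dev_type %d)\n", rsp[2] | (rsp[3] << 8), rsp[1]);

		/* keep the fd open; releasing it unregisters the virtual controller */
		pause();
		return 0;
	}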


@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:


@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)


@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108


@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, *param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, param: %d)\n",
__func__, ar, address, *param);
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;


@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *indicator_addr;
if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
return;
}
/* workaround for QCA988x_1.0 HW CE */
indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
} else {
unsigned long irq_flags;
local_irq_save(irq_flags);
iowrite32(1, indicator_addr);
/*
* PCIE write waits for ACK in IPQ8K, there is no
* need to read back value.
*/
(void)ioread32(indicator_addr);
(void)ioread32(indicator_addr); /* conservative */
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
iowrite32(0, indicator_addr);
local_irq_restore(irq_flags);
}
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
static int ath10k_ce_send_nolock(struct ce_state *ce_state,
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ce_ring_state *src_ring = ce_state->src_ring;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
@ -306,7 +277,9 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
ath10k_pci_wake(ar);
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
@ -346,7 +319,7 @@ exit:
return ret;
}
int ath10k_ce_send(struct ce_state *ce_state,
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
@ -365,33 +338,19 @@ int ath10k_ce_send(struct ce_state *ce_state,
return ret;
}
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
unsigned int nbytes, u32 flags)
{
unsigned int num_items = sendlist->num_items;
struct ce_sendlist_item *item;
item = &sendlist->item[num_items];
item->data = buffer;
item->u.nbytes = nbytes;
item->flags = flags;
sendlist->num_items++;
}
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
struct ce_sendlist *sendlist,
unsigned int transfer_id)
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags)
{
struct ce_ring_state *src_ring = ce_state->src_ring;
struct ce_sendlist_item *item;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int num_items = sendlist->num_items;
unsigned int sw_index;
unsigned int write_index;
int i, delta, ret = -ENOMEM;
int delta, ret = -ENOMEM;
spin_lock_bh(&ar_pci->ce_lock);
@ -400,30 +359,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
if (delta >= num_items) {
/*
* Handle all but the last item uniformly.
*/
for (i = 0; i < num_items - 1; i++) {
item = &sendlist->item[i];
ret = ath10k_ce_send_nolock(ce_state,
CE_SENDLIST_ITEM_CTXT,
(u32) item->data,
item->u.nbytes, transfer_id,
item->flags |
CE_SEND_FLAG_GATHER);
if (ret)
ath10k_warn("CE send failed for item: %d\n", i);
}
/*
* Provide valid context pointer for final item.
*/
item = &sendlist->item[i];
if (delta >= 1) {
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
(u32) item->data, item->u.nbytes,
transfer_id, item->flags);
paddr, nbytes,
transfer_id, flags);
if (ret)
ath10k_warn("CE send failed for last item: %d\n", i);
ath10k_warn("CE send failed: %d\n", ret);
}
spin_unlock_bh(&ar_pci->ce_lock);
@ -431,11 +372,11 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
return ret;
}
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
struct ce_ring_state *dest_ring = ce_state->dest_ring;
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@ -448,7 +389,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
write_index = dest_ring->write_index;
sw_index = dest_ring->sw_index;
ath10k_pci_wake(ar);
ret = ath10k_pci_wake(ar);
if (ret)
goto out;
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
struct ce_desc *base = dest_ring->base_addr_owner_space;
@ -470,6 +413,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
ret = -EIO;
}
ath10k_pci_sleep(ar);
out:
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@ -479,14 +424,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
struct ce_ring_state *dest_ring = ce_state->dest_ring;
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
@ -535,7 +480,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
return 0;
}
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@ -556,11 +501,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
return ret;
}
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
struct ce_ring_state *dest_ring;
struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@ -612,19 +557,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
struct ce_ring_state *src_ring = ce_state->src_ring;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
struct ce_desc *sdesc, *sbase;
unsigned int read_index;
int ret = -EIO;
int ret;
if (src_ring->hw_index == sw_index) {
/*
@ -634,48 +580,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
* the SW has really caught up to the HW, or if the cached
* value of the HW index has become stale.
*/
ath10k_pci_wake(ar);
ret = ath10k_pci_wake(ar);
if (ret)
return ret;
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
if ((read_index != sw_index) && (read_index != 0xffffffff)) {
struct ce_desc *sbase = src_ring->shadow_base;
struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
if ((read_index == sw_index) || (read_index == 0xffffffff))
return -EIO;
/* Return data from completed source descriptor */
*bufferp = __le32_to_cpu(sdesc->addr);
*nbytesp = __le16_to_cpu(sdesc->nbytes);
*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
CE_DESC_FLAGS_META_DATA);
sbase = src_ring->shadow_base;
sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
if (per_transfer_contextp)
*per_transfer_contextp =
src_ring->per_transfer_context[sw_index];
/* Return data from completed source descriptor */
*bufferp = __le32_to_cpu(sdesc->addr);
*nbytesp = __le16_to_cpu(sdesc->nbytes);
*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
CE_DESC_FLAGS_META_DATA);
/* sanity */
src_ring->per_transfer_context[sw_index] = NULL;
if (per_transfer_contextp)
*per_transfer_contextp =
src_ring->per_transfer_context[sw_index];
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
src_ring->sw_index = sw_index;
ret = 0;
}
/* sanity */
src_ring->per_transfer_context[sw_index] = NULL;
return ret;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
src_ring->sw_index = sw_index;
return 0;
}
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
struct ce_ring_state *src_ring;
struct ath10k_ce_ring *src_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
@ -727,7 +679,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
return ret;
}
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@ -756,53 +708,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
void *transfer_context;
u32 buf;
unsigned int nbytes;
unsigned int id;
unsigned int flags;
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
ath10k_pci_wake(ar);
spin_lock_bh(&ar_pci->ce_lock);
/* Clear the copy-complete interrupts that will be handled here. */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
if (ce_state->recv_cb) {
/*
* Pop completed recv buffers and call the registered
* recv callback for each
*/
while (ath10k_ce_completed_recv_next_nolock(ce_state,
&transfer_context,
&buf, &nbytes,
&id, &flags) == 0) {
spin_unlock_bh(&ar_pci->ce_lock);
ce_state->recv_cb(ce_state, transfer_context, buf,
nbytes, id, flags);
spin_lock_bh(&ar_pci->ce_lock);
}
}
spin_unlock_bh(&ar_pci->ce_lock);
if (ce_state->send_cb) {
/*
* Pop completed send buffers and call the registered
* send callback for each
*/
while (ath10k_ce_completed_send_next_nolock(ce_state,
&transfer_context,
&buf,
&nbytes,
&id) == 0) {
spin_unlock_bh(&ar_pci->ce_lock);
ce_state->send_cb(ce_state, transfer_context,
buf, nbytes, id);
spin_lock_bh(&ar_pci->ce_lock);
}
}
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
spin_lock_bh(&ar_pci->ce_lock);
/*
* Misc CE interrupts are not being handled, but still need
@ -823,10 +751,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
int ce_id, ret;
u32 intr_summary;
ath10k_pci_wake(ar);
ret = ath10k_pci_wake(ar);
if (ret)
return;
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@ -849,13 +780,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
int ret;
ath10k_pci_wake(ar);
ret = ath10k_pci_wake(ar);
if (ret)
return;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@ -871,11 +805,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
int ce_id, ret;
ret = ath10k_pci_wake(ar);
if (ret)
return;
ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@ -883,12 +820,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
void (*send_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
@ -900,13 +833,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
spin_unlock_bh(&ar_pci->ce_lock);
}
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
void (*recv_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags))
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@ -919,11 +847,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
struct ce_state *ce_state,
struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_ring_state *src_ring;
struct ath10k_ce_ring *src_ring;
unsigned int nentries = attr->src_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@ -937,19 +865,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
return 0;
}
ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->src_ring = (struct ce_ring_state *)ptr;
ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
src_ring = ce_state->src_ring;
ptr += sizeof(struct ce_ring_state);
ptr += sizeof(struct ath10k_ce_ring);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
@ -957,7 +884,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@ -970,6 +896,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
if (!src_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->src_ring);
ce_state->src_ring = NULL;
return -ENOMEM;
}
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
@ -986,12 +918,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
pci_free_consistent(ar_pci->pdev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
ce_state->src_ring = NULL;
return -ENOMEM;
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
ath10k_pci_wake(ar);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@ -999,18 +940,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
}
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
struct ce_state *ce_state,
struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_ring_state *dest_ring;
struct ath10k_ce_ring *dest_ring;
unsigned int nentries = attr->dest_nentries;
unsigned int ce_nbytes;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@ -1024,25 +968,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
ptr = kzalloc(ce_nbytes, GFP_KERNEL);
if (ptr == NULL)
return -ENOMEM;
ce_state->dest_ring = (struct ce_ring_state *)ptr;
ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
dest_ring = ce_state->dest_ring;
ptr += sizeof(struct ce_ring_state);
ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
@ -1055,6 +997,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
&base_addr);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(ce_state->dest_ring);
ce_state->dest_ring = NULL;
return -ENOMEM;
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
/*
@ -1071,44 +1019,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
ath10k_pci_wake(ar);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
}
static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_state *ce_state = NULL;
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
if (!ar_pci->ce_id_to_state[ce_id]) {
ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
if (ce_state == NULL) {
spin_unlock_bh(&ar_pci->ce_lock);
return NULL;
}
ar_pci->ce_id_to_state[ce_id] = ce_state;
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ctrl_addr;
ce_state->state = CE_RUNNING;
/* Save attribute flags */
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
}
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ctrl_addr;
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
spin_unlock_bh(&ar_pci->ce_lock);
@ -1122,12 +1061,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
struct ce_state *ath10k_ce_init(struct ath10k *ar,
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ce_state *ce_state;
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
int ret;
ret = ath10k_pci_wake(ar);
if (ret)
return NULL;
ce_state = ath10k_ce_init_state(ar, ce_id, attr);
if (!ce_state) {
@ -1136,40 +1080,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
}
if (attr->src_nentries) {
if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
ath10k_err("Failed to initialize CE src ring for ID: %d\n",
ce_id);
ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
if (attr->dest_nentries) {
if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
ce_id);
ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
ath10k_ce_deinit(ce_state);
return NULL;
}
}
/* Enable CE error interrupts */
ath10k_pci_wake(ar);
ath10k_ce_error_intr_enable(ar, ctrl_addr);
ath10k_pci_sleep(ar);
return ce_state;
}
void ath10k_ce_deinit(struct ce_state *ce_state)
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
unsigned int ce_id = ce_state->id;
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ce_state->state = CE_UNUSED;
ar_pci->ce_id_to_state[ce_id] = NULL;
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
pci_free_consistent(ar_pci->pdev,
@ -1190,5 +1132,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
kfree(ce_state);
ce_state->src_ring = NULL;
ce_state->dest_ring = NULL;
}


@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
@ -36,16 +35,9 @@
* how to use copy engines.
*/
struct ce_state;
struct ath10k_ce_pipe;
/* Copy Engine operational state */
enum ce_op_state {
CE_UNUSED,
CE_PAUSED,
CE_RUNNING,
};
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
/* Copy Engine Ring internal state */
struct ce_ring_state {
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
/* Copy Engine internal state */
struct ce_state {
struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
enum ce_op_state state;
void (*send_cb) (struct ce_state *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id);
void (*recv_cb) (struct ce_state *ce_state,
void *per_transfer_recv_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
void (*send_cb)(struct ath10k_ce_pipe *);
void (*recv_cb)(struct ath10k_ce_pipe *);
unsigned int src_sz_max;
struct ce_ring_state *src_ring;
struct ce_ring_state *dest_ring;
};
struct ce_sendlist_item {
/* e.g. buffer or desc list */
dma_addr_t data;
union {
/* simple buffer */
unsigned int nbytes;
/* Rx descriptor list */
unsigned int ndesc;
} u;
/* externally-specified flags; OR-ed with internal flags */
u32 flags;
};
struct ce_sendlist {
unsigned int num_items;
struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ath10k_ce_send(struct ce_state *ce_state,
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
@ -190,21 +152,10 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
void (*send_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
u32 buffer,
unsigned int nbytes,
/* OR-ed with internal flags */
u32 flags);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
@ -215,11 +166,11 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
/* 14 bits */
unsigned int transfer_id);
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags);
/*==================Recv=======================*/
@ -233,17 +184,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
* Implementation note: Pushes a buffer to Dest ring.
*/
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
void (*recv_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags));
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
@ -253,7 +199,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@ -263,7 +209,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
@ -272,7 +218,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
@ -282,7 +228,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
@ -291,13 +237,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void ath10k_ce_deinit(struct ce_state *ce_state);
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@ -322,9 +268,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* currently not in use */
unsigned int priority;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
@ -336,21 +279,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
/* Future use */
void *reserved;
};
/*
* When using sendlist_send to transfer multiple buffer fragments, the
* transfer context of each fragment, except last one, will be filled
* with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
* each fragment done with send and the transfer context would be
* CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
* status of a send completion.
*/
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008


@ -38,17 +38,6 @@ MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = QCA988X_HW_1_0_VERSION,
.name = "qca988x hw1.0",
.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
.fw = {
.dir = QCA988X_HW_1_0_FW_DIR,
.fw = QCA988X_HW_1_0_FW_FILE,
.otp = QCA988X_HW_1_0_OTP_FILE,
.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
},
},
{
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
@ -64,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
@ -112,7 +101,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
@ -214,8 +203,8 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE,
"ath10k: Board extended Data download addr: 0x%x\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
@ -446,6 +435,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
/* Set the UART baud rate to 19200. */
ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
if (ret) {
ath10k_warn("could not set the baud rate (%d)\n", ret);
return ret;
}
ath10k_info("UART prints enabled\n");
return 0;
}
@ -641,6 +637,10 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
status = ath10k_debug_start(ar);
if (status)
goto err_disconnect_htc;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
@ -658,6 +658,7 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
@ -717,10 +718,46 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return 0;
}
int ath10k_core_register(struct ath10k *ar)
static int ath10k_core_check_chip_id(struct ath10k *ar)
{
u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ar->chip_id, hw_revision);
/* Check that we are not using hw1.0 (some of them have same pci id
* as hw2.0) before doing anything else as ath10k crashes horribly
* due to missing hw1.0 workarounds. */
switch (hw_revision) {
case QCA988X_HW_1_0_CHIP_ID_REV:
ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
return -EOPNOTSUPP;
case QCA988X_HW_2_0_CHIP_ID_REV:
/* known hardware revision, continue normally */
return 0;
default:
ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
ar->chip_id);
return 0;
}
return 0;
}
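For illustration only (not part of the diff), this is how the revision bits fall out of a chip_id value, assuming the usual ath10k MS() helper (MS(v, f) expands to ((v) & f##_MASK) >> f##_LSB) together with the SOC_CHIP_ID_REV_MASK/LSB values added to hw.h later in this patch; the register value itself is hypothetical:
u32 chip_id = 0x04320200;	/* hypothetical readout of SOC_CHIP_ID_ADDRESS */
u32 rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;
	/* (0x04320200 & 0x00000f00) >> 8 == 0x2, i.e. QCA988X_HW_2_0_CHIP_ID_REV */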
int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
ar->chip_id = chip_id;
status = ath10k_core_check_chip_id(ar);
if (status) {
ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
return status;
}
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
@ -755,6 +792,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);

View file

@ -52,18 +52,12 @@ struct ath10k_skb_cb {
struct {
u8 vdev_id;
u16 msdu_id;
u8 tid;
bool is_offchan;
bool is_conf;
bool discard;
bool no_ack;
u8 refcount;
struct sk_buff *txfrag;
struct sk_buff *msdu;
} __packed htt;
/* 4 bytes left on 64bit arch */
u8 frag_len;
u8 pad_len;
} __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@ -112,11 +106,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
atomic_t pending_tx_count;
wait_queue_head_t wq;
struct sk_buff_head wmi_event_list;
struct work_struct wmi_event_work;
wait_queue_head_t tx_credits_wq;
};
struct ath10k_peer_stat {
@ -203,6 +193,7 @@ struct ath10k_vif {
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
@ -246,6 +237,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
@ -270,12 +264,21 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
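A minimal sketch (not from the patch) of how these flags are meant to be consumed, using the fw_features bitmap added to struct ath10k below and the standard bitmap helpers; the called helper is hypothetical:
if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features))
	/* firmware includes the extra RSSI fields in wmi_mgmt_rx_hdr */
	handle_ext_mgmt_rx_hdr(ar);	/* hypothetical helper */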
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@ -288,6 +291,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
struct targetdef *targetdef;
struct hostdef *hostdef;
@ -393,7 +398,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */

View file

@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@ -499,6 +501,136 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned int len;
char buf[50];
len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_chip_id = {
.read = ath10k_read_chip_id,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->debug.htt_stats_mask == 0)
/* htt stats are disabled */
return 0;
if (ar->state != ATH10K_STATE_ON)
return 0;
cookie = get_jiffies_64();
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
cookie);
if (ret) {
ath10k_warn("failed to send htt stats request: %d\n", ret);
return ret;
}
queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
return 0;
}
static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k,
debug.htt_stats_dwork.work);
mutex_lock(&ar->conf_mutex);
ath10k_debug_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
}
static ssize_t ath10k_read_htt_stats_mask(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_write_htt_stats_mask(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long mask;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &mask);
if (ret)
return ret;
/* max 8 bit masks (for now) */
if (mask > 0xff)
return -E2BIG;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats_mask = mask;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
goto out;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_htt_stats_mask = {
.read = ath10k_read_htt_stats_mask,
.write = ath10k_write_htt_stats_mask,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
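As a hedged example of how the mask is interpreted, using the bit assignments from enum htt_dbg_stats_type in htt.h (WAL_PDEV_TXRX is bit 0, RX_REORDER is bit 1), writing 0x3 to htt_stats_mask makes every poll request both statistics types:
u8 mask = HTT_DBG_STATS_WAL_PDEV_TXRX | HTT_DBG_STATS_RX_REORDER;	/* == 0x3 */
The replies come back as HTT_T2H_MSG_TYPE_STATS_CONF messages, which this series forwards to the ath10k_htt_stats tracepoint.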
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
return 0;
}
void ath10k_debug_stop(struct ath10k *ar)
{
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@ -507,6 +639,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@ -518,8 +653,15 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_htt_stats_mask);
return 0;
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG

View file

@ -27,11 +27,12 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_CORE = 0x00000020,
ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
@ -42,6 +43,8 @@ __printf(1, 2) int ath10k_err(const char *fmt, ...);
__printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
@ -50,6 +53,15 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_stop(struct ath10k *ar)
{
}
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;

View file

@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
struct sk_buff *skb,
u8 credits)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
int ret;
ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err;
ret = ath10k_hif_send_head(htc->ar,
ep->ul_pipe_id,
ep->eid,
skb->len,
skb);
if (unlikely(ret))
goto err;
return 0;
err:
ath10k_warn("HTC issue failed: %d\n", ret);
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
/* this is the simplest way to handle out-of-resources for non-credit
* based endpoints. credit based endpoints can still get -ENOSR, but
* this is highly unlikely as credit reservation should prevent that */
if (ret == -ENOSR) {
spin_lock_bh(&htc->tx_lock);
__skb_queue_head(&ep->tx_queue, skb);
spin_unlock_bh(&htc->tx_lock);
return ret;
}
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
return ret;
}
static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
u8 *credits)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
int credits_required;
int remainder;
unsigned int transfer_len;
lockdep_assert_held(&htc->tx_lock);
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
return NULL;
skb_cb = ATH10K_SKB_CB(skb);
transfer_len = skb->len;
if (likely(transfer_len <= htc->target_credit_size)) {
credits_required = 1;
} else {
/* figure out how many credits this message requires */
credits_required = transfer_len / htc->target_credit_size;
remainder = transfer_len % htc->target_credit_size;
if (remainder)
credits_required++;
}
ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
credits_required, ep->tx_credits);
if (ep->tx_credits < credits_required) {
__skb_queue_head(&ep->tx_queue, skb);
return NULL;
}
ep->tx_credits -= credits_required;
*credits = credits_required;
return skb;
}
static void ath10k_htc_send_work(struct work_struct *work)
{
struct ath10k_htc_ep *ep = container_of(work,
struct ath10k_htc_ep, send_work);
struct ath10k_htc *htc = ep->htc;
struct sk_buff *skb;
u8 credits = 0;
int ret;
while (true) {
if (ep->ul_is_polled)
ath10k_htc_send_complete_check(ep, 0);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credit_flow_enabled)
skb = ath10k_htc_get_skb_credit_based(htc, ep,
&credits);
else
skb = __skb_dequeue(&ep->tx_queue);
spin_unlock_bh(&htc->tx_lock);
if (!skb)
break;
ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
if (ret == -ENOSR)
break;
}
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
int credits = 0;
int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
/* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
__skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
spin_unlock_bh(&htc->tx_lock);
queue_work(htc->ar->workqueue, &ep->send_work);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
spin_unlock_bh(&htc->tx_lock);
}
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err_credits;
ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
skb->len, skb);
if (ret)
goto err_unmap;
return 0;
err_unmap:
ath10k_skb_unmap(htc->ar->dev, skb);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
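A worked example of the credit reservation above (the credit block size is illustrative; the real value is negotiated with the target during HTC setup):
/* e.g. target_credit_size == 1792 bytes */
int credits_big   = DIV_ROUND_UP(3000, 1792);	/* a 3000-byte skb reserves 2 credits */
int credits_small = DIV_ROUND_UP(1500, 1792);	/* a 1500-byte skb reserves 1 credit  */
If the endpoint does not have that many credits available, the new path returns -EAGAIN immediately instead of queueing the skb for the (now removed) send worker; callers can retry once credits are reported back via ep_tx_credits.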
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
/* note: when using TX credit flow, the re-checking of queues happens
* when credits flow back from the target. in the non-TX credit case,
* we recheck after the packet completes */
spin_lock_bh(&htc->tx_lock);
if (!ep->tx_credit_flow_enabled && !htc->stopped)
queue_work(ar->workqueue, &ep->send_work);
spin_unlock_bh(&htc->tx_lock);
return 0;
}
/* flush endpoint TX queue */
static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
spin_lock_bh(&htc->tx_lock);
for (;;) {
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
break;
skb_cb = ATH10K_SKB_CB(skb);
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
}
spin_unlock_bh(&htc->tx_lock);
cancel_work_sync(&ep->send_work);
}
/***********/
/* Receive */
/***********/
@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
queue_work(htc->ar->workqueue, &ep->send_work);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
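Note on the unlock/relock around the ep_tx_credits callback above: the lock is presumably dropped so the callback is free to submit new traffic through ath10k_htc_send(), which itself takes tx_lock, without deadlocking.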
@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ATH10K_DBG_HTC,
"HTC Service %s does not allocate target credits\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
INIT_COMPLETION(htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@ -873,19 +760,19 @@ setup:
if (status)
return status;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ATH10K_DBG_HTC,
"EP %d UL polled: %d, DL polled: %d\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s eid: %d TX flow control disabled\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
int i;
struct ath10k_htc_ep *ep;
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ath10k_htc_flush_endpoint_tx(htc, ep);
}
ath10k_hif_stop(htc->ar);
}

View file

@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
struct sk_buff_head tx_queue;
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {

View file

@ -104,21 +104,16 @@ err_htc_attach:
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
ath10k_dbg(ATH10K_DBG_HTT,
"htt target version %d.%d; host version %d.%d\n",
htt->target_version_major,
htt->target_version_minor,
HTT_CURRENT_VERSION_MAJOR,
HTT_CURRENT_VERSION_MINOR);
ath10k_info("htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
ath10k_err("htt major versions are incompatible!\n");
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
ath10k_warn("htt minor version differ but still compatible\n");
return 0;
}

View file

@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
#define HTT_CURRENT_VERSION_MAJOR 2
#define HTT_CURRENT_VERSION_MINOR 1
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
/* This command is used for sending management frames in HTT < 3.0.
* HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
#define ATH10K_HTT_MAX_NUM_REFILL 16
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);

View file

@ -20,6 +20,7 @@
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include <linux/log2.h>
@ -40,6 +41,10 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
int size;
@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
int ret, num_to_fill;
int ret, num_deficit, num_to_fill;
/* Refilling the whole RX ring buffer proves to be a bad idea. The
* reason is that RX may take up a significant amount of CPU cycles and
* starve other tasks, e.g. TX on an ethernet device while acting as a
* bridge with the ath10k wlan interface. This ended up with very poor
* performance once the host system CPU was overwhelmed with RX on ath10k.
*
* By limiting the number of refills the replenishing occurs
* progressively. This in turn makes use of the fact that tasklets are
* processed in FIFO order. This means actual RX processing can starve
* out refilling. If there are not enough buffers on the RX ring the FW
* will not report RX until it is refilled with enough buffers. This
* automatically balances load with respect to CPU power.
*
* This probably comes at a cost of lower maximum throughput but
* improves the average and stability. */
spin_lock_bh(&htt->rx_ring.lock);
num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
tasklet_schedule(&htt->rx_replenish_task);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
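A concrete walk-through of the bounded refill above, with hypothetical numbers: if fill_level is 1000 and fill_cnt is 200, the deficit is 800 but only min(ATH10K_HTT_MAX_NUM_REFILL, 800) == 16 buffers are posted in this pass; because a deficit remains, rx_replenish_task is scheduled so the refill resumes after any pending RX tasklets have run, while the -ENOMEM case still falls back to the retry timer.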
@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
dma_addr_t paddr;
@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
goto err_fill_ring;
ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
return false;
}
static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
{
struct htt_rx_desc *rxd;
struct sk_buff *amsdu;
struct sk_buff *first;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
u8 hdr_buf[64], addr[ETH_ALEN], *qos;
unsigned int hdr_len;
int crypto_len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* FIXME: No idea what assumptions are safe here. Need logs */
if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
(fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
return -ENOTSUPP;
}
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(hdr_buf, hdr, hdr_len);
hdr = (struct ieee80211_hdr *)hdr_buf;
/* A-MSDU max is a little less than 8K */
amsdu = dev_alloc_skb(8*1024);
if (!amsdu) {
ath10k_warn("A-MSDU allocation failed\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
return -ENOMEM;
}
if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
int hdrlen;
hdr = (void *)rxd->rx_hdr_status;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
}
/* FIXME: Hopefully this is a temporary measure.
*
* Reporting individual A-MSDU subframes means each reported frame
* shares the same sequence number.
*
* mac80211 drops frames it recognizes as duplicates, i.e.
* retransmission flag is set and sequence number matches sequence
* number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
* "Duplicate detection and recovery")
*
* To avoid frames being dropped clear retransmission flag for all
* received A-MSDUs.
*
* Worst case: actual duplicate frames will be reported but this should
* still be handled gracefully by other OSI/ISO layers. */
hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
first = skb;
while (skb) {
void *decap_hdr;
int decap_len = 0;
int len;
rxd = (void *)skb->data - sizeof(*rxd);
fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
RX_MSDU_START_INFO1_DECAP_FORMAT);
decap_hdr = (void *)rxd->rx_hdr_status;
skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
/* First frame in an A-MSDU chain has more decapped data. */
if (skb == first) {
/* We receive linked A-MSDU subframe skbuffs. The
* first one contains the original 802.11 header (and
* possible crypto param) in the RX descriptor. The
* A-MSDU subframe header follows that. Each part is
* aligned to 4 byte boundary. */
hdr = (void *)amsdu->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
decap_hdr += roundup(hdr_len, 4);
decap_hdr += roundup(crypto_len, 4);
len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
4);
decap_hdr += len;
}
if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
/* Ethernet2 decap inserts ethernet header in place of
* A-MSDU subframe header. */
skb_pull(skb, 6 + 6 + 2);
/* A-MSDU subframe header length */
decap_len += 6 + 6 + 2;
/* Ethernet2 decap also strips the LLC/SNAP so we need
* to re-insert it. The LLC/SNAP follows A-MSDU
* subframe header. */
/* FIXME: Not all LLCs are 8 bytes long */
decap_len += 8;
memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
}
if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
/* Native Wifi decap inserts regular 802.11 header
* in place of A-MSDU subframe header. */
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
/* A-MSDU subframe header length */
decap_len += 6 + 6 + 2;
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)hdr_buf;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
}
if (fmt == RX_MSDU_DECAP_RAW)
skb_trim(skb, skb->len - 4); /* remove FCS */
memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
/* A-MSDU subframes are padded to 4bytes
* but relative to first subframe, not the whole MPDU */
if (skb->next && ((decap_len + skb->len) & 3)) {
int padlen = 4 - ((decap_len + skb->len) & 3);
memset(skb_put(amsdu, padlen), 0, padlen);
/* original A-MSDU header has the bit set but we're
* not including A-MSDU subframe header */
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
/* original 802.11 header has a different DA */
memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* strip ethernet header and insert decapped 802.11
* header, amsdu subframe header and rfc1042 header */
len = 0;
len += sizeof(struct rfc1042_hdr);
len += sizeof(struct amsdu_subframe_hdr);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, len), decap_hdr, len);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
/* insert decapped 802.11 header making a single
* A-MSDU */
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
}
info->skb = skb;
info->encrypt_type = enctype;
skb = skb->next;
info->skb->next = NULL;
ath10k_process_rx(htt->ar, info);
}
info->skb = amsdu;
info->encrypt_type = enctype;
ath10k_htt_rx_free_msdu_chain(first);
return 0;
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
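A sketch of the frame each iteration now hands to ath10k_process_rx() for the Ethernet II decap case, reconstructed from the pushes above rather than stated in the patch:
/*
 *   [ 802.11 hdr from rx_hdr_status | A-MSDU subframe hdr (DA/SA/len) |
 *     RFC1042 LLC/SNAP | payload ]
 *
 * The stripped ethernet header is replaced by the original 802.11 header
 * plus the subframe and LLC/SNAP headers recovered from the RX descriptor;
 * sizeof(struct amsdu_subframe_hdr) == 14 and sizeof(struct rfc1042_hdr) == 8,
 * matching the len computed for the memcpy.
 */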
static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
int hdr_len;
void *rfc1042;
/* This shouldn't happen. If it does then it may be a FW bug. */
if (skb->next) {
@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
RX_MSDU_START_INFO1_DECAP_FORMAT);
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
switch (fmt) {
case RX_MSDU_DECAP_RAW:
/* remove trailing FCS */
skb_trim(skb, skb->len - 4);
skb_trim(skb, skb->len - FCS_LEN);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
/* nothing to do here */
/* Pull decapped header */
hdr = (struct ieee80211_hdr *)skb->data;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
skb_pull(skb, hdr_len);
/* Push original header */
hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
/* macaddr[6] + macaddr[6] + ethertype[2] */
skb_pull(skb, 6 + 6 + 2);
/* strip ethernet header and insert decapped 802.11 header and
* rfc1042 header */
rfc1042 = hdr;
rfc1042 += roundup(hdr_len, 4);
rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
rfc1042, sizeof(struct rfc1042_hdr));
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
/* macaddr[6] + macaddr[6] + len[2] */
/* we don't need this for non-A-MSDU */
skb_pull(skb, 6 + 6 + 2);
/* remove A-MSDU subframe header and insert
* decapped 802.11 header. rfc1042 header is already there */
skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
break;
}
if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
void *llc;
int llclen;
llclen = 8;
llc = hdr;
llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
skb_push(skb, llclen);
memcpy(skb->data, llc, llclen);
}
if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
int len = ieee80211_hdrlen(hdr->frame_control);
skb_push(skb, len);
memcpy(skb->data, hdr, len);
}
info->skb = skb;
info->encrypt_type = enctype;
return 0;
ath10k_process_rx(htt->ar, info);
}
static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
int fw_desc_len;
u8 *fw_desc;
int i, j;
int ret;
int ip_summed;
memset(&info, 0, sizeof(info));
@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
/* The skb is not yet processed and it may be
* reallocated. Since the offload is in the original
* skb extract the checksum now and assign it later */
ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
ret = ath10k_htt_rx_amsdu(htt, &info);
ath10k_htt_rx_amsdu(htt, &info);
else
ret = ath10k_htt_rx_msdu(htt, &info);
if (ret && !info.fcs_err) {
ath10k_warn("error processing msdus %d\n", ret);
dev_kfree_skb_any(info.skb);
continue;
}
if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
info.skb->ip_summed = ip_summed;
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
ath10k_htt_rx_msdu(htt, &info);
}
}
ath10k_htt_rx_msdu_buff_replenish(htt);
tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
ath10k_txrx_tx_completed(htt, &tx_done);
ath10k_txrx_tx_unref(htt, &tx_done);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
ath10k_txrx_tx_completed(htt, &tx_done);
ath10k_txrx_tx_unref(htt, &tx_done);
}
break;
}
@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(skb->data, skb->len);
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
case HTT_T2H_MSG_TYPE_RX_ADDBA:
case HTT_T2H_MSG_TYPE_RX_DELBA:
case HTT_T2H_MSG_TYPE_RX_FLUSH:

View file

@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
struct sk_buff *txdesc;
struct htt_tx_done tx_done = {0};
int msdu_id;
/* No locks needed. Called after communication with the device has
@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
txdesc = htt->pending_tx[msdu_id];
if (!txdesc)
continue;
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
ATH10K_SKB_CB(txdesc)->htt.discard = true;
ath10k_txrx_tx_unref(htt, txdesc);
ath10k_txrx_tx_unref(htt, &tx_done);
}
}
@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_htt *htt = &ar->htt;
if (skb_cb->htt.is_conf) {
dev_kfree_skb_any(skb);
return;
}
if (skb_cb->is_aborted) {
skb_cb->htt.discard = true;
/* if the skbuff is aborted we need to make sure we'll free up
* the tx resources, we can't simply run tx_unref() 2 times
* because if htt tx completion came in earlier we'd access
* unallocated memory */
if (skb_cb->htt.refcount > 1)
skb_cb->htt.refcount = 1;
}
ath10k_txrx_tx_unref(htt, skb);
dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0, ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
skb = ath10k_htc_alloc_skb(len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
req = &cmd->stats_req;
memset(req, 0, sizeof(*req));
/* currently we support only max 8 bit masks so no need to worry
* about endian support */
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn("failed to send htt type stats request: %d", ret);
dev_kfree_skb_any(skb);
return ret;
}
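For clarity, the cookie is a 64-bit value (a jiffies timestamp on the debugfs path) split across two little-endian 32-bit fields; with a hypothetical cookie the split looks like this:
u64 cookie = 0x0000000123456789ULL;	/* e.g. from get_jiffies_64() */
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);			/* 0x23456789 */
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);	/* 0x00000001 */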
@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
#undef desc_offset
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->htt.vdev_id;
int len = 0;
int msdu_id = -1;
int res;
@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(len);
if (!txdesc) {
res = -ENOMEM;
goto err;
goto err_free_msdu_id;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
}
htt->pending_tx[msdu_id] = txdesc;
spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.msdu = msdu;
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
goto err_unmap_msdu;
return 0;
err:
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
struct sk_buff *txfrag = NULL;
bool use_frags;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
u8 tid;
int prefetch_len, desc_len, frag_len;
dma_addr_t frags_paddr;
int prefetch_len, desc_len;
int msdu_id = -1;
int res;
u8 flags0;
@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
goto err;
goto err_free_msdu_id;
}
txfrag = dev_alloc_skb(frag_len);
if (!txfrag) {
res = -ENOMEM;
goto err;
}
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
goto err;
goto err_free_txdesc;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
if (use_frags) {
skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
skb_cb->htt.pad_len = (unsigned long)msdu->data -
round_down((unsigned long)msdu->data, 4);
skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
} else {
skb_cb->htt.frag_len = 0;
skb_cb->htt.pad_len = 0;
}
htt->pending_tx[msdu_id] = txdesc;
spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
goto err_pull_txfrag;
/* tx fragment list must be terminated with zero-entry */
skb_put(txfrag, frag_len);
tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
tx_frags[0].len = __cpu_to_le32(msdu->len);
tx_frags[1].paddr = __cpu_to_le32(0);
tx_frags[1].len = __cpu_to_le32(0);
if (use_frags) {
dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
res = ath10k_skb_map(dev, txfrag);
if (res)
goto err;
/* tx fragment list must be terminated with zero-entry */
tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
skb_cb->htt.frag_len +
skb_cb->htt.pad_len);
tx_frags[0].len = __cpu_to_le32(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
tx_frags[1].paddr = __cpu_to_le32(0);
tx_frags[1].len = __cpu_to_le32(0);
ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
DMA_TO_DEVICE);
}
ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
if (use_frags)
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
else
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
cmd->data_tx.len = __cpu_to_le16(msdu->len);
cmd->data_tx.len = __cpu_to_le16(msdu->len -
skb_cb->htt.frag_len -
skb_cb->htt.pad_len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.txfrag = txfrag;
skb_cb->htt.msdu = msdu;
memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
goto err_unmap_msdu;
return 0;
err:
if (txfrag)
ath10k_skb_unmap(dev, txfrag);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (txfrag)
dev_kfree_skb_any(txfrag);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
ath10k_htt_tx_dec_pending(htt);
err_unmap_msdu:
ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
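A sketch of the buffer layout the fragment path above builds when use_frags is true, reconstructed from the pushes and offsets in this function:
/*
 * skb_cb->paddr --> [ tx_frags[0], tx_frags[1] | pad | 802.11 frame ... ]
 *                                                      ^
 *                     tx_frags[0].paddr = skb_cb->paddr + frag_len + pad_len
 *
 * A single DMA mapping of the msdu therefore covers both the fragment list
 * referenced by cmd->data_tx.frags_paddr and the frame data itself, which is
 * why the dma_sync_single_for_cpu/for_device pair brackets the frag writes.
 */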

View file

@ -24,18 +24,14 @@
#define SUPPORTED_FW_MAJOR 1
#define SUPPORTED_FW_MINOR 0
#define SUPPORTED_FW_RELEASE 0
#define SUPPORTED_FW_BUILD 629
#define SUPPORTED_FW_BUILD 636
/* QCA988X 1.0 definitions */
#define QCA988X_HW_1_0_VERSION 0x4000002c
#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
@ -53,6 +49,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
@ -75,7 +74,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
@ -169,6 +172,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0

View file

@ -460,6 +460,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s\n",
arg.vdev_id, arg.channel.freq,
ath10k_wmi_phymode_str(arg.channel.mode));
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@ -503,13 +508,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
struct wmi_vdev_start_request_arg arg = {};
enum nl80211_channel_type type;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@ -607,7 +609,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
goto vdev_fail;
}
ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
ar->monitor_present = true;
@ -639,7 +641,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
ar->monitor_present = false;
ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
@ -668,7 +670,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->vdev_id);
return;
}
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
static void ath10k_control_ibss(struct ath10k_vif *arvif,
@ -752,14 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
psmode);
if (ar_iter->ret)
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
}
/**********************/
@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.num_rates = n;
arg->peer_num_spatial_streams = max((n+7) / 8, 1);
ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
}
@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_QOS;
if (sta->wme && sta->uapsd_queues) {
ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
arg->peer_flags |= WMI_PEER_APSD;
arg->peer_flags |= WMI_RC_UAPSD_FLAG;
arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@ -1048,7 +1051,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@ -1076,8 +1080,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
enum wmi_phy_mode phymode = MODE_UNKNOWN;
/* FIXME: add VHT */
switch (ar->hw->conf.chandef.chan->band) {
case IEEE80211_BAND_2GHZ:
if (sta->ht_cap.ht_supported) {
@ -1091,7 +1093,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
case IEEE80211_BAND_5GHZ:
if (sta->ht_cap.ht_supported) {
/*
* Check VHT first.
*/
if (sta->vht_cap.vht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AC_VHT80;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
phymode = MODE_11AC_VHT20;
} else if (sta->ht_cap.ht_supported) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
@ -1105,6 +1117,9 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
break;
}
ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
sta->addr, ath10k_wmi_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
}
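For example (illustrative): a 5 GHz peer that advertises VHT and reports IEEE80211_STA_RX_BW_40 is now assigned MODE_11AC_VHT40, whereas previously it would have fallen through to the HT branch and been limited to MODE_11NA_HT40.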
@ -1162,15 +1177,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
bss_conf->bssid);
if (ret)
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
else
ath10k_dbg(ATH10K_DBG_MAC,
"VDEV: %d associated, BSSID: %pM, AID: %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
}
/*
@ -1191,10 +1206,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n",
arvif->vdev_id);
/* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
if (!ret)
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
arvif->vdev_id);
/*
* If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@ -1203,12 +1219,10 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
* interfaces as it expects there is no rx when no interface is
* running.
*/
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
arvif->vdev_id, ret);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
ath10k_wmi_flush_tx(ar);
/* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_index = 0;
}
@ -1333,8 +1347,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
continue;
ath10k_dbg(ATH10K_DBG_WMI,
"%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
__func__, ch - arg.channels, arg.n_channels,
"mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ch->max_antenna_gain, ch->mode);
@ -1421,10 +1435,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ieee80211_key_conf *key = info->control.hw_key;
int ret;
/* TODO AP mode should be implemented */
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (!ieee80211_has_protected(hdr->frame_control))
return;
@ -1438,7 +1448,8 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
if (key->keyidx == arvif->def_wep_key_index)
return;
ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
arvif->vdev_id, key->keyidx);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DEF_KEYID,
@ -1480,6 +1491,12 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int ret;
if (ar->htt.target_version_major >= 3) {
/* Since HTT 3.0 there is no separate mgmt tx command */
ret = ath10k_htt_tx(&ar->htt, skb);
goto exit;
}
if (ieee80211_is_mgmt(hdr->frame_control))
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else if (ieee80211_is_nullfunc(hdr->frame_control))
@ -1491,6 +1508,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
else
ret = ath10k_htt_tx(&ar->htt, skb);
exit:
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@ -1534,7 +1552,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
@ -1546,6 +1564,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
spin_unlock_bh(&ar->data_lock);
if (peer)
/* FIXME: should this use ath10k_warn()? */
ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
@ -1643,8 +1662,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
return -EIO;
}
ath10k_wmi_flush_tx(ar);
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0)
ath10k_warn("timed out while waiting for scan to stop\n");
@ -1678,10 +1695,6 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
/* make sure we submit the command so the completion
* timeout makes sense */
ath10k_wmi_flush_tx(ar);
ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
if (ret == 0) {
ath10k_abort_scan(ar);
@ -1727,8 +1740,10 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
if (ieee80211_is_data_qos(hdr->frame_control) &&
is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = HTT_DATA_TX_EXT_TID_MGMT;
} else if (ieee80211_is_data_qos(hdr->frame_control) &&
is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
@ -1742,7 +1757,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
ATH10K_SKB_CB(skb)->htt.tid = tid;
@ -1884,7 +1899,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
conf->chandef.chan->center_freq);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
@ -1901,7 +1916,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@ -1973,7 +1987,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
@ -2052,7 +2066,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
@ -2064,6 +2083,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
kfree(arvif->u.ap.noa_data);
}
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
ath10k_warn("WMI vdev delete failed: %d\n", ret);
@ -2105,18 +2127,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
!ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
ar->monitor_vdev_id);
ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Unable to start monitor mode\n");
else
ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
ar->monitor_vdev_id);
ret = ath10k_monitor_stop(ar);
if (ret)
ath10k_warn("Unable to stop monitor mode\n");
else
ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
}
mutex_unlock(&ar->conf_mutex);
@ -2141,41 +2165,41 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BEACON_INTERVAL,
arvif->beacon_interval);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d beacon_interval %d\n",
arvif->vdev_id, arvif->beacon_interval);
if (ret)
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Beacon interval: %d set for VDEV: %d\n",
arvif->beacon_interval, arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON) {
ath10k_dbg(ATH10K_DBG_MAC,
"vdev %d set beacon tx mode to staggered\n",
arvif->vdev_id);
ret = ath10k_wmi_pdev_set_param(ar,
WMI_PDEV_PARAM_BEACON_TX_MODE,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set staggered beacon mode for VDEV: %d\n",
arvif->vdev_id);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d dtim_period %d\n",
arvif->vdev_id, arvif->dtim_period);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_PERIOD,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set dtim period: %d for VDEV: %d\n",
arvif->dtim_period, arvif->vdev_id);
}
if (changed & BSS_CHANGED_SSID &&
@ -2188,16 +2212,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Added peer: %pM for VDEV: %d\n",
info->bssid, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION) {
/*
@ -2207,11 +2230,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(arvif->u.sta.bssid, info->bssid,
ETH_ALEN);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
/* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
if (!ret)
ath10k_dbg(ATH10K_DBG_MAC,
"VDEV: %d started with BSSID: %pM\n",
arvif->vdev_id, info->bssid);
}
/*
@ -2235,16 +2259,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
cts_prot = 0;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
arvif->vdev_id, cts_prot);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_ENABLE_RTSCTS,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set CTS prot: %d for VDEV: %d\n",
cts_prot, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@ -2255,16 +2278,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
arvif->vdev_id, slottime);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_SLOT_TIME,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set slottime: %d for VDEV: %d\n",
slottime, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@ -2274,16 +2296,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
else
preamble = WMI_VDEV_PREAMBLE_LONG;
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d preamble %dn",
arvif->vdev_id, preamble);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_PREAMBLE,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set preamble: %d for VDEV: %d\n",
preamble, arvif->vdev_id);
}
if (changed & BSS_CHANGED_ASSOC) {
@ -2474,27 +2496,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta)\n",
arvif->vdev_id, sta->addr);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Removed peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@ -2505,14 +2526,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New association.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to associate station: %pM\n",
sta->addr);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Station %pM moved to assoc state\n",
sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@ -2520,14 +2540,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* Disassociation.
*/
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
ath10k_warn("Failed to disassociate station: %pM\n",
sta->addr);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Station %pM moved to disassociated state\n",
sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@ -2747,14 +2766,13 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
arvif->vdev_id, rts);
ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set RTS threshold: %d for VDEV: %d\n",
rts, arvif->vdev_id);
}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@ -2789,14 +2807,13 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n",
arvif->vdev_id, frag);
ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
else
ath10k_dbg(ATH10K_DBG_MAC,
"Set frag threshold: %d for VDEV: %d\n",
frag, arvif->vdev_id);
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@ -2836,8 +2853,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
bool empty;
spin_lock_bh(&ar->htt.tx_lock);
empty = bitmap_empty(ar->htt.used_msdu_ids,
ar->htt.max_num_pending_tx);
empty = (ar->htt.num_pending_tx == 0);
spin_unlock_bh(&ar->htt.tx_lock);
skip = (ar->state == ATH10K_STATE_WEDGED);
@ -3326,6 +3342,10 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS;
/* MSDU can have HTT TX fragment pushed in front. The additional 4
* bytes are used for padding/alignment if necessary. */
ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;

View file

@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
#define QCA988X_1_0_DEVICE_ID (0xabcd)
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{0}
};
@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
/* could be moved to share CE3 */
/* target->host HTT + HTC control */
{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
/* target->host WMI */
{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
/* host->target WMI */
{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
/* host->target HTT */
{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
/* unused */
{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* Target autonomous hif_memcpy */
{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
/* ce_diag, the Diagnostic Window */
{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 16,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE1: target->host HTT + HTC control */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 512,
.dest_nentries = 512,
},
/* CE2: target->host WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 32,
},
/* CE3: host->target WMI */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 32,
.src_sz_max = 2048,
.dest_nentries = 0,
},
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
.src_sz_max = 256,
.dest_nentries = 0,
},
/* CE5: unused */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE6: target autonomous hif_memcpy */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 0,
.dest_nentries = 0,
},
/* CE7: ce_diag, the Diagnostic Window */
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 2,
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
},
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
/* target->host HTT + HTC control */
{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
/* target->host WMI */
{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target WMI */
{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
/* host->target HTT */
{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
/* CE0: host->target HTC control and raw streams */
{
.pipenum = 0,
.pipedir = PIPEDIR_OUT,
.nentries = 32,
.nbytes_max = 256,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = 1,
.pipedir = PIPEDIR_IN,
.nentries = 32,
.nbytes_max = 512,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE2: target->host WMI */
{
.pipenum = 2,
.pipedir = PIPEDIR_IN,
.nentries = 32,
.nbytes_max = 2048,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE3: host->target WMI */
{
.pipenum = 3,
.pipedir = PIPEDIR_OUT,
.nentries = 32,
.nbytes_max = 2048,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE4: host->target HTT */
{
.pipenum = 4,
.pipedir = PIPEDIR_OUT,
.nentries = 256,
.nbytes_max = 256,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* NB: 50% of src nentries, since tx has 2 frags */
/* unused */
{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
/* Reserved for target autonomous hif_memcpy */
{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
/* CE5: unused */
{
.pipenum = 5,
.pipedir = PIPEDIR_OUT,
.nentries = 32,
.nbytes_max = 2048,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = 6,
.pipedir = PIPEDIR_INOUT,
.nentries = 32,
.nbytes_max = 4096,
.flags = CE_ATTR_FLAGS,
.reserved = 0,
},
/* CE7 used only by Host */
};
@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct ce_state *ce_diag;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
struct ce_state *ce_diag;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
ath10k_warn("Unable to wakeup target\n");
}
void ath10k_do_pci_wake(struct ath10k *ar)
int ath10k_do_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *pci_addr = ar_pci->mem;
@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
atomic_inc(&ar_pci->keep_awake_count);
if (ar_pci->verified_awake)
return;
return 0;
for (;;) {
if (ath10k_pci_target_is_awake(ar)) {
ar_pci->verified_awake = true;
break;
return 0;
}
if (tot_delay > PCIE_WAKE_TIMEOUT) {
ath10k_warn("target takes too long to wake up (awake count %d)\n",
ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
PCIE_WAKE_TIMEOUT,
atomic_read(&ar_pci->keep_awake_count));
break;
return -ETIMEDOUT;
}
udelay(curr_delay);
@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
* FIXME: Handle OOM properly.
*/
static inline
struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k_pci_compl *compl = NULL;
@ -511,39 +612,28 @@ exit:
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
void *transfer_context,
u32 ce_data,
unsigned int nbytes,
unsigned int transfer_id)
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
bool process = false;
do {
/*
* For the send completion of an item in sendlist, just
* increment num_sends_allowed. The upper layer callback will
* be triggered when last fragment is done with send.
*/
if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
spin_lock_bh(&pipe_info->pipe_lock);
pipe_info->num_sends_allowed++;
spin_unlock_bh(&pipe_info->pipe_lock);
continue;
}
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
compl->send_or_recv = HIF_CE_COMPLETE_SEND;
compl->state = ATH10K_PCI_COMPL_SEND;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
compl->transfer_context = transfer_context;
compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = 0;
@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
process = true;
} while (ath10k_ce_completed_send_next(ce_state,
&transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0);
/*
* If only some of the items within a sendlist have completed,
* don't invoke completion processing until the entire sendlist
* has been sent.
*/
if (!process)
return;
}
ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
void *transfer_context, u32 ce_data,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
struct ath10k_pci_compl *compl;
struct sk_buff *skb;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
do {
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
compl = get_free_compl(pipe_info);
if (!compl)
break;
compl->send_or_recv = HIF_CE_COMPLETE_RECV;
compl->state = ATH10K_PCI_COMPL_RECV;
compl->ce_state = ce_state;
compl->pipe_info = pipe_info;
compl->transfer_context = transfer_context;
compl->skb = transfer_context;
compl->nbytes = nbytes;
compl->transfer_id = transfer_id;
compl->flags = flags;
@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
spin_lock_bh(&ar_pci->compl_lock);
list_add_tail(&compl->list, &ar_pci->compl_process);
spin_unlock_bh(&ar_pci->compl_lock);
} while (ath10k_ce_completed_recv_next(ce_state,
&transfer_context,
&ce_data, &nbytes,
&transfer_id,
&flags) == 0);
}
ath10k_pci_process_ce(ar);
}
@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
struct ce_state *ce_hdl = pipe_info->ce_hdl;
struct ce_sendlist sendlist;
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
unsigned int len;
u32 flags = 0;
int ret;
memset(&sendlist, 0, sizeof(struct ce_sendlist));
len = min(bytes, nbuf->len);
bytes -= len;
@ -648,8 +720,6 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
/* Make sure we have resources to handle this request */
spin_lock_bh(&pipe_info->pipe_lock);
if (!pipe_info->num_sends_allowed) {
@ -660,7 +730,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
pipe_info->num_sends_allowed--;
spin_unlock_bh(&pipe_info->pipe_lock);
ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
skb_cb->paddr, len, flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
@ -670,7 +741,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
int ret;
spin_lock_bh(&pipe_info->pipe_lock);
@ -764,9 +835,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_state *ce_diag = ar_pci->ce_diag;
struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
int i, pipe_num, completions, disable_interrupts;
@ -805,15 +876,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
continue;
for (i = 0; i < completions; i++) {
compl = kmalloc(sizeof(struct ath10k_pci_compl),
GFP_KERNEL);
compl = kmalloc(sizeof(*compl), GFP_KERNEL);
if (!compl) {
ath10k_warn("No memory for completion state\n");
ath10k_pci_stop_ce(ar);
return -ENOMEM;
}
compl->send_or_recv = HIF_CE_COMPLETE_FREE;
compl->state = ATH10K_PCI_COMPL_FREE;
list_add_tail(&compl->list, &pipe_info->compl_free);
}
}
@ -840,7 +910,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
* their associated resources */
spin_lock_bh(&ar_pci->compl_lock);
list_for_each_entry(compl, &ar_pci->compl_process, list) {
skb = (struct sk_buff *)compl->transfer_context;
skb = compl->skb;
ATH10K_SKB_CB(skb)->is_aborted = true;
}
spin_unlock_bh(&ar_pci->compl_lock);
@ -850,7 +920,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_compl *compl, *tmp;
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
struct sk_buff *netbuf;
int pipe_num;
@ -861,7 +931,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
list_del(&compl->list);
netbuf = (struct sk_buff *)compl->transfer_context;
netbuf = compl->skb;
dev_kfree_skb_any(netbuf);
kfree(compl);
}
@ -912,12 +982,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
list_del(&compl->list);
spin_unlock_bh(&ar_pci->compl_lock);
if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
switch (compl->state) {
case ATH10K_PCI_COMPL_SEND:
cb->tx_completion(ar,
compl->transfer_context,
compl->skb,
compl->transfer_id);
send_done = 1;
} else {
break;
case ATH10K_PCI_COMPL_RECV:
ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
if (ret) {
ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@ -925,7 +997,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
break;
}
skb = (struct sk_buff *)compl->transfer_context;
skb = compl->skb;
nbytes = compl->nbytes;
ath10k_dbg(ATH10K_DBG_PCI,
@ -944,9 +1016,17 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
nbytes,
skb->len + skb_tailroom(skb));
}
break;
case ATH10K_PCI_COMPL_FREE:
ath10k_warn("free completion cannot be processed\n");
break;
default:
ath10k_warn("invalid completion state (%d)\n",
compl->state);
break;
}
compl->send_or_recv = HIF_CE_COMPLETE_FREE;
compl->state = ATH10K_PCI_COMPL_FREE;
/*
* Add completion back to the pipe's free list.
@ -1037,12 +1117,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num)
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_state *ce_state = pipe_info->ce_hdl;
struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
@ -1097,7 +1177,7 @@ err:
static int ath10k_pci_post_rx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret = 0;
@ -1147,11 +1227,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ce_state *ce_hdl;
struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
@ -1179,11 +1259,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
}
}
static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ce_state *ce_hdl;
struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
@ -1206,15 +1286,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
if (netbuf != CE_SENDLIST_ITEM_CTXT)
/*
* Indicate the completion to higer layer to free
* the buffer
*/
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
/*
* Indicate the completion to higher layer to free
* the buffer
*/
ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
}
}
@ -1232,7 +1311,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
pipe_info = &ar_pci->pipe_info[pipe_num];
ath10k_pci_rx_pipe_cleanup(pipe_info);
@ -1243,7 +1322,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
int pipe_num;
for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@ -1293,8 +1372,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
@ -1378,13 +1459,16 @@ err_dma:
return ret;
}
static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
unsigned int transfer_id)
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
struct bmi_xfer *xfer = transfer_context;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
&nbytes, &transfer_id))
return;
if (xfer->wait_for_resp)
return;
@ -1392,14 +1476,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
complete(&xfer->done);
}
static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct bmi_xfer *xfer = transfer_context;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
&nbytes, &transfer_id, &flags))
return;
if (!xfer->wait_for_resp) {
ath10k_warn("unexpected: BMI data received; ignoring\n");
@ -1679,7 +1766,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct hif_ce_pipe_info *pipe_info;
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num;
@ -1895,7 +1982,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
struct ath10k_pci *ar_pci = pipe->ar_pci;
ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@ -2212,18 +2299,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
static void ath10k_pci_device_reset(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *mem = ar_pci->mem;
int i;
u32 val;
if (!SOC_GLOBAL_RESET_ADDRESS)
return;
if (!mem)
return;
ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
PCIE_SOC_WAKE_V_MASK);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_target_is_awake(ar))
@ -2232,12 +2314,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
}
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK)
break;
msleep(1);
@ -2245,16 +2327,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
/* Pull Target, including PCIe, out of RESET. */
val &= ~1;
ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
RTC_STATE_COLD_RESET_MASK))
break;
msleep(1);
}
ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@ -2267,13 +2349,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
switch (i) {
case ATH10K_PCI_FEATURE_MSI_X:
ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
break;
}
}
@ -2286,7 +2365,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
u32 lcr_val;
u32 lcr_val, chip_id;
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
@ -2298,9 +2377,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
switch (pci_dev->device) {
case QCA988X_1_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
break;
@ -2322,10 +2398,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
/* Enable QCA988X_1.0 HW workarounds */
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
spin_lock_init(&ar_pci->hw_v1_workaround_lock);
ar_pci->ar = ar;
ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
@ -2395,9 +2467,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&ar_pci->ce_lock);
ar_pci->cacheline_sz = dma_get_cache_alignment();
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("Failed to get chip id: %d\n", ret);
return ret;
}
ret = ath10k_core_register(ar);
chip_id = ath10k_pci_read32(ar,
RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
ath10k_do_pci_sleep(ar);
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_iomap;
@ -2414,7 +2497,6 @@ err_region:
err_device:
pci_disable_device(pdev);
err_ar:
pci_set_drvdata(pdev, NULL);
ath10k_core_destroy(ar);
err_ar_pci:
/* call HIF PCI free here */
@ -2442,7 +2524,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
pci_clear_master(pdev);
@ -2483,9 +2564,6 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

View file

@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
enum ath10k_pci_compl_state {
ATH10K_PCI_COMPL_FREE = 0,
ATH10K_PCI_COMPL_SEND,
ATH10K_PCI_COMPL_RECV,
};
struct ath10k_pci_compl {
struct list_head list;
int send_or_recv;
struct ce_state *ce_state;
struct hif_ce_pipe_info *pipe_info;
void *transfer_context;
enum ath10k_pci_compl_state state;
struct ath10k_ce_pipe *ce_state;
struct ath10k_pci_pipe *pipe_info;
struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
/*
* PCI-specific Target state
*
@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
struct hif_ce_pipe_info {
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ce_state *ce_hdl;
struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
@ -190,7 +190,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
@ -219,7 +218,7 @@ struct ath10k_pci {
bool compl_processing;
struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
@ -227,16 +226,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
struct ce_state *ce_diag;
struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ce_state *ce_id_to_state[CE_COUNT_MAX];
/* makes sure that dummy reads are atomic */
spinlock_t hw_v1_workaround_lock;
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@ -244,14 +240,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@ -310,23 +310,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *addr = ar_pci->mem;
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
unsigned long irq_flags;
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
ioread32(addr+offset+4); /* 3rd read prior to write */
ioread32(addr+offset+4); /* 2nd read prior to write */
ioread32(addr+offset+4); /* 1st read prior to write */
iowrite32(value, addr+offset);
spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
irq_flags);
} else {
iowrite32(value, addr+offset);
}
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@ -336,15 +321,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
void ath10k_do_pci_wake(struct ath10k *ar);
int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_wake(ar);
return ath10k_do_pci_wake(ar);
return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)

View file

@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
* b) HW crypto parameter
* - 0 bytes for no security
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
* c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only the first frame in the sequence contains (a) and (b). */
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
RX_MSDU_DECAP_NATIVE_WIFI = 1,
RX_MSDU_DECAP_RAW = 0,
/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
* htt_rx_desc contains the original decapped 802.11 header. */
RX_MSDU_DECAP_NATIVE_WIFI = 1,
/* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
* total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
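The layout comment above is easiest to follow with a worked size calculation. The helper below is a hypothetical sketch, not part of the driver: the parameter names, the fixed security lengths, and the use of the kernel ALIGN() macro are assumptions made only to illustrate how the documented pieces add up for the first subframe.

/* Hypothetical sketch: total rx_hdr_status length for the first subframe,
 * following the layout documented above. Illustrative only. */
static size_t rx_hdr_status_len(size_t hdr_len,    /* (a) 802.11 header */
				size_t crypto_len, /* (b) 0, 4 or 8 bytes */
				bool is_amsdu)
{
	size_t len = ALIGN(hdr_len, 4);    /* (a) padded to 4 bytes */

	len += ALIGN(crypto_len, 4);       /* (b) padded to 4 bytes */
	if (is_amsdu)
		len += 14;                 /* (c) A-MSDU subframe header */
	len += 8;                          /* (d) LLC/SNAP (RFC1042) */

	return len;
}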

View file

@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(int id, void *buf, size_t buf_len),
TP_PROTO(int id, void *buf, size_t buf_len, int ret),
TP_ARGS(id, buf, buf_len),
TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
__field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
__entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"id %d len %zu",
"id %d len %zu ret %d",
__entry->id,
__entry->buf_len
__entry->buf_len,
__entry->ret
)
);
@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
TRACE_EVENT(ath10k_htt_stats,
TP_PROTO(void *buf, size_t buf_len),
TP_ARGS(buf, buf_len),
TP_STRUCT__entry(
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"len %zu",
__entry->buf_len
)
);
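A tracepoint defined this way is emitted with the generated trace_ call. The one-liner below is a hedged assumption about a call site (e.g. wherever the HTT stats buffer is received); only the trace_ath10k_htt_stats() signature itself follows from the TRACE_EVENT above.

/* Hypothetical call site: forward a received HTT stats buffer to tracing. */
trace_ath10k_htt_stats(skb->data, skb->len);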
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */

View file

@ -44,70 +44,15 @@ out:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
int ret;
if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
return;
ATH10K_SKB_CB(txdesc)->htt.refcount--;
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
return;
if (txfrag) {
ret = ath10k_skb_unmap(dev, txfrag);
if (ret)
ath10k_warn("txfrag unmap failed (%d)\n", ret);
dev_kfree_skb_any(txfrag);
}
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (ATH10K_SKB_CB(txdesc)->htt.discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
dev_kfree_skb_any(txdesc);
}
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct sk_buff *txdesc;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@ -117,12 +62,42 @@ void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
return;
}
txdesc = htt->pending_tx[tx_done->msdu_id];
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
ath10k_txrx_tx_unref(htt, txdesc);
if (skb_cb->htt.frag_len)
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
}
static const u8 rx_legacy_rate_idx[] = {
@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}

View file

@ -19,9 +19,8 @@
#include "htt.h"
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,

View file

@ -23,30 +23,6 @@
#include "wmi.h"
#include "mac.h"
void ath10k_wmi_flush_tx(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED) {
ath10k_warn("wmi flush skipped - device is wedged anyway\n");
return;
}
ret = wait_event_timeout(ar->wmi.wq,
atomic_read(&ar->wmi.pending_tx_count) == 0,
5*HZ);
if (atomic_read(&ar->wmi.pending_tx_count) == 0)
return;
if (ret == 0)
ret = -ETIMEDOUT;
if (ret < 0)
ath10k_warn("wmi flush failed (%d)\n", ret);
}
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
@ -85,18 +61,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb(skb);
if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
wake_up(&ar->wmi.wq);
}
/* WMI command API */
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
int status;
int ret;
u32 cmd = 0;
if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@ -107,25 +79,87 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
WMI_MAX_PENDING_TX_COUNT) {
/* avoid using up memory when FW hangs */
atomic_dec(&ar->wmi.pending_tx_count);
return -EBUSY;
}
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
if (status) {
dev_kfree_skb_any(skb);
atomic_dec(&ar->wmi.pending_tx_count);
return status;
}
if (ret)
goto err_pull;
return 0;
err_pull:
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
return ret;
}
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
struct wmi_bcn_tx_arg arg = {0};
int ret;
lockdep_assert_held(&arvif->ar->data_lock);
if (arvif->beacon == NULL)
return;
arg.vdev_id = arvif->vdev_id;
arg.tx_rate = 0;
arg.tx_power = 0;
arg.bcn = arvif->beacon->data;
arg.bcn_len = arvif->beacon->len;
ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
if (ret)
return;
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
ath10k_wmi_tx_beacon_nowait(arvif);
}
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_wmi_tx_beacons_iter,
NULL);
spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
wake_up(&ar->wmi.tx_credits_wq);
}
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
{
int ret = -EINVAL;
wait_event_timeout(ar->wmi.tx_credits_wq, ({
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
(ret != -EAGAIN);
}), 3*HZ);
if (ret)
dev_kfree_skb_any(skb);
return ret;
}
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@ -315,7 +349,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u32 rx_status;
@ -325,13 +361,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
u32 rate;
u32 buf_len;
u16 fc;
int pull_len;
channel = __le32_to_cpu(event->hdr.channel);
buf_len = __le32_to_cpu(event->hdr.buf_len);
rx_status = __le32_to_cpu(event->hdr.status);
snr = __le32_to_cpu(event->hdr.snr);
phy_mode = __le32_to_cpu(event->hdr.phy_mode);
rate = __le32_to_cpu(event->hdr.rate);
if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
ev_hdr = &ev_v2->hdr.v1;
pull_len = sizeof(*ev_v2);
} else {
ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
ev_hdr = &ev_v1->hdr;
pull_len = sizeof(*ev_v1);
}
channel = __le32_to_cpu(ev_hdr->channel);
buf_len = __le32_to_cpu(ev_hdr->buf_len);
rx_status = __le32_to_cpu(ev_hdr->status);
snr = __le32_to_cpu(ev_hdr->snr);
phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
rate = __le32_to_cpu(ev_hdr->rate);
memset(status, 0, sizeof(*status));
@ -358,7 +405,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
skb_pull(skb, sizeof(event->hdr));
skb_pull(skb, pull_len);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@ -734,10 +781,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
int i = -1;
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct wmi_bcn_tx_arg arg;
struct sk_buff *bcn;
int vdev_id = 0;
int ret;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@ -794,17 +839,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
arg.vdev_id = arvif->vdev_id;
arg.tx_rate = 0;
arg.tx_power = 0;
arg.bcn = bcn->data;
arg.bcn_len = bcn->len;
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
ath10k_warn("SWBA overrun on vdev %d\n",
arvif->vdev_id);
dev_kfree_skb_any(arvif->beacon);
}
ret = ath10k_wmi_beacon_send(ar, &arg);
if (ret)
ath10k_warn("could not send beacon (%d)\n", ret);
arvif->beacon = bcn;
dev_kfree_skb_any(bcn);
ath10k_wmi_tx_beacon_nowait(arvif);
spin_unlock_bh(&ar->data_lock);
}
}
@ -943,6 +988,9 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->fw_version_build > 636)
set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@ -1007,7 +1055,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
@ -1126,64 +1174,18 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
static void ath10k_wmi_event_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k,
wmi.wmi_event_work);
struct sk_buff *skb;
for (;;) {
skb = skb_dequeue(&ar->wmi.wmi_event_list);
if (!skb)
break;
ath10k_wmi_event_process(ar, skb);
}
}
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
enum wmi_event_id event_id;
event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
/* some events require to be handled ASAP
* thus can't be defered to a worker thread */
switch (event_id) {
case WMI_HOST_SWBA_EVENTID:
case WMI_MGMT_RX_EVENTID:
ath10k_wmi_event_process(ar, skb);
return;
default:
break;
}
skb_queue_tail(&ar->wmi.wmi_event_list, skb);
queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.wq);
skb_queue_head_init(&ar->wmi.wmi_event_list);
INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
/* HTC should've drained the packets already */
if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
ath10k_warn("there are still pending packets\n");
cancel_work_sync(&ar->wmi.wmi_event_work);
skb_queue_purge(&ar->wmi.wmi_event_list);
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@ -1198,6 +1200,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* these fields are the same for all service endpoints */
conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@ -2108,7 +2111,8 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
const struct wmi_bcn_tx_arg *arg)
{
struct wmi_bcn_tx_cmd *cmd;
struct sk_buff *skb;
@ -2124,7 +2128,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,

View file

@ -508,6 +508,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
{
switch (mode) {
case MODE_11A:
return "11a";
case MODE_11G:
return "11g";
case MODE_11B:
return "11b";
case MODE_11GONLY:
return "11gonly";
case MODE_11NA_HT20:
return "11na-ht20";
case MODE_11NG_HT20:
return "11ng-ht20";
case MODE_11NA_HT40:
return "11na-ht40";
case MODE_11NG_HT40:
return "11ng-ht40";
case MODE_11AC_VHT20:
return "11ac-vht20";
case MODE_11AC_VHT40:
return "11ac-vht40";
case MODE_11AC_VHT80:
return "11ac-vht80";
case MODE_11AC_VHT20_2G:
return "11ac-vht20-2g";
case MODE_11AC_VHT40_2G:
return "11ac-vht40-2g";
case MODE_11AC_VHT80_2G:
return "11ac-vht80-2g";
case MODE_UNKNOWN:
/* skip */
break;
/* no default handler to allow compiler to check that the
* enum is fully handled */
};
return "<unknown>";
}
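Leaving out the default case lets the compiler warn when a new wmi_phy_mode value is added but not handled here. As a usage illustration only, a caller might print the mode by name rather than as a raw number; the surrounding variables in this sketch (a channel argument ch with freq and mode fields) are assumptions.

/* Hypothetical usage: readable phy mode in debug output. */
ath10k_dbg(ATH10K_DBG_WMI, "channel freq %d mode %s\n",
	   ch->freq, ath10k_wmi_phymode_str(ch->mode));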
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
@ -763,14 +805,6 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
/*
* status consists of upper 16 bits fo int status and lower 16 bits of
* module ID that retuned status
*/
#define WLAN_INIT_STATUS_SUCCESS 0x0
#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@ -1268,7 +1302,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
struct wmi_mgmt_rx_hdr {
struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
@ -1277,8 +1311,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
struct wmi_mgmt_rx_event {
struct wmi_mgmt_rx_hdr hdr;
struct wmi_mgmt_rx_hdr_v2 {
struct wmi_mgmt_rx_hdr_v1 v1;
__le32 rssi_ctl[4];
} __packed;
struct wmi_mgmt_rx_event_v1 {
struct wmi_mgmt_rx_hdr_v1 hdr;
u8 buf[0];
} __packed;
struct wmi_mgmt_rx_event_v2 {
struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
@ -3000,7 +3044,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
@ -3013,7 +3056,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@ -3066,7 +3108,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);

View file

@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/* Initialization */
static int ath_ahb_probe(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
if (!pdev->dev.platform_data) {
if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;

View file

@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
if (!pdev->dev.platform_data) {
if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}

View file

@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x1;
}
ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x39;
ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x1;
}
ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x4;
}
ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x6;
ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x6;
}
ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
(antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
(antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {

View file

@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*

View file

@ -671,7 +671,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
nfcal_pending = ah->caldata->nfcal_pending;
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
@ -861,7 +861,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
ah->caldata->nfcal_pending = true;
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

View file

@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}

View file

@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
if (caldata)
caldata->done_txiqcal_once = is_reusable;
if (caldata) {
if (is_reusable)
set_bit(TXIQCAL_DONE, &caldata->cal_flags);
else
clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
}
return;
}
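
The hunk above is one instance of a pattern repeated through these calibration files: the per-flag booleans (done_txiqcal_once, done_txclcal_once, rtt_done, ...) collapse into a single cal_flags word manipulated with set_bit(), clear_bit() and test_bit(). The sketch below mimics that bitmap bookkeeping in user space; the helpers are simplified, non-atomic stand-ins for the kernel bitops.

#include <stdio.h>

enum cal_flag { TXIQCAL_DONE, TXCLCAL_DONE, SW_PKDET_DONE, NFCAL_PENDING };

/* Non-atomic stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void set_flag(unsigned long *flags, int bit)   { *flags |=  (1UL << bit); }
static void clear_flag(unsigned long *flags, int bit) { *flags &= ~(1UL << bit); }
static int  test_flag(unsigned long flags, int bit)   { return !!(flags & (1UL << bit)); }

int main(void)
{
	unsigned long cal_flags = 0;

	set_flag(&cal_flags, TXIQCAL_DONE);
	set_flag(&cal_flags, SW_PKDET_DONE);
	clear_flag(&cal_flags, TXIQCAL_DONE);
	printf("txiq=%d pkdet=%d\n",
	       test_flag(cal_flags, TXIQCAL_DONE),
	       test_flag(cal_flags, SW_PKDET_DONE));
	return 0;
}
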
@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
}
static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
struct ath9k_channel *chan,
bool run_rtt_cal)
{
struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
return;
if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
return;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
}
if (caldata)
set_bit(SW_PKDET_DONE, &caldata->cal_flags);
if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
if (IS_CHAN_2GHZ(chan)) {
caldata->caldac[0] = REG_READ_FIELD(ah,
AR_PHY_65NM_RXRF_AGC(0),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
caldata->caldac[1] = REG_READ_FIELD(ah,
AR_PHY_65NM_RXRF_AGC(1),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
} else {
caldata->caldac[0] = REG_READ_FIELD(ah,
AR_PHY_65NM_RXRF_AGC(0),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
caldata->caldac[1] = REG_READ_FIELD(ah,
AR_PHY_65NM_RXRF_AGC(1),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
}
}
}
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
if (caldata->done_txclcal_once) {
if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
continue;
@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
caldata->tx_clcal[i][j] =
REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
}
caldata->done_txclcal_once = true;
set_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
}
@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
u32 rx_delay = 0;
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
ar9003_hw_rtt_clear_hist(ah);
}
if (rtt && !run_rtt_cal) {
agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
agc_supp_cals &= agc_ctrl;
agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL);
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
if (rtt) {
if (!run_rtt_cal) {
agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
agc_supp_cals &= agc_ctrl;
agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL);
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
} else {
if (ah->ah_flags & AH_FASTCC)
run_agc_cal = true;
}
}
if (ah->enabled_cals & TX_CL_CAL) {
if (caldata && caldata->done_txclcal_once)
if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
else {
@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
* AGC calibration
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
if (caldata && !caldata->done_txiqcal_once)
if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
} else if (caldata && !caldata->done_txiqcal_once) {
} else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
run_agc_cal = true;
sep_iq_cal = true;
}
@ -1099,6 +1135,15 @@ skip_tx_iqcal:
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
/* Disable BB_active */
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@ -1110,7 +1155,12 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
ar9003_hw_do_manual_peak_cal(ah, chan);
ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
}
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
udelay(5);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@ -1133,19 +1183,23 @@ skip_tx_iqcal:
if (txiqcal_done)
ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
else if (caldata && caldata->done_txiqcal_once)
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
if (!ath9k_hw_rfbus_req(ah))
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah),
"Could not stop baseband\n");
else
} else {
ar9003_hw_rtt_fill_hist(ah);
if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
ar9003_hw_rtt_load_hist(ah);
}
ath9k_hw_rfbus_done(ah);
}

View file

@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
return eep->base_ext1.ant_div_control;
if (AR_SREV_9565(ah))
return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
else
return eep->base_ext1.ant_div_control;
case EEP_ANTENNA_GAIN_5G:
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct ar9300_base_eep_hdr *pBase;
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
ah->eeprom.ar9300_eep.macAddr);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
ah->eeprom.ar9300_eep.macAddr);
out:
if (len > size)
len = size;
@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9565(ah)) {
if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
REG_SET_BIT(ah, AR_PHY_RESTART,
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
/* Force WLAN LNA diversity ON */
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
/* Force WLAN LNA diversity OFF */
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
}
}
@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
if (AR_SREV_9485(ah) && common->bt_ant_diversity)
if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
&& common->bt_ant_diversity)
regval |= AR_FAST_DIV_ENABLE;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);

View file

@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for

View file

@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);

View file

@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
caldata->rtt_done = false;
clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))

View file

@ -627,11 +627,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
REG_WRITE(ah, AR_PCU_MISC_MODE2,
val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
val |= AR_AGG_WEP_ENABLE_FIX |
AR_AGG_WEP_ENABLE |
AR_PCU_MISC_MODE2_CFP_IGNORE;
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@ -1375,15 +1374,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else if (AR_SREV_9565(ah)) {
antconf->lna1_lna2_delta = -3;
antconf->lna1_lna2_switch_delta = 3;
antconf->lna1_lna2_delta = -9;
antconf->div_group = 3;
} else {
antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
@ -1488,18 +1491,25 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
}
} else if (AR_SREV_9565(ah)) {
if (enable) {
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
AR_ANT_DIV_ENABLE);
REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
REG_SET_BIT(ah, AR_PHY_RESTART,
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_FAST_DIV_ENABLE);
REG_SET_BIT(ah, AR_PHY_RESTART,
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);
} else {
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
AR_ANT_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
(1 << AR_PHY_ANT_SW_RX_PROT_S));
REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
AR_FAST_DIV_ENABLE);
REG_CLR_BIT(ah, AR_PHY_RESTART,
AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
AR_BTCOEX_WL_LNADIV_FORCE_ON);

View file

@ -343,8 +343,12 @@
#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118

View file

@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
}
}
static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
{
int agc, caldac;
if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
return;
if ((index != 5) || (chain >= 2))
return;
agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
if (!agc)
return;
caldac = ah->caldata->caldac[chain];
ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
ah->caldata->rtt_table[chain][index] |= (caldac << 4);
}
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
{
u32 val;
@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
ah->caldata->rtt_table[chain][i] =
ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
ar9003_hw_patch_rtt(ah, i, chain);
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"RTT value at idx %d, chain %d is: 0x%x\n",
i, chain, ah->caldata->rtt_table[chain][i]);
}
}
ah->caldata->rtt_done = true;
set_bit(RTT_DONE, &ah->caldata->cal_flags);
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
}
if (ah->caldata)
ah->caldata->rtt_done = false;
clear_bit(RTT_DONE, &ah->caldata->cal_flags);
}
bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ah->caldata)
return false;
if (!ah->caldata->rtt_done)
if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
if (IS_CHAN_2GHZ(chan)) {
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
ah->caldata->caldac[0]);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
ah->caldata->caldac[1]);
} else {
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
ah->caldata->caldac[0]);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
ah->caldata->caldac[1]);
}
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
}
if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
return false;
ar9003_hw_rtt_enable(ah);
ar9003_hw_rtt_set_mask(ah, 0x10);
if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
ar9003_hw_rtt_set_mask(ah, 0x30);
else
ar9003_hw_rtt_set_mask(ah, 0x10);
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");

View file

@ -20,7 +20,17 @@
/* AR9485 1.1 */
#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
static const u32 ar9485_1_1_mac_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
/* Addr allmodes */
@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
{0x00009e00, 0x037216a0},
{0x00009e04, 0x00182020},
{0x00009e18, 0x00000000},
{0x00009e20, 0x000003a8},
{0x00009e2c, 0x00004121},
{0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@ -341,7 +373,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@ -367,14 +399,14 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@ -521,12 +553,109 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
{0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
{0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
{0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
{0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
{0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
{0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
{0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
{0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
{0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
{0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
{0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
{0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
{0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
{0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
{0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
{0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
{0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
{0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
{0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
{0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
{0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
{0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
{0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
{0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
{0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
{0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x00009e00, 0x03721b20},
{0x00009e04, 0x00082020},
{0x00009e18, 0x0300501e},
{0x00009e20, 0x000003ba},
{0x00009e2c, 0x00002e21},
{0x00009e44, 0x02182324},
{0x0000a000, 0x00060005},
@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
/* Addr allmodes */
{0x0000a398, 0x00000000},
{0x0000a39c, 0x6f7f0301},
{0x0000a3a0, 0xca9228ee},
};
#endif /* INITVALS_9485_H */

View file

@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
{0x0000a3a4, 0x00000011},
{0x0000a3a8, 0xaaaaaa6e},
{0x0000a3ac, 0x3c466455},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
{0x0000a404, 0x00000000},
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739ce},
{0x0000a414, 0x1ce739c5},
{0x0000a418, 0x2d001dce},
{0x0000a41c, 0x1ce739ce},
{0x0000a41c, 0x1ce739c5},
{0x0000a420, 0x000001ce},
{0x0000a424, 0x1ce739ce},
{0x0000a424, 0x1ce739c5},
{0x0000a428, 0x000001ce},
{0x0000a42c, 0x1ce739ce},
{0x0000a430, 0x1ce739ce},
@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
/* Addr allmodes */
{0x00004050, 0x00300300},
{0x0000406c, 0x00100000},
{0x00009e20, 0x000003b6},
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
/* Addr allmodes */
{0x0000a398, 0x00000000},
{0x0000a39c, 0x6f7f0301},
{0x0000a3a0, 0xca9228ee},
};
#endif /* INITVALS_9565_1P0_H */

View file

@ -459,8 +459,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
#define ATH_DUMP_BTCOEX(_s, _val) \
do { \
len += snprintf(buf + len, size - len, \
"%20s : %10d\n", _s, (_val)); \
len += scnprintf(buf + len, size - len, \
"%20s : %10d\n", _s, (_val)); \
} while (0)
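
This macro, like the debugfs hunks further down, swaps snprintf() for the kernel's scnprintf(): snprintf() returns the length that would have been written, so accumulating its return value can push len past the buffer and make later size - len arguments wrap. The user-space sketch below illustrates the difference; scnprintf_like() is a hand-rolled stand-in for the kernel helper, not the real lib/vsprintf.c code.

#include <stdarg.h>
#include <stdio.h>

/* Mimics kernel scnprintf(): returns bytes actually stored, never more
 * than size - 1, so accumulating the return value stays within bounds. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i < (int)size ? i : (int)size - 1;
}

int main(void)
{
	char buf[16];
	int len = 0;

	/* snprintf() would return 24 here even though only 15 bytes fit. */
	len += scnprintf_like(buf + len, sizeof(buf) - len,
			      "%s", "a fairly long debug line");
	len += scnprintf_like(buf + len, sizeof(buf) - len, "%s", " more");
	printf("len=%d buf=\"%s\"\n", len, buf);
	return 0;
}
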
enum bt_op_flags {
@ -581,7 +581,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@ -626,12 +625,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
/* Main driver core */
/********************/
#define ATH9K_PCI_CUS198 0x0001
#define ATH9K_PCI_CUS230 0x0002
#define ATH9K_PCI_CUS217 0x0004
#define ATH9K_PCI_WOW 0x0008
#define ATH9K_PCI_BT_ANT_DIV 0x0010
#define ATH9K_PCI_D3_L1_WAR 0x0020
#define ATH9K_PCI_CUS198 0x0001
#define ATH9K_PCI_CUS230 0x0002
#define ATH9K_PCI_CUS217 0x0004
#define ATH9K_PCI_CUS252 0x0008
#define ATH9K_PCI_WOW 0x0010
#define ATH9K_PCI_BT_ANT_DIV 0x0020
#define ATH9K_PCI_D3_L1_WAR 0x0040
#define ATH9K_PCI_AR9565_1ANT 0x0080
#define ATH9K_PCI_AR9565_2ANT 0x0100
/*
* Default cache line size, in bytes.

View file

@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
ath9k_hw_check_nav(ah);
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);

View file

@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
(cal->nfcal_interference ?
(test_bit(NFCAL_INTF, &cal->cal_flags) ?
"not corrected (due to interference)" :
"correcting to MAX"));
@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* we bypass this limit here in order to better deal
* with our environment.
*/
if (!cal->nfcal_interference)
if (!test_bit(NFCAL_INTF, &cal->cal_flags))
h[i].privNF = limit->max;
}
}
@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
* Re-enable the enforcement of the NF maximum again.
*/
if (!high_nf_mid)
cal->nfcal_interference = false;
clear_bit(NFCAL_INTF, &cal->cal_flags);
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@ -220,7 +220,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
if (ah->caldata)
ah->caldata->nfcal_pending = true;
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_ENABLE_NF);
@ -391,7 +391,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
h = caldata->nfCalHist;
caldata->nfcal_pending = false;
clear_bit(NFCAL_PENDING, &caldata->cal_flags);
ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
chan->noisefloor = h[0].privNF;
ah->noise = ath9k_hw_getchan_noise(ah, chan);
@ -437,12 +437,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
* the baseband update the internal NF value itself, similar to
* what is being done after a full reset.
*/
if (!caldata->nfcal_pending)
if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
caldata->nfcal_interference = true;
set_bit(NFCAL_INTF, &caldata->cal_flags);
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);

View file

@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
return -ENOMEM;
if (common->disable_ani) {
len += snprintf(buf + len, size - len, "%s: %s\n",
"ANI", "DISABLED");
len += scnprintf(buf + len, size - len, "%s: %s\n",
"ANI", "DISABLED");
goto exit;
}
len += snprintf(buf + len, size - len, "%15s: %s\n",
"ANI", "ENABLED");
len += snprintf(buf + len, size - len, "%15s: %u\n",
"ANI RESET", ah->stats.ast_ani_reset);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"SPUR UP", ah->stats.ast_ani_spurup);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"SPUR DOWN", ah->stats.ast_ani_spurup);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK ON", ah->stats.ast_ani_ccklow);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP UP", ah->stats.ast_ani_stepup);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
len += snprintf(buf + len, size - len, "%15s: %u\n",
"CCK ERRORS", ah->stats.ast_ani_cckerrs);
len += scnprintf(buf + len, size - len, "%15s: %s\n",
"ANI", "ENABLED");
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"ANI RESET", ah->stats.ast_ani_reset);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"SPUR UP", ah->stats.ast_ani_spurup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"SPUR DOWN", ah->stats.ast_ani_spurup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK ON", ah->stats.ast_ani_ccklow);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP UP", ah->stats.ast_ani_stepup);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
len += scnprintf(buf + len, size - len, "%15s: %u\n",
"CCK ERRORS", ah->stats.ast_ani_cckerrs);
exit:
if (len > size)
len = size;
@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
return -ENOMEM;
if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
len += snprintf(buf + len, size - len, "%s\n",
"Antenna Diversity Combining is disabled");
len += scnprintf(buf + len, size - len, "%s\n",
"Antenna Diversity Combining is disabled");
goto exit;
}
ath9k_ps_wakeup(sc);
ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
lna_conf_str[div_ant_conf.main_lna_conf]);
len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
lna_conf_str[div_ant_conf.alt_lna_conf]);
len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
as_main->rssi_avg);
len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
as_alt->rssi_avg);
len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
lna_conf_str[div_ant_conf.main_lna_conf]);
len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
lna_conf_str[div_ant_conf.alt_lna_conf]);
len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
as_main->rssi_avg);
len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
as_alt->rssi_avg);
ath9k_ps_restore(sc);
len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
len += snprintf(buf + len, size - len, "-------------------\n");
len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
len += scnprintf(buf + len, size - len, "-------------------\n");
len += snprintf(buf + len, size - len, "%30s%15s\n",
"MAIN", "ALT");
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"TOTAL COUNT",
as_main->recv_cnt,
as_alt->recv_cnt);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 + LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 - LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
len += scnprintf(buf + len, size - len, "%30s%15s\n",
"MAIN", "ALT");
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"TOTAL COUNT",
as_main->recv_cnt,
as_alt->recv_cnt);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 + LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 - LNA2",
as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
len += snprintf(buf + len, size - len, "--------------------\n");
len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
len += scnprintf(buf + len, size - len, "--------------------\n");
len += snprintf(buf + len, size - len, "%30s%15s\n",
"MAIN", "ALT");
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 + LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 - LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
len += scnprintf(buf + len, size - len, "%30s%15s\n",
"MAIN", "ALT");
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 + LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
"LNA1 - LNA2",
as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
exit:
if (len > size)
@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
len += snprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
}
len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
len += snprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
if (i == 8) {
@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
len += snprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
val[2] & (0x7 << (i * 3)) >> (i * 3),
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
val[2] & (0x7 << (i * 3)) >> (i * 3),
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
len += snprintf(buf + len, DMA_BUF_LEN - len,
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += scnprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
#define PR_IS(a, s) \
do { \
len += snprintf(buf + len, mxlen - len, \
"%21s: %10u\n", a, \
sc->debug.stats.istats.s); \
len += scnprintf(buf + len, mxlen - len, \
"%21s: %10u\n", a, \
sc->debug.stats.istats.s); \
} while (0)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len,
"SYNC_CAUSE stats:\n");
len += scnprintf(buf + len, mxlen - len,
"SYNC_CAUSE stats:\n");
PR_IS("Sync-All", sync_cause_all);
PR_IS("RTC-IRQ", sync_rtc_irq);
@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
len += snprintf(buf + len, size - len, "%s: %d ",
"qnum", txq->axq_qnum);
len += snprintf(buf + len, size - len, "%s: %2d ",
"qdepth", txq->axq_depth);
len += snprintf(buf + len, size - len, "%s: %2d ",
"ampdu-depth", txq->axq_ampdu_depth);
len += snprintf(buf + len, size - len, "%s: %3d ",
"pending", txq->pending_frames);
len += snprintf(buf + len, size - len, "%s: %d\n",
"stopped", txq->stopped);
len += scnprintf(buf + len, size - len, "%s: %d ",
"qnum", txq->axq_qnum);
len += scnprintf(buf + len, size - len, "%s: %2d ",
"qdepth", txq->axq_depth);
len += scnprintf(buf + len, size - len, "%s: %2d ",
"ampdu-depth", txq->axq_ampdu_depth);
len += scnprintf(buf + len, size - len, "%s: %3d ",
"pending", txq->pending_frames);
len += scnprintf(buf + len, size - len, "%s: %d\n",
"stopped", txq->stopped);
ath_txq_unlock(sc, txq);
return len;
@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
len += scnprintf(buf + len, size - len, "(%s): ", qname[i]);
len += print_queue(sc, txq, buf + len, size - len);
}
len += snprintf(buf + len, size - len, "(CAB): ");
len += scnprintf(buf + len, size - len, "(CAB): ");
len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
if (len > size)
@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int reg;
u32 rxfilter;
len += snprintf(buf + len, sizeof(buf) - len,
"BSSID: %pM\n", common->curbssid);
len += snprintf(buf + len, sizeof(buf) - len,
"BSSID-MASK: %pM\n", common->bssidmask);
len += snprintf(buf + len, sizeof(buf) - len,
"OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
len += scnprintf(buf + len, sizeof(buf) - len,
"BSSID: %pM\n", common->curbssid);
len += scnprintf(buf + len, sizeof(buf) - len,
"BSSID-MASK: %pM\n", common->bssidmask);
len += scnprintf(buf + len, sizeof(buf) - len,
"OPMODE: %s\n",
ath_opmode_to_string(sc->sc_ah->opmode));
ath9k_ps_wakeup(sc);
rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
ath9k_ps_restore(sc);
len += snprintf(buf + len, sizeof(buf) - len,
"RXFILTER: 0x%x", rxfilter);
len += scnprintf(buf + len, sizeof(buf) - len,
"RXFILTER: 0x%x", rxfilter);
if (rxfilter & ATH9K_RX_FILTER_UCAST)
len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (rxfilter & ATH9K_RX_FILTER_MCAST)
len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (rxfilter & ATH9K_RX_FILTER_BCAST)
len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (rxfilter & ATH9K_RX_FILTER_CONTROL)
len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (rxfilter & ATH9K_RX_FILTER_BEACON)
len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (rxfilter & ATH9K_RX_FILTER_PROM)
len += snprintf(buf + len, sizeof(buf) - len, " PROM");
len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (rxfilter & ATH9K_RX_FILTER_PHYERR)
len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
len += snprintf(buf + len, sizeof(buf) - len, "\n");
len += scnprintf(buf + len, sizeof(buf) - len, "\n");
reg = sc->sc_ah->imask;
len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
len += scnprintf(buf + len, sizeof(buf) - len,
"INTERRUPT-MASK: 0x%x", reg);
if (reg & ATH9K_INT_SWBA)
len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
if (reg & ATH9K_INT_BMISS)
len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
if (reg & ATH9K_INT_CST)
len += snprintf(buf + len, sizeof(buf) - len, " CST");
len += scnprintf(buf + len, sizeof(buf) - len, " CST");
if (reg & ATH9K_INT_RX)
len += snprintf(buf + len, sizeof(buf) - len, " RX");
len += scnprintf(buf + len, sizeof(buf) - len, " RX");
if (reg & ATH9K_INT_RXHP)
len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
if (reg & ATH9K_INT_RXLP)
len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
if (reg & ATH9K_INT_BB_WATCHDOG)
len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
len += snprintf(buf + len, sizeof(buf) - len, "\n");
len += scnprintf(buf + len, sizeof(buf) - len, "\n");
ath9k_calculate_iter_data(hw, NULL, &iter_data);
len += snprintf(buf + len, sizeof(buf) - len,
"VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
" ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.naps, iter_data.nstations, iter_data.nmeshes,
iter_data.nwds, iter_data.nadhocs,
sc->nvifs, sc->nbcnvifs);
len += scnprintf(buf + len, sizeof(buf) - len,
"VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
" ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.naps, iter_data.nstations, iter_data.nmeshes,
iter_data.nwds, iter_data.nadhocs,
sc->nvifs, sc->nbcnvifs);
if (len > sizeof(buf))
len = sizeof(buf);
@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Baseband Hang",
sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Baseband Watchdog",
sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Fatal HW Error",
sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "TX HW error",
sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "TX Path Hang",
sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "PLL RX Hang",
sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "MCI Reset",
sc->debug.stats.reset[RESET_TYPE_MCI]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Baseband Hang",
sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Baseband Watchdog",
sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "Fatal HW Error",
sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "TX HW error",
sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "TX Path Hang",
sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "PLL RX Hang",
sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "MCI Reset",
sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
sc->debug.stats.rxstats.phy_err_stats[p]);
len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
sc->debug.stats.rxstats.phy_err_stats[p]);
#define RXS_ERR(s, e) \
do { \
len += snprintf(buf + len, size - len, \
"%22s : %10u\n", s, \
sc->debug.stats.rxstats.e); \
len += scnprintf(buf + len, size - len, \
"%22s : %10u\n", s, \
sc->debug.stats.rxstats.e);\
} while (0)
struct ath_softc *sc = file->private_data;
@ -1439,22 +1441,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
len += snprintf(buf + len, size - len,
"Channel Noise Floor : %d\n", ah->noise);
len += snprintf(buf + len, size - len,
"Chain | privNF | # Readings | NF Readings\n");
len += scnprintf(buf + len, size - len,
"Channel Noise Floor : %d\n", ah->noise);
len += scnprintf(buf + len, size - len,
"Chain | privNF | # Readings | NF Readings\n");
for (i = 0; i < NUM_NF_READINGS; i++) {
if (!(chainmask & (1 << i)) ||
((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
continue;
nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
i, h[i].privNF, nread);
len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
i, h[i].privNF, nread);
for (j = 0; j < nread; j++)
len += snprintf(buf + len, size - len,
" %d", h[i].nfCalBuffer[j]);
len += snprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" %d", h[i].nfCalBuffer[j]);
len += scnprintf(buf + len, size - len, "\n");
}
if (len > size)
@ -1543,8 +1545,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!sc->sc_ah->common.btcoex_enabled) {
len = snprintf(buf, size, "%s\n",
"BTCOEX is disabled");
len = scnprintf(buf, size, "%s\n",
"BTCOEX is disabled");
goto exit;
}
@ -1582,43 +1584,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
return -ENOMEM;
if (!an->sta->ht_cap.ht_supported) {
len = snprintf(buf, size, "%s\n",
"HT not supported");
len = scnprintf(buf, size, "%s\n",
"HT not supported");
goto exit;
}
len = snprintf(buf, size, "Max-AMPDU: %d\n",
an->maxampdu);
len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
an->mpdudensity);
len = scnprintf(buf, size, "Max-AMPDU: %d\n",
an->maxampdu);
len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
an->mpdudensity);
len += snprintf(buf + len, size - len,
"%2s%7s\n", "AC", "SCHED");
len += scnprintf(buf + len, size - len,
"%2s%7s\n", "AC", "SCHED");
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
txq = ac->txq;
ath_txq_lock(sc, txq);
len += snprintf(buf + len, size - len,
"%2d%7d\n",
acno, ac->sched);
len += scnprintf(buf + len, size - len,
"%2d%7d\n",
acno, ac->sched);
ath_txq_unlock(sc, txq);
}
len += snprintf(buf + len, size - len,
"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
len += scnprintf(buf + len, size - len,
"\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
"TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
"BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
txq = tid->ac->txq;
ath_txq_lock(sc, txq);
len += snprintf(buf + len, size - len,
"%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
tid->tidno, tid->seq_start, tid->seq_next,
tid->baw_size, tid->baw_head, tid->baw_tail,
tid->bar_index, tid->sched, tid->paused);
len += scnprintf(buf + len, size - len,
"%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
tid->tidno, tid->seq_start, tid->seq_next,
tid->baw_size, tid->baw_head, tid->baw_tail,
tid->bar_index, tid->sched, tid->paused);
ath_txq_unlock(sc, txq);
}
exit:

View file

@ -193,12 +193,12 @@ struct ath_tx_stats {
#define TXSTATS sc->debug.stats.txstats
#define PR(str, elem) \
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
len += scnprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
} while(0)
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)

View file

@ -25,11 +25,11 @@
struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
#define ATH9K_DFS_STAT(s, p) \
len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
sc->debug.stats.dfs_stats.p);
len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
sc->debug.stats.dfs_stats.p);
#define ATH9K_DFS_POOL_STAT(s, p) \
len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
global_dfs_pool_stats.p);
len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
global_dfs_pool_stats.p);
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@ -44,12 +44,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
len += snprintf(buf + len, size - len, "DFS support for "
"macVersion = 0x%x, macRev = 0x%x: %s\n",
hw_ver->macVersion, hw_ver->macRev,
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
len += scnprintf(buf + len, size - len, "DFS support for "
"macVersion = 0x%x, macRev = 0x%x: %s\n",
hw_ver->macVersion, hw_ver->macRev,
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
ATH9K_DFS_STAT("pulse events reported ", pulses_total);
ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
@ -59,11 +59,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
len += snprintf(buf + len, size - len, "Radar detector statistics "
"(current DFS region: %d)\n", sc->dfs_detector->region);
len += scnprintf(buf + len, size - len, "Radar detector statistics "
"(current DFS region: %d)\n",
sc->dfs_detector->region);
ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
ATH9K_DFS_STAT("Radars detected ", radar_detected);
len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);

View file

@ -392,7 +392,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
pri_detector_reset(de, ts);
return false;
return NULL;
}
ps = pseq_handler_check_detection(de);

View file

@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len = ath9k_dump_4k_modal_eeprom(buf, len, size,
&eep->modalHeader);
&eep->modalHeader);
goto out;
}
@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("TX Gain type", pBase->txGainType);
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
out:
if (len > size)

View file

@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Power Table Offset", pBase->pwrTableOffset);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
out:
if (len > size)

View file

@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
struct base_eep_header *pBase = &eep->baseEepHeader;
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len += scnprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
out:
if (len > size)

View file

@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
len += snprintf(buf + len, size - len, "BT Weights: ");
len += scnprintf(buf + len, size - len, "BT Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
len += snprintf(buf + len, size - len, "%08x ",
btcoex_hw->bt_weight[i]);
len += snprintf(buf + len, size - len, "\n");
len += snprintf(buf + len, size - len, "WLAN Weights: ");
len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->bt_weight[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "WLAN Weights: ");
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
len += snprintf(buf + len, size - len, "%08x ",
btcoex_hw->wlan_weight[i]);
len += snprintf(buf + len, size - len, "\n");
len += snprintf(buf + len, size - len, "Tx Priorities: ");
len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->wlan_weight[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "Tx Priorities: ");
for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
len += snprintf(buf + len, size - len, "%08x ",
len += scnprintf(buf + len, size - len, "%08x ",
btcoex_hw->tx_prio[i]);
len += snprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\n");
return len;
}

View file

@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RX",
be32_to_cpu(cmd_rsp.rx));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RX",
be32_to_cpu(cmd_rsp.rx));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RXORN",
be32_to_cpu(cmd_rsp.rxorn));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RXORN",
be32_to_cpu(cmd_rsp.rxorn));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RXEOL",
be32_to_cpu(cmd_rsp.rxeol));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "RXEOL",
be32_to_cpu(cmd_rsp.rxeol));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TXURN",
be32_to_cpu(cmd_rsp.txurn));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TXURN",
be32_to_cpu(cmd_rsp.txurn));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TXTO",
be32_to_cpu(cmd_rsp.txto));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TXTO",
be32_to_cpu(cmd_rsp.txto));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "CST",
be32_to_cpu(cmd_rsp.cst));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "CST",
be32_to_cpu(cmd_rsp.cst));
if (len > sizeof(buf))
len = sizeof(buf);
@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Xretries",
be32_to_cpu(cmd_rsp.xretries));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Xretries",
be32_to_cpu(cmd_rsp.xretries));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "FifoErr",
be32_to_cpu(cmd_rsp.fifoerr));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "FifoErr",
be32_to_cpu(cmd_rsp.fifoerr));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Filtered",
be32_to_cpu(cmd_rsp.filtered));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Filtered",
be32_to_cpu(cmd_rsp.filtered));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TimerExp",
be32_to_cpu(cmd_rsp.timer_exp));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "TimerExp",
be32_to_cpu(cmd_rsp.timer_exp));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "ShortRetries",
be32_to_cpu(cmd_rsp.shortretries));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "ShortRetries",
be32_to_cpu(cmd_rsp.shortretries));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "LongRetries",
be32_to_cpu(cmd_rsp.longretries));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "LongRetries",
be32_to_cpu(cmd_rsp.longretries));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "QueueNull",
be32_to_cpu(cmd_rsp.qnull));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "QueueNull",
be32_to_cpu(cmd_rsp.qnull));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "EncapFail",
be32_to_cpu(cmd_rsp.encap_fail));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "EncapFail",
be32_to_cpu(cmd_rsp.encap_fail));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "NoBuf",
be32_to_cpu(cmd_rsp.nobuf));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "NoBuf",
be32_to_cpu(cmd_rsp.nobuf));
if (len > sizeof(buf))
len = sizeof(buf);
@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
ath9k_htc_ps_restore(priv);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "NoBuf",
be32_to_cpu(cmd_rsp.nobuf));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "NoBuf",
be32_to_cpu(cmd_rsp.nobuf));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "HostSend",
be32_to_cpu(cmd_rsp.host_send));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "HostSend",
be32_to_cpu(cmd_rsp.host_send));
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "HostDone",
be32_to_cpu(cmd_rsp.host_done));
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "HostDone",
be32_to_cpu(cmd_rsp.host_done));
if (len > sizeof(buf))
len = sizeof(buf);
@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Buffers queued",
priv->debug.tx_stats.buf_queued);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Buffers completed",
priv->debug.tx_stats.buf_completed);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs queued",
priv->debug.tx_stats.skb_queued);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs success",
priv->debug.tx_stats.skb_success);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs failed",
priv->debug.tx_stats.skb_failed);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "CAB queued",
priv->debug.tx_stats.cab_queued);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Buffers queued",
priv->debug.tx_stats.buf_queued);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "Buffers completed",
priv->debug.tx_stats.buf_completed);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs queued",
priv->debug.tx_stats.skb_queued);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs success",
priv->debug.tx_stats.skb_success);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "SKBs failed",
priv->debug.tx_stats.skb_failed);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "CAB queued",
priv->debug.tx_stats.cab_queued);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BE queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BK queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VI queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VO queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BE queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BK queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VI queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
len += scnprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VO queued",
priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
#define PHY_ERR(s, p) \
len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
priv->debug.rx_stats.err_phy_stats[p]);
len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
priv->debug.rx_stats.err_phy_stats[p]);
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs allocated",
priv->debug.rx_stats.skb_allocated);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs completed",
priv->debug.rx_stats.skb_completed);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs Dropped",
priv->debug.rx_stats.skb_dropped);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs allocated",
priv->debug.rx_stats.skb_allocated);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs completed",
priv->debug.rx_stats.skb_completed);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs Dropped",
priv->debug.rx_stats.skb_dropped);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "CRC ERR",
priv->debug.rx_stats.err_crc);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "DECRYPT CRC ERR",
priv->debug.rx_stats.err_decrypt_crc);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "MIC ERR",
priv->debug.rx_stats.err_mic);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "PRE-DELIM CRC ERR",
priv->debug.rx_stats.err_pre_delim);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "POST-DELIM CRC ERR",
priv->debug.rx_stats.err_post_delim);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "DECRYPT BUSY ERR",
priv->debug.rx_stats.err_decrypt_busy);
len += snprintf(buf + len, size - len,
"%20s : %10u\n", "TOTAL PHY ERR",
priv->debug.rx_stats.err_phy);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "CRC ERR",
priv->debug.rx_stats.err_crc);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "DECRYPT CRC ERR",
priv->debug.rx_stats.err_decrypt_crc);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "MIC ERR",
priv->debug.rx_stats.err_mic);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "PRE-DELIM CRC ERR",
priv->debug.rx_stats.err_pre_delim);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "POST-DELIM CRC ERR",
priv->debug.rx_stats.err_post_delim);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "DECRYPT BUSY ERR",
priv->debug.rx_stats.err_decrypt_busy);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "TOTAL PHY ERR",
priv->debug.rx_stats.err_phy);
PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
spin_lock_bh(&priv->tx.tx_lock);
len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
priv->tx.tx_slot, MAX_TX_BUF_NUM);
len += snprintf(buf + len, sizeof(buf) - len, "\n");
len += scnprintf(buf + len, sizeof(buf) - len, "\n");
len += snprintf(buf + len, sizeof(buf) - len,
"Used slots : %d\n",
bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
len += scnprintf(buf + len, sizeof(buf) - len,
"Used slots : %d\n",
bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
spin_unlock_bh(&priv->tx.tx_lock);
@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Failed queue", skb_queue_len(&priv->tx.tx_failed));
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Failed queue", skb_queue_len(&priv->tx.tx_failed));
spin_lock_bh(&priv->tx.tx_lock);
len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Queued count", priv->tx.queued_cnt);
len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
"Queued count", priv->tx.queued_cnt);
spin_unlock_bh(&priv->tx.tx_lock);
if (len > sizeof(buf))
@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "Major Version",
pBase->version >> 12);
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "Minor Version",
pBase->version & 0xFFF);
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "Checksum",
pBase->checksum);
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "Length",
pBase->length);
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "RegDomain1",
pBase->regDmn[0]);
len += snprintf(buf + len, size - len,
"%20s : %10d\n", "RegDomain2",
pBase->regDmn[1]);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"TX Mask", pBase->txMask);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"RX Mask", pBase->rxMask);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Allow 5GHz",
!!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Allow 2GHz",
!!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 2GHz HT20",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 2GHz HT40",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 5Ghz HT20",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 5Ghz HT40",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Big Endian",
!!(pBase->eepMisc & 0x01));
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Major Ver",
(pBase->binBuildNumber >> 24) & 0xFF);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Minor Ver",
(pBase->binBuildNumber >> 16) & 0xFF);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Build",
(pBase->binBuildNumber >> 8) & 0xFF);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "Major Version",
pBase->version >> 12);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "Minor Version",
pBase->version & 0xFFF);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "Checksum",
pBase->checksum);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "Length",
pBase->length);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "RegDomain1",
pBase->regDmn[0]);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n", "RegDomain2",
pBase->regDmn[1]);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"TX Mask", pBase->txMask);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"RX Mask", pBase->rxMask);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Allow 5GHz",
!!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Allow 2GHz",
!!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 2GHz HT20",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 2GHz HT40",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 5Ghz HT20",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Disable 5Ghz HT40",
!!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Big Endian",
!!(pBase->eepMisc & 0x01));
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Major Ver",
(pBase->binBuildNumber >> 24) & 0xFF);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Minor Ver",
(pBase->binBuildNumber >> 16) & 0xFF);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"Cal Bin Build",
(pBase->binBuildNumber >> 8) & 0xFF);
/*
* UB91 specific data.
@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_header_4k *pBase4k =
&priv->ah->eeprom.map4k.baseEepHeader;
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"TX Gain type",
pBase4k->txGainType);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"TX Gain type",
pBase4k->txGainType);
}
/*
@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
struct base_eep_ar9287_header *pBase9287 =
&priv->ah->eeprom.map9287.baseEepHeader;
len += snprintf(buf + len, size - len,
"%20s : %10ddB\n",
"Power Table Offset",
pBase9287->pwrTableOffset);
len += scnprintf(buf + len, size - len,
"%20s : %10ddB\n",
"Power Table Offset",
pBase9287->pwrTableOffset);
len += snprintf(buf + len, size - len,
"%20s : %10d\n",
"OpenLoop Power Ctrl",
pBase9287->openLoopPwrCntl);
len += scnprintf(buf + len, size - len,
"%20s : %10d\n",
"OpenLoop Power Ctrl",
pBase9287->openLoopPwrCntl);
}
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
if (len > size)
len = size;
@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
len += snprintf(buf + len, size - len, "%20s : %10d\n", \
_s, (_val)); \
len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
_s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;
@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
do { \
if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
pModal = &priv->ah->eeprom.def.modalHeader[1]; \
len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
_s, (_val), "|"); \
len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
_s, (_val), "|"); \
} \
if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
pModal = &priv->ah->eeprom.def.modalHeader[0]; \
len += snprintf(buf + len, size - len, "%9d\n", \
len += scnprintf(buf + len, size - len, "%9d\n",\
(_val)); \
} \
} while (0)
@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
if (buf == NULL)
return -ENOMEM;
len += snprintf(buf + len, size - len,
"%31s %15s\n", "2G", "5G");
len += snprintf(buf + len, size - len,
"%32s %16s\n", "====", "====\n");
len += scnprintf(buf + len, size - len,
"%31s %15s\n", "2G", "5G");
len += scnprintf(buf + len, size - len,
"%32s %16s\n", "====", "====\n");
PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
{
#define PR_EEP(_s, _val) \
do { \
len += snprintf(buf + len, size - len, "%20s : %10d\n", \
_s, (_val)); \
len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
_s, (_val)); \
} while (0)
struct ath9k_htc_priv *priv = file->private_data;

View file

@ -549,6 +549,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ath9k_hw_ani_init(ah);
/*
* EEPROM needs to be initialized before we do this.
* This is required for regulatory compliance.
*/
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
if ((regdmn & 0xF0) == CTL_FCC) {
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
}
}
return 0;
}
@ -1644,6 +1656,19 @@ hang_check_iter:
return true;
}
void ath9k_hw_check_nav(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 val;
val = REG_READ(ah, AR_NAV);
if (val != 0xdeadbeef && val > 0x7fff) {
ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
REG_WRITE(ah, AR_NAV, 0);
}
}
EXPORT_SYMBOL(ath9k_hw_check_nav);
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
@ -1822,9 +1847,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* re-using are present.
*/
if (AR_SREV_9462(ah) && (ah->caldata &&
(!ah->caldata->done_txiqcal_once ||
!ah->caldata->done_txclcal_once ||
!ah->caldata->rtt_done)))
(!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
!test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
!test_bit(RTT_DONE, &ah->caldata->cal_flags))))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@ -1880,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
} else if (caldata) {
caldata->paprd_packet_sent = false;
clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
}
ah->noise = ath9k_hw_getchan_noise(ah, chan);
@ -2017,8 +2042,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_bb(ah, chan);
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
@ -3240,19 +3265,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
used = snprintf(hw_name, len,
"Atheros AR%s Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev);
used = scnprintf(hw_name, len,
"Atheros AR%s Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev);
}
else {
used = snprintf(hw_name, len,
"Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev,
ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
AR_RADIO_SREV_MAJOR)),
ah->hw_version.phyRev);
used = scnprintf(hw_name, len,
"Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev,
ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
& AR_RADIO_SREV_MAJOR)),
ah->hw_version.phyRev);
}
hw_name[used] = '\0';

View file

@ -98,8 +98,8 @@
#define PR_EEP(_s, _val) \
do { \
len += snprintf(buf + len, size - len, "%20s : %10d\n", \
_s, (_val)); \
len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
_s, (_val)); \
} while (0)
#define SM(_v, _f) (((_v) << _f##_S) & _f)
@ -404,20 +404,26 @@ enum ath9k_int {
#define MAX_CL_TAB_ENTRY 16
#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
enum ath9k_cal_flags {
RTT_DONE,
PAPRD_PACKET_SENT,
PAPRD_DONE,
NFCAL_PENDING,
NFCAL_INTF,
TXIQCAL_DONE,
TXCLCAL_DONE,
SW_PKDET_DONE,
};
struct ath9k_hw_cal_data {
u16 channel;
u32 channelFlags;
u32 chanmode;
unsigned long cal_flags;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
bool rtt_done;
bool paprd_packet_sent;
bool paprd_done;
bool nfcal_pending;
bool nfcal_interference;
bool done_txiqcal_once;
bool done_txclcal_once;
u8 caldac[2];
u16 small_signal_gain[AR9300_MAX_CHAINS];
u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
u32 num_measures[AR9300_MAX_CHAINS];
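This hunk collapses the per-calibration bools in struct ath9k_hw_cal_data into one unsigned long bitmap indexed by the new ath9k_cal_flags enum, which is why the other hunks in this commit switch to test_bit()/set_bit()/clear_bit() on caldata->cal_flags. A minimal sketch of the same pattern, with illustrative names rather than the driver's own:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_cal_flags {			/* each enumerator is a bit number */
	DEMO_PAPRD_DONE,
	DEMO_TXIQCAL_DONE,
};

struct demo_caldata {
	unsigned long cal_flags;	/* replaces several bool members */
};

static void demo_finish_paprd(struct demo_caldata *cal)
{
	set_bit(DEMO_PAPRD_DONE, &cal->cal_flags);
}

static bool demo_paprd_pending(struct demo_caldata *cal)
{
	return !test_bit(DEMO_PAPRD_DONE, &cal->cal_flags);
}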
@ -558,6 +564,7 @@ struct ath_hw_antcomb_conf {
u8 main_gaintb;
u8 alt_gaintb;
int lna1_lna2_delta;
int lna1_lna2_switch_delta;
u8 div_group;
};
@ -1030,6 +1037,7 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
void ath9k_hw_check_nav(struct ath_hw *ah);
bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);

View file

@ -547,6 +547,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
if (sc->driver_data & ATH9K_PCI_CUS252)
ath_info(common, "CUS252 card detected\n");
if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
ath_info(common, "WB335 1-ANT card detected\n");
if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
ath_info(common, "WB335 2-ANT card detected\n");
/*
* Some WB335 cards do not support antenna diversity. Since
* we use a hardcoded value for AR9565 instead of using the
* EEPROM/OTP data, remove the combining feature from
* the HW capabilities bitmap.
*/
if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
}
if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");

View file

@ -184,7 +184,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
if (!caldata || !caldata->paprd_done) {
if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
}
@ -256,7 +256,9 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
if (!caldata ||
!test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
test_bit(PAPRD_DONE, &caldata->cal_flags)) {
ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
}
@ -316,7 +318,7 @@ void ath_paprd_calibrate(struct work_struct *work)
kfree_skb(skb);
if (chain_ok) {
caldata->paprd_done = true;
set_bit(PAPRD_DONE, &caldata->cal_flags);
ath_paprd_activate(sc);
}
@ -343,7 +345,7 @@ void ath_ani_calibrate(unsigned long data)
u32 cal_interval, short_cal_interval, long_cal_interval;
unsigned long flags;
if (ah->caldata && ah->caldata->nfcal_interference)
if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
long_cal_interval = ATH_LONG_CALINTERVAL_INT;
else
long_cal_interval = ATH_LONG_CALINTERVAL;
@ -432,7 +434,7 @@ set_timer:
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
if (!ah->caldata->paprd_done) {
if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
} else if (!ah->paprd_table_write_done) {
ath9k_ps_wakeup(sc);

View file

@ -362,6 +362,13 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_BB_WATCHDOG;
ath9k_queue_reset(sc, type);
/*
* Increment the ref. counter here so that
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
goto out;
}
@ -400,10 +407,9 @@ void ath9k_tasklet(unsigned long data)
ath9k_btcoex_handle_interrupt(sc, status);
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
out:
spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}

View file

@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
/* CUS252 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x3028),
.driver_data = ATH9K_PCI_CUS252 |
ATH9K_PCI_AR9565_2ANT |
ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_AZWAVE,
0x2176),
.driver_data = ATH9K_PCI_CUS252 |
ATH9K_PCI_AR9565_2ANT |
ATH9K_PCI_BT_ANT_DIV },
/* WB335 1-ANT */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_FOXCONN,
0xE068),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x185F, /* WNC */
0xA119),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0632),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x6671),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x1B9A, /* XAVI */
0x2811),
.driver_data = ATH9K_PCI_AR9565_1ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x1B9A, /* XAVI */
0x2812),
.driver_data = ATH9K_PCI_AR9565_1ANT },
/* WB335 1-ANT / Antenna Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x3025),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x3026),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x302B),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_FOXCONN,
0xE069),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x185F, /* WNC */
0x3028),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0622),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0672),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0662),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_AZWAVE,
0x213A),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_LENOVO,
0x3026),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_HP,
0x18E3),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_HP,
0x217F),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_DELL,
0x020E),
.driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
/* WB335 2-ANT */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411A),
.driver_data = ATH9K_PCI_AR9565_2ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411B),
.driver_data = ATH9K_PCI_AR9565_2ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411C),
.driver_data = ATH9K_PCI_AR9565_2ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411D),
.driver_data = ATH9K_PCI_AR9565_2ANT },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_SAMSUNG,
0x411E),
.driver_data = ATH9K_PCI_AR9565_2ANT },
/* WB335 2-ANT / Antenna-Diversity */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_ATHEROS,
0x302C),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0642),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0652),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x11AD, /* LITEON */
0x0612),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
PCI_VENDOR_ID_AZWAVE,
0x2130),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x144F, /* ASKEY */
0x7202),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x1B9A, /* XAVI */
0x2810),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0036,
0x185F, /* WNC */
0x3027),
.driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E AR9565 (WB335) */
{ PCI_VDEVICE(ATHEROS, 0x0036),
.driver_data = ATH9K_PCI_BT_ANT_DIV },
{ 0 }
};

View file

@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
int used_mcs = 0, used_htmode = 0;
if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
used_mcs = snprintf(mcs, 5, "%d",
rc->rate_table->info[i].ratecode);
used_mcs = scnprintf(mcs, 5, "%d",
rc->rate_table->info[i].ratecode);
if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
used_htmode = snprintf(htmode, 5, "HT40");
used_htmode = scnprintf(htmode, 5, "HT40");
else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
used_htmode = snprintf(htmode, 5, "HT20");
used_htmode = scnprintf(htmode, 5, "HT20");
else
used_htmode = snprintf(htmode, 5, "????");
used_htmode = scnprintf(htmode, 5, "????");
}
mcs[used_mcs] = '\0';
htmode[used_htmode] = '\0';
len += snprintf(buf + len, max - len,
"%6s %6s %3u.%d: "
"%10u %10u %10u %10u\n",
htmode,
mcs,
ratekbps / 1000,
(ratekbps % 1000) / 100,
stats->success,
stats->retries,
stats->xretries,
stats->per);
len += scnprintf(buf + len, max - len,
"%6s %6s %3u.%d: "
"%10u %10u %10u %10u\n",
htmode,
mcs,
ratekbps / 1000,
(ratekbps % 1000) / 100,
stats->success,
stats->retries,
stats->xretries,
stats->per);
}
if (len > max)

View file

@ -39,7 +39,7 @@ struct wmi_fw_version {
struct wmi_event_swba {
__be64 tsf;
u8 beacon_pending;
};
} __packed;
/*
* 64 - HTC header - WMI header - 1 / txstatus
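Marking wmi_event_swba as __packed matters because the struct mirrors a fixed event layout coming from the target: without it the compiler pads the struct tail out to the __be64's alignment, so sizeof() no longer matches the intended 9-byte layout. A tiny illustration (sizes assume a typical 64-bit build; the demo_* names are not from the driver):

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_swba_padded {	/* sizeof() is usually 16: 7 bytes of tail padding */
	__be64 tsf;
	u8 beacon_pending;
};

struct demo_swba_wire {		/* sizeof() is exactly 9, matching the event body */
	__be64 tsf;
	u8 beacon_pending;
} __packed;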

View file

@ -2326,7 +2326,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (sc->sc_ah->caldata)
sc->sc_ah->caldata->paprd_packet_sent = true;
set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */

View file

@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_iounmap(pdev, wil->csr);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {

View file

@ -34,6 +34,7 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
#include "sdio_chip.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
@ -41,13 +42,6 @@
#define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_43143 43143
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@ -58,7 +52,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
SDIO_DEVICE_ID_BROADCOM_4335_4339)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@ -466,7 +461,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
brcmfmac_sdio_pdata = pdev->dev.platform_data;
brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
if (brcmfmac_sdio_pdata->power_on)
brcmfmac_sdio_pdata->power_on();

View file

@ -97,8 +97,6 @@
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_NULL 0xf
#define BRCMF_EVENTING_MASK_LEN 16
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002

View file

@ -136,7 +136,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
int brcmf_attach(uint bus_hdrlen, struct device *dev);

View file

@ -509,9 +509,8 @@ netif_rx:
}
}
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
skb_queue_len(skb_list));
brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
/* process and remove protocol-specific header */
ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
ifp = drvr->iflist[ifidx];
/* process and remove protocol-specific header */
ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) {
if ((ret != -ENODATA) && ifp)
ifp->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
continue;
}
rd = (struct brcmf_skb_reorder_data *)skb->cb;
if (rd->reorder)
brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
else
brcmf_netif_rx(ifp, skb);
if (ret || !ifp || !ifp->ndev) {
if ((ret != -ENODATA) && ifp)
ifp->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
return;
}
rd = (struct brcmf_skb_reorder_data *)skb->cb;
if (rd->reorder)
brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
else
brcmf_netif_rx(ifp, skb);
}
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
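With brcmf_rx_frames() replaced by brcmf_rx_frame(), the bus drivers no longer batch received packets on a temporary sk_buff_head; each frame is handed to the common layer as soon as it is complete (the SDIO and USB callers further down reflect this). A hedged sketch of the new calling convention, with the completion-handler name invented for illustration:

	/* Hypothetical bus-side completion handler; brcmf_rx_frame() takes
	 * ownership of the skb and frees it itself on error, so no local
	 * queue or cleanup is needed here.
	 */
	static void example_bus_rx_complete(struct device *dev,
					    struct sk_buff *skb,
					    unsigned int frame_len)
	{
		skb_put(skb, frame_len);	/* account for received bytes */
		brcmf_rx_frame(dev, skb);	/* deliver one packet at a time */
	}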

View file

@ -275,11 +275,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
#define BRCMF_SDIO_FW_NAME "brcm/brcmfmac-sdio.bin"
#define BRCMF_SDIO_NV_NAME "brcm/brcmfmac-sdio.txt"
MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
@ -454,9 +449,6 @@ struct brcmf_sdio {
struct work_struct datawork;
atomic_t dpc_tskcnt;
const struct firmware *firmware;
u32 fw_ptr;
bool txoff; /* Transmit flow-controlled */
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
u32 revmsk;
const char *bin;
const char *nv;
};
enum brcmf_firmware_type {
BRCMF_FIRMWARE_BIN,
BRCMF_FIRMWARE_NVRAM
};
#define BRCMF_FIRMWARE_NVRAM(name) \
name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
{ BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
{ BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
};
static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
enum brcmf_firmware_type type)
{
const struct firmware *fw;
const char *name;
int err, i;
for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
switch (type) {
case BRCMF_FIRMWARE_BIN:
name = brcmf_fwname_data[i].bin;
break;
case BRCMF_FIRMWARE_NVRAM:
name = brcmf_fwname_data[i].nv;
break;
default:
brcmf_err("invalid firmware type (%d)\n", type);
return NULL;
}
goto found;
}
}
brcmf_err("Unknown chipid %d [%d]\n",
bus->ci->chip, bus->ci->chiprev);
return NULL;
found:
err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
if ((err) || (!fw)) {
brcmf_err("fail to request firmware %s (%d)\n", name, err);
return NULL;
}
return fw;
}
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
@ -1406,13 +1492,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
skb_unlink(pfirst, &bus->glom);
brcmf_rx_frame(bus->sdiodev->dev, pfirst);
bus->sdcnt.rxglompkts++;
}
/* sent any remaining packets up */
if (bus->glom.qlen)
brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@ -1557,7 +1642,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
@ -1759,9 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
}
skb_queue_head_init(&pktlist);
skb_queue_tail(&pktlist, pkt);
brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@ -1786,10 +1868,15 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
/**
* struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
* bus layer usage.
*/
/* flag marking a dummy skb added for DMA alignment requirement */
#define DUMMY_SKB_FLAG 0x10000
#define ALIGN_SKB_FLAG 0x8000
/* bit mask of data length chopped from the previous packet */
#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
@ -1854,7 +1941,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
memcpy(pkt_new->data,
pkt_next->data + pkt_next->len - tail_chop,
tail_chop);
*(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
*(u32 *)(pkt_new->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt_next, pkt_next->len - tail_chop);
__skb_queue_after(pktq, pkt_next, pkt_new);
} else {
@ -1908,8 +1995,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
if (dummy_flags & DUMMY_SKB_FLAG) {
chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
if (dummy_flags & ALIGN_SKB_FLAG) {
chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
@ -3037,69 +3124,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
return true;
}
static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
bus->fw_ptr += len;
return len;
}
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
const struct firmware *fw;
int err;
int offset;
uint len;
u8 *memblock = NULL, *memptr;
int ret;
u8 idx;
int address;
int len;
brcmf_dbg(INFO, "Enter\n");
fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL)
return -ENOENT;
ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_err("Fail to request firmware %d\n", ret);
return ret;
}
bus->fw_ptr = 0;
if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
BRCMF_MAX_CORENUM)
memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
if (memblock == NULL) {
ret = -ENOMEM;
goto err;
}
if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
memptr += (BRCMF_SDALIGN -
((u32)(unsigned long)memblock % BRCMF_SDALIGN));
offset = bus->ci->rambase;
/* Download image */
len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
if (BRCMF_MAX_CORENUM != idx)
memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
while (len) {
ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
if (ret) {
err = 0;
offset = 0;
address = bus->ci->rambase;
while (offset < fw->size) {
len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
fw->size - offset;
err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
(u8 *)&fw->data[offset], len);
if (err) {
brcmf_err("error %d on writing %d membytes at 0x%08x\n",
ret, MEMBLOCK, offset);
goto err;
err, len, address);
goto failure;
}
offset += MEMBLOCK;
len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
offset += len;
address += len;
}
err:
kfree(memblock);
failure:
release_firmware(fw);
release_firmware(bus->firmware);
bus->fw_ptr = 0;
return ret;
return err;
}
/*
@ -3111,7 +3172,8 @@ err:
* by two NULs.
*/
static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
const struct firmware *nv)
{
char *varbuf;
char *dp;
@ -3120,12 +3182,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
int ret = 0;
uint buf_len, n, len;
len = bus->firmware->size;
len = nv->size;
varbuf = vmalloc(len);
if (!varbuf)
return -ENOMEM;
memcpy(varbuf, bus->firmware->data, len);
memcpy(varbuf, nv->data, len);
dp = varbuf;
findNewline = false;
@ -3177,18 +3239,16 @@ err:
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
const struct firmware *nv;
int ret;
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_err("Fail to request nvram %d\n", ret);
return ret;
}
nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
if (nv == NULL)
return -ENOENT;
ret = brcmf_process_nvram_vars(bus);
ret = brcmf_process_nvram_vars(bus, nv);
release_firmware(bus->firmware);
release_firmware(nv);
return ret;
}
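The rewritten brcmf_sdbrcm_download_code_file() streams the image returned by brcmf_sdbrcm_get_fw() into device RAM in MEMBLOCK-sized pieces instead of copying it through an aligned scratch buffer first. A condensed sketch of that loop as it reads from the hunk (the helper name example_download_code is invented; the real function also copies the CR4 reset vector and releases the firmware):

	static int example_download_code(struct brcmf_sdio *bus,
					 const struct firmware *fw)
	{
		int address = bus->ci->rambase;
		int offset, err = 0;
		uint len;

		for (offset = 0; offset < fw->size; offset += len, address += len) {
			len = min_t(uint, MEMBLOCK, fw->size - offset);
			/* write this chunk straight from the firmware blob */
			err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
					       (u8 *)&fw->data[offset], len);
			if (err)
				break;
		}
		return err;
	}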

View file

@ -102,7 +102,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
};
#undef BRCMF_ENUM_DEF
#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02

View file

@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
* @bus_flags: 2 bytes reserved for bus specific parameters
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
u16 bus_flags;
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;

View file

@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return false;
regdata = brcmf_sdio_regrl(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return false;
regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
NULL);
@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
/* if core is already in reset, just return */
regdata = brcmf_sdio_regrl(sdiodev,
@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
/*
* Must do the disable sequence first to work for
@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
if (idx == BRCMF_MAX_CORENUM)
return;
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
ci->chiprev >= 2)
ci->chip = BCM4339_CHIP_ID;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
case BCM4339_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x2e084411;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18005000;
ci->c_inf[1].wrapbase = 0x18105000;
ci->c_inf[1].cib = 0x15004211;
ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
ci->c_inf[2].base = 0x18002000;
ci->c_inf[2].wrapbase = 0x18102000;
ci->c_inf[2].cib = 0x04084411;
ci->ramsize = 0xc0000;
ci->rambase = 0x180000;
break;
default:
brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;

View file

@ -54,6 +54,14 @@
#define BRCMF_MAX_CORENUM 6
/* SDIO device ID */
#define SDIO_DEVICE_ID_BROADCOM_43143 43143
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
struct chip_core_info {
u16 id;
u16 rev;

View file

@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
skb_queue_head_init(&skbq);
skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
brcmf_rx_frames(devinfo->dev, &skbq);
brcmf_rx_frame(devinfo->dev, skb);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);

View file

@ -41,5 +41,6 @@
#define BCM4331_CHIP_ID 0x4331
#define BCM4334_CHIP_ID 0x4334
#define BCM4335_CHIP_ID 0x4335
#define BCM4339_CHIP_ID 0x4339
#endif /* _BRCM_HW_IDS_H_ */

View file

@ -363,7 +363,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
static int cw1200_spi_probe(struct spi_device *func)
{
const struct cw1200_platform_data_spi *plat_data =
func->dev.platform_data;
dev_get_platdata(&func->dev);
struct hwbus_priv *self;
int status;
@ -441,7 +441,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
}
kfree(self);
}
cw1200_spi_off(func->dev.platform_data);
cw1200_spi_off(dev_get_platdata(&func->dev));
return 0;
}

View file

@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
out_free_libipw:
free_libipw(priv->net_dev, 0);
out:
@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
iounmap(priv->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
/* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);

View file

@ -3811,7 +3811,6 @@ out_iounmap:
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(il->hw);
@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
il_free_channel_map(il);
il_free_geos(il);

View file

@ -6706,7 +6706,6 @@ out_free_eeprom:
out_iounmap:
iounmap(il->hw_base);
out_pci_release_regions:
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
iounmap(il->hw_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
il4965_uninit_drv(il);

View file

@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
err = pci_enable_device(pdev);
if (err)
goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
err = pci_enable_device(pdev);
if (err)
goto out_no_pci;
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));

View file

@ -1128,7 +1128,7 @@ static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
int err = 0;
lbs_deb_enter(LBS_DEB_SPI);

View file

@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_err("Card or adapter structure is not valid\n");
return 0;
@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
goto exit;
}
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
card = pci_get_drvdata(pdev);
if (!card || !card->adapter) {
pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
card ? card->adapter : NULL);

View file

@ -6093,7 +6093,6 @@ err_iounmap:
if (priv->sram != NULL)
pci_iounmap(pdev, priv->sram);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
err_free_reg:
@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
pci_release_regions(pdev);
pci_disable_device(pdev);

View file

@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);

View file

@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);

View file

@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);

View file

@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->bridge_io);

View file

@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
pci_set_drvdata(pdev, NULL);
p54_free_common(dev);
err_free_reg:

View file

@ -52,6 +52,7 @@
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5592 2.4G/5G 2T2R
* RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@ -70,6 +71,7 @@
#define RF3322 0x000c
#define RF3053 0x000d
#define RF5592 0x000f
#define RF3070 0x3070
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370

View file

@ -3152,6 +3152,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
case RF3070:
case RF5360:
case RF5370:
case RF5372:
@ -3166,7 +3167,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
if (rt2x00_rf(rt2x00dev, RF3290) ||
if (rt2x00_rf(rt2x00dev, RF3070) ||
rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
@ -3315,29 +3317,37 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
if (rt2x00_rt(rt2x00dev, RT3593)) {
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
/* Band selection. GPIO #8 controls all paths */
/* Band selection */
if (rt2x00_is_usb(rt2x00dev) ||
rt2x00_is_pcie(rt2x00dev)) {
/* GPIO #8 controls all paths */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
if (rf->channel <= 14)
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
else
rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
}
/* LNA PE control. */
if (rt2x00_is_usb(rt2x00dev)) {
/* GPIO #4 controls PE0 and PE1,
* GPIO #7 controls PE2
*/
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
/* LNA PE control.
* GPIO #4 controls PE0 and PE1,
* GPIO #7 controls PE2
*/
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
} else if (rt2x00_is_pcie(rt2x00dev)) {
/* GPIO #4 controls PE0, PE1 and PE2 */
rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
}
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
/* AGC init */
if (rf->channel <= 14)
reg = 0x1c + 2 * rt2x00dev->lna_gain;
@ -4264,6 +4274,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
case RF3053:
case RF3070:
case RF3290:
case RF5360:
case RF5370:
@ -5985,7 +5996,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@ -6653,17 +6664,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
u16 word;
/*
* Initialize all registers.
* Initialize MAC registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev)))
return -EIO;
/*
* Wait BBP/RF to wake up.
*/
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
return -EIO;
/*
* Send signal to firmware during boot time.
* Send signal during boot time to initialize firmware.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@ -6672,9 +6686,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
/*
* Make sure BBP is up and running.
*/
if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
/*
* Initialize BBP/RF registers.
*/
rt2800_init_bbp(rt2x00dev);
rt2800_init_rfcsr(rt2x00dev);
@ -7021,6 +7041,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3052:
case RF3053:
case RF3070:
case RF3290:
case RF3320:
case RF3322:
@ -7543,6 +7564,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3070) ||
rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF3322) ||
@ -7671,6 +7693,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3320:
case RF3052:
case RF3053:
case RF3070:
case RF3290:
case RF5360:
case RF5370:
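Because the old and new lines are interleaved above, the resulting rt2800_enable_radio() start-up sequence is easier to follow reassembled; the sketch below is one reading of the hunk, using only names that appear in it:

	/* 1. Initialize MAC registers. */
	if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
		     rt2800_init_registers(rt2x00dev)))
		return -EIO;

	/* 2. Wait for the BBP/RF to wake up. */
	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
		return -EIO;

	/* 3. Send the boot-time signal that initializes the firmware. */
	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
	msleep(1);

	/* 4. Make sure the BBP is up and running. */
	if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
		return -EIO;

	/* 5. Initialize BBP/RF registers. */
	rt2800_init_bbp(rt2x00dev);
	rt2800_init_rfcsr(rt2x00dev);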

View file

@ -1176,6 +1176,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x002f) },
{ USB_DEVICE(0x1737, 0x0079) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0170) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
@ -1199,6 +1201,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x1103) },
/* Cameo */
{ USB_DEVICE(0x148f, 0xf301) },
/* D-Link */
{ USB_DEVICE(0x2001, 0x3c1f) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7733) },
/* Hawking */
@ -1212,6 +1216,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x016b) },
/* NETGEAR */
{ USB_DEVICE(0x0846, 0x9012) },
{ USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
{ USB_DEVICE(0x2019, 0xed19) },
@ -1220,6 +1225,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0067) },
{ USB_DEVICE(0x0df6, 0x006a) },
{ USB_DEVICE(0x0df6, 0x006e) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3421) },
#endif
@ -1236,6 +1242,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x2001, 0x3c1c) },
{ USB_DEVICE(0x2001, 0x3c1d) },
{ USB_DEVICE(0x2001, 0x3c1e) },
{ USB_DEVICE(0x2001, 0x3c20) },
{ USB_DEVICE(0x2001, 0x3c22) },
{ USB_DEVICE(0x2001, 0x3c23) },
/* LG innotek */
{ USB_DEVICE(0x043e, 0x7a22) },
{ USB_DEVICE(0x043e, 0x7a42) },
@ -1258,12 +1267,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x043e, 0x7a32) },
/* AVM GmbH */
{ USB_DEVICE(0x057c, 0x8501) },
/* D-Link DWA-160-B2 */
/* Buffalo */
{ USB_DEVICE(0x0411, 0x0241) },
/* D-Link */
{ USB_DEVICE(0x2001, 0x3c1a) },
{ USB_DEVICE(0x2001, 0x3c21) },
/* Proware */
{ USB_DEVICE(0x043e, 0x7a13) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x5572) },
/* TRENDnet */
{ USB_DEVICE(0x20f4, 0x724a) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
@ -1333,6 +1347,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24) },
{ USB_DEVICE(0x2019, 0xab29) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259) },
/* RadioShack */

View file

@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
iounmap(priv->map);
err_free_dev:
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:

View file

@ -1613,6 +1613,35 @@ err_free:
}
EXPORT_SYMBOL(rtl_send_smps_action);
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
enum io_type iotype;
if (!is_hal_stop(rtlhal)) {
switch (operation) {
case SCAN_OPT_BACKUP:
iotype = IO_CMD_PAUSE_DM_BY_SCAN;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_IO_CMD,
(u8 *)&iotype);
break;
case SCAN_OPT_RESTORE:
iotype = IO_CMD_RESUME_DM_BY_SCAN;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_IO_CMD,
(u8 *)&iotype);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Unknown Scan Backup operation.\n");
break;
}
}
}
EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
/* There seem to be issues in mac80211 regarding when del ba frames can be
* received. As a work around, we make a fake del_ba if we receive a ba_req;
* however, rx_agg was opened to let mac80211 release some ba related

View file

@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
void rtl_watch_dog_timer_callback(unsigned long data);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
u8 *sa, u8 *bssid, u16 tid);
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
