From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Wed, 5 Aug 2015 17:04:01 -0300 Subject: Initial import --- drivers/net/wireless/ti/wlcore/Kconfig | 35 + drivers/net/wireless/ti/wlcore/Makefile | 12 + drivers/net/wireless/ti/wlcore/acx.c | 1853 +++++++ drivers/net/wireless/ti/wlcore/acx.h | 1136 +++++ drivers/net/wireless/ti/wlcore/boot.c | 532 ++ drivers/net/wireless/ti/wlcore/boot.h | 55 + drivers/net/wireless/ti/wlcore/cmd.c | 2061 ++++++++ drivers/net/wireless/ti/wlcore/cmd.h | 712 +++ drivers/net/wireless/ti/wlcore/conf.h | 1392 ++++++ drivers/net/wireless/ti/wlcore/debug.h | 112 + drivers/net/wireless/ti/wlcore/debugfs.c | 1339 +++++ drivers/net/wireless/ti/wlcore/debugfs.h | 120 + drivers/net/wireless/ti/wlcore/event.c | 311 ++ drivers/net/wireless/ti/wlcore/event.h | 87 + drivers/net/wireless/ti/wlcore/hw_ops.h | 332 ++ drivers/net/wireless/ti/wlcore/ini.h | 232 + drivers/net/wireless/ti/wlcore/init.c | 760 +++ drivers/net/wireless/ti/wlcore/init.h | 39 + drivers/net/wireless/ti/wlcore/io.c | 200 + drivers/net/wireless/ti/wlcore/io.h | 238 + drivers/net/wireless/ti/wlcore/main.c | 6556 +++++++++++++++++++++++++ drivers/net/wireless/ti/wlcore/ps.c | 332 ++ drivers/net/wireless/ti/wlcore/ps.h | 41 + drivers/net/wireless/ti/wlcore/rx.c | 342 ++ drivers/net/wireless/ti/wlcore/rx.h | 152 + drivers/net/wireless/ti/wlcore/scan.c | 488 ++ drivers/net/wireless/ti/wlcore/scan.h | 172 + drivers/net/wireless/ti/wlcore/sdio.c | 458 ++ drivers/net/wireless/ti/wlcore/spi.c | 422 ++ drivers/net/wireless/ti/wlcore/sysfs.c | 216 + drivers/net/wireless/ti/wlcore/sysfs.h | 28 + drivers/net/wireless/ti/wlcore/testmode.c | 397 ++ drivers/net/wireless/ti/wlcore/testmode.h | 32 + drivers/net/wireless/ti/wlcore/tx.c | 1329 +++++ drivers/net/wireless/ti/wlcore/tx.h | 287 ++ drivers/net/wireless/ti/wlcore/vendor_cmd.c | 197 + drivers/net/wireless/ti/wlcore/vendor_cmd.h | 45 + drivers/net/wireless/ti/wlcore/wl12xx_80211.h | 137 + drivers/net/wireless/ti/wlcore/wlcore.h | 649 +++ drivers/net/wireless/ti/wlcore/wlcore_i.h | 558 +++ 40 files changed, 24396 insertions(+) create mode 100644 drivers/net/wireless/ti/wlcore/Kconfig create mode 100644 drivers/net/wireless/ti/wlcore/Makefile create mode 100644 drivers/net/wireless/ti/wlcore/acx.c create mode 100644 drivers/net/wireless/ti/wlcore/acx.h create mode 100644 drivers/net/wireless/ti/wlcore/boot.c create mode 100644 drivers/net/wireless/ti/wlcore/boot.h create mode 100644 drivers/net/wireless/ti/wlcore/cmd.c create mode 100644 drivers/net/wireless/ti/wlcore/cmd.h create mode 100644 drivers/net/wireless/ti/wlcore/conf.h create mode 100644 drivers/net/wireless/ti/wlcore/debug.h create mode 100644 drivers/net/wireless/ti/wlcore/debugfs.c create mode 100644 drivers/net/wireless/ti/wlcore/debugfs.h create mode 100644 drivers/net/wireless/ti/wlcore/event.c create mode 100644 drivers/net/wireless/ti/wlcore/event.h create mode 100644 drivers/net/wireless/ti/wlcore/hw_ops.h create mode 100644 drivers/net/wireless/ti/wlcore/ini.h create mode 100644 drivers/net/wireless/ti/wlcore/init.c create mode 100644 drivers/net/wireless/ti/wlcore/init.h create mode 100644 drivers/net/wireless/ti/wlcore/io.c create mode 100644 drivers/net/wireless/ti/wlcore/io.h create mode 100644 drivers/net/wireless/ti/wlcore/main.c create mode 100644 drivers/net/wireless/ti/wlcore/ps.c create mode 100644 drivers/net/wireless/ti/wlcore/ps.h create mode 100644 drivers/net/wireless/ti/wlcore/rx.c create mode 100644 
drivers/net/wireless/ti/wlcore/rx.h create mode 100644 drivers/net/wireless/ti/wlcore/scan.c create mode 100644 drivers/net/wireless/ti/wlcore/scan.h create mode 100644 drivers/net/wireless/ti/wlcore/sdio.c create mode 100644 drivers/net/wireless/ti/wlcore/spi.c create mode 100644 drivers/net/wireless/ti/wlcore/sysfs.c create mode 100644 drivers/net/wireless/ti/wlcore/sysfs.h create mode 100644 drivers/net/wireless/ti/wlcore/testmode.c create mode 100644 drivers/net/wireless/ti/wlcore/testmode.h create mode 100644 drivers/net/wireless/ti/wlcore/tx.c create mode 100644 drivers/net/wireless/ti/wlcore/tx.h create mode 100644 drivers/net/wireless/ti/wlcore/vendor_cmd.c create mode 100644 drivers/net/wireless/ti/wlcore/vendor_cmd.h create mode 100644 drivers/net/wireless/ti/wlcore/wl12xx_80211.h create mode 100644 drivers/net/wireless/ti/wlcore/wlcore.h create mode 100644 drivers/net/wireless/ti/wlcore/wlcore_i.h (limited to 'drivers/net/wireless/ti/wlcore') diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig new file mode 100644 index 000000000..7c099542b --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -0,0 +1,35 @@ +config WLCORE + tristate "TI wlcore support" + depends on WL_TI && MAC80211 + select FW_LOADER + ---help--- + This module contains the main code for TI WLAN chips. It abstracts + hardware-specific differences among different chipset families. + Each chipset family needs to implement its own lower-level module + that will depend on this module for the common code. + + If you choose to build a module, it will be called wlcore. Say N if + unsure. + +config WLCORE_SPI + tristate "TI wlcore SPI support" + depends on WLCORE && SPI_MASTER + select CRC7 + ---help--- + This module adds support for the SPI interface of adapters using + TI WLAN chipsets. Select this if your platform is using + the SPI bus. + + If you choose to build a module, it'll be called wlcore_spi. + Say N if unsure. + +config WLCORE_SDIO + tristate "TI wlcore SDIO support" + depends on WLCORE && MMC + ---help--- + This module adds support for the SDIO interface of adapters using + TI WLAN chipsets. Select this if your platform is using + the SDIO bus. + + If you choose to build a module, it'll be called wlcore_sdio. + Say N if unsure. diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile new file mode 100644 index 000000000..0a69c1373 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/Makefile @@ -0,0 +1,12 @@ +wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ + boot.o init.o debugfs.o scan.o sysfs.o vendor_cmd.o + +wlcore_spi-objs = spi.o +wlcore_sdio-objs = sdio.o + +wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o +obj-$(CONFIG_WLCORE) += wlcore.o +obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o +obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c new file mode 100644 index 000000000..f28fa3b50 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -0,0 +1,1853 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
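A rough, hypothetical sketch of the bus-glue side, for orientation only: the Kconfig entries above split the driver into the common wlcore core plus thin wlcore_spi/wlcore_sdio glue modules. The skeleton below is not the sdio.c added by this patch; the example_* names are invented, the hand-off to the core module is omitted, and only the registration boilerplate is shown.

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

static int example_sdio_probe(struct sdio_func *func,
			      const struct sdio_device_id *id)
{
	/* claim the SDIO function and hand the device over to the core */
	return 0;
}

static void example_sdio_remove(struct sdio_func *func)
{
}

static const struct sdio_device_id example_sdio_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
	{ }
};
MODULE_DEVICE_TABLE(sdio, example_sdio_ids);

static struct sdio_driver example_sdio_driver = {
	.name		= "example_sdio",
	.id_table	= example_sdio_ids,
	.probe		= example_sdio_probe,
	.remove		= example_sdio_remove,
};

static int __init example_sdio_init(void)
{
	return sdio_register_driver(&example_sdio_driver);
}
module_init(example_sdio_init);

static void __exit example_sdio_exit(void)
{
	sdio_unregister_driver(&example_sdio_driver);
}
module_exit(example_sdio_exit);

MODULE_LICENSE("GPL");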
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "acx.h" + +#include +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "wl12xx_80211.h" +#include "ps.h" +#include "hw_ops.h" + +int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 wake_up_event, u8 listen_interval) +{ + struct acx_wake_up_condition *wake_up; + int ret; + + wl1271_debug(DEBUG_ACX, "acx wake up conditions (wake_up_event %d listen_interval %d)", + wake_up_event, listen_interval); + + wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); + if (!wake_up) { + ret = -ENOMEM; + goto out; + } + + wake_up->role_id = wlvif->role_id; + wake_up->wake_up_event = wake_up_event; + wake_up->listen_interval = listen_interval; + + ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, + wake_up, sizeof(*wake_up)); + if (ret < 0) { + wl1271_warning("could not set wake up conditions: %d", ret); + goto out; + } + +out: + kfree(wake_up); + return ret; +} + +int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) +{ + struct acx_sleep_auth *auth; + int ret; + + wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth); + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) { + ret = -ENOMEM; + goto out; + } + + auth->sleep_auth = sleep_auth; + + ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); + if (ret < 0) { + wl1271_error("could not configure sleep_auth to %d: %d", + sleep_auth, ret); + goto out; + } + + wl->sleep_auth = sleep_auth; +out: + kfree(auth); + return ret; +} +EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth); + +int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, + int power) +{ + struct acx_current_tx_power *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr %d", power); + + if (power < 0 || power > 25) + return -EINVAL; + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->current_tx_power = power * 10; + + ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("configure of tx power failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct acx_feature_config *feature; + int ret; + + wl1271_debug(DEBUG_ACX, "acx feature cfg"); + + feature = kzalloc(sizeof(*feature), GFP_KERNEL); + if (!feature) { + ret = -ENOMEM; + goto out; + } + + /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ + feature->role_id = wlvif->role_id; + feature->data_flow_options = 0; + feature->options = 0; + + ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG, + feature, sizeof(*feature)); + if (ret < 0) { + wl1271_error("Couldnt set HW encryption"); + goto out; + } + +out: + kfree(feature); + return ret; +} + +int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map, + size_t len) +{ + int ret; + + wl1271_debug(DEBUG_ACX, "acx mem map"); + + ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, + sizeof(struct acx_header), len); + if (ret < 0) + return ret; + + 
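Every setter in acx.c above follows the same shape: allocate a zeroed command structure, fill in the role id and the parameters, hand it to wl1271_cmd_configure() (which fills the acx_header id and length), and free the buffer whether or not the command succeeded. A condensed sketch of that pattern, not part of the patch, using an invented example_acx_payload structure and ACX_SLEEP_AUTH only as a placeholder id:

struct example_acx_payload {
	struct acx_header header;

	u8 role_id;
	u8 value;
	u8 padding[2];
} __packed;

static int example_acx_set(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			   u8 value)
{
	struct example_acx_payload *acx;
	int ret;

	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
	if (!acx)
		return -ENOMEM;

	/* per-interface role id, as in the real setters above */
	acx->role_id = wlvif->role_id;
	acx->value = value;

	/* wl1271_cmd_configure() fills header.id and header.len for us */
	ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, acx, sizeof(*acx));
	if (ret < 0)
		wl1271_warning("example acx setter failed: %d", ret);

	kfree(acx);
	return ret;
}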
return 0; +} + +int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl) +{ + struct acx_rx_msdu_lifetime *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx rx msdu life time"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time); + ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set rx msdu life time: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_slot_type slot_time) +{ + struct acx_slot *slot; + int ret; + + wl1271_debug(DEBUG_ACX, "acx slot"); + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + ret = -ENOMEM; + goto out; + } + + slot->role_id = wlvif->role_id; + slot->wone_index = STATION_WONE_INDEX; + slot->slot_time = slot_time; + + ret = wl1271_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); + if (ret < 0) { + wl1271_warning("failed to set slot time: %d", ret); + goto out; + } + +out: + kfree(slot); + return ret; +} + +int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, void *mc_list, u32 mc_list_len) +{ + struct acx_dot11_grp_addr_tbl *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx group address tbl"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* MAC filtering */ + acx->role_id = wlvif->role_id; + acx->enabled = enable; + acx->num_groups = mc_list_len; + memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); + + ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set group addr table: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_service_period_timeout(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct acx_rx_timeout *rx_timeout; + int ret; + + rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); + if (!rx_timeout) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_ACX, "acx service period timeout"); + + rx_timeout->role_id = wlvif->role_id; + rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); + rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); + + ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, + rx_timeout, sizeof(*rx_timeout)); + if (ret < 0) { + wl1271_warning("failed to set service period timeout: %d", + ret); + goto out; + } + +out: + kfree(rx_timeout); + return ret; +} + +int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u32 rts_threshold) +{ + struct acx_rts_threshold *rts; + int ret; + + /* + * If the RTS threshold is not configured or out of range, use the + * default value. 
+ */ + if (rts_threshold > IEEE80211_MAX_RTS_THRESHOLD) + rts_threshold = wl->conf.rx.rts_threshold; + + wl1271_debug(DEBUG_ACX, "acx rts threshold: %d", rts_threshold); + + rts = kzalloc(sizeof(*rts), GFP_KERNEL); + if (!rts) { + ret = -ENOMEM; + goto out; + } + + rts->role_id = wlvif->role_id; + rts->threshold = cpu_to_le16((u16)rts_threshold); + + ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); + if (ret < 0) { + wl1271_warning("failed to set rts threshold: %d", ret); + goto out; + } + +out: + kfree(rts); + return ret; +} + +int wl1271_acx_dco_itrim_params(struct wl1271 *wl) +{ + struct acx_dco_itrim_params *dco; + struct conf_itrim_settings *c = &wl->conf.itrim; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dco itrim parameters"); + + dco = kzalloc(sizeof(*dco), GFP_KERNEL); + if (!dco) { + ret = -ENOMEM; + goto out; + } + + dco->enable = c->enable; + dco->timeout = cpu_to_le32(c->timeout); + + ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS, + dco, sizeof(*dco)); + if (ret < 0) { + wl1271_warning("failed to set dco itrim parameters: %d", ret); + goto out; + } + +out: + kfree(dco); + return ret; +} + +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable_filter) +{ + struct acx_beacon_filter_option *beacon_filter = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d", + enable_filter); + + if (enable_filter && + wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED) + goto out; + + beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); + if (!beacon_filter) { + ret = -ENOMEM; + goto out; + } + + beacon_filter->role_id = wlvif->role_id; + beacon_filter->enable = enable_filter; + + /* + * When set to zero, and the filter is enabled, beacons + * without the unicast TIM bit set are dropped. 
+ */ + beacon_filter->max_num_beacons = 0; + + ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, + beacon_filter, sizeof(*beacon_filter)); + if (ret < 0) { + wl1271_warning("failed to set beacon filter opt: %d", ret); + goto out; + } + +out: + kfree(beacon_filter); + return ret; +} + +int wl1271_acx_beacon_filter_table(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct acx_beacon_filter_ie_table *ie_table; + int i, idx = 0; + int ret; + bool vendor_spec = false; + + wl1271_debug(DEBUG_ACX, "acx beacon filter table"); + + ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); + if (!ie_table) { + ret = -ENOMEM; + goto out; + } + + /* configure default beacon pass-through rules */ + ie_table->role_id = wlvif->role_id; + ie_table->num_ie = 0; + for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { + struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); + ie_table->table[idx++] = r->ie; + ie_table->table[idx++] = r->rule; + + if (r->ie == WLAN_EID_VENDOR_SPECIFIC) { + /* only one vendor specific ie allowed */ + if (vendor_spec) + continue; + + /* for vendor specific rules configure the + additional fields */ + memcpy(&(ie_table->table[idx]), r->oui, + CONF_BCN_IE_OUI_LEN); + idx += CONF_BCN_IE_OUI_LEN; + ie_table->table[idx++] = r->type; + memcpy(&(ie_table->table[idx]), r->version, + CONF_BCN_IE_VER_LEN); + idx += CONF_BCN_IE_VER_LEN; + vendor_spec = true; + } + + ie_table->num_ie++; + } + + ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, + ie_table, sizeof(*ie_table)); + if (ret < 0) { + wl1271_warning("failed to set beacon filter table: %d", ret); + goto out; + } + +out: + kfree(ie_table); + return ret; +} + +#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff + +int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) +{ + struct acx_conn_monit_params *acx; + u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE; + u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE; + int ret; + + wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s", + enable ? 
"enabled" : "disabled"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + if (enable) { + threshold = wl->conf.conn.synch_fail_thold; + timeout = wl->conf.conn.bss_lose_timeout; + } + + acx->role_id = wlvif->role_id; + acx->synch_fail_thold = cpu_to_le32(threshold); + acx->bss_lose_timeout = cpu_to_le32(timeout); + + ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set connection monitor " + "parameters: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + + +int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable) +{ + struct acx_bt_wlan_coex *pta; + int ret; + + wl1271_debug(DEBUG_ACX, "acx sg enable"); + + pta = kzalloc(sizeof(*pta), GFP_KERNEL); + if (!pta) { + ret = -ENOMEM; + goto out; + } + + if (enable) + pta->enable = wl->conf.sg.state; + else + pta->enable = CONF_SG_DISABLE; + + ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); + if (ret < 0) { + wl1271_warning("failed to set softgemini enable: %d", ret); + goto out; + } + +out: + kfree(pta); + return ret; +} + +int wl12xx_acx_sg_cfg(struct wl1271 *wl) +{ + struct acx_bt_wlan_coex_param *param; + struct conf_sg_settings *c = &wl->conf.sg; + int i, ret; + + wl1271_debug(DEBUG_ACX, "acx sg cfg"); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto out; + } + + /* BT-WLAN coext parameters */ + for (i = 0; i < CONF_SG_PARAMS_MAX; i++) + param->params[i] = cpu_to_le32(c->params[i]); + param->param_idx = CONF_SG_PARAMS_ALL; + + ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); + if (ret < 0) { + wl1271_warning("failed to set sg config: %d", ret); + goto out; + } + +out: + kfree(param); + return ret; +} + +int wl1271_acx_cca_threshold(struct wl1271 *wl) +{ + struct acx_energy_detection *detection; + int ret; + + wl1271_debug(DEBUG_ACX, "acx cca threshold"); + + detection = kzalloc(sizeof(*detection), GFP_KERNEL); + if (!detection) { + ret = -ENOMEM; + goto out; + } + + detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold); + detection->tx_energy_detection = wl->conf.tx.tx_energy_detection; + + ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, + detection, sizeof(*detection)); + if (ret < 0) + wl1271_warning("failed to set cca threshold: %d", ret); + +out: + kfree(detection); + return ret; +} + +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct acx_beacon_broadcast *bb; + int ret; + + wl1271_debug(DEBUG_ACX, "acx bcn dtim options"); + + bb = kzalloc(sizeof(*bb), GFP_KERNEL); + if (!bb) { + ret = -ENOMEM; + goto out; + } + + bb->role_id = wlvif->role_id; + bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); + bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); + bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; + bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold; + + ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); + if (ret < 0) { + wl1271_warning("failed to set rx config: %d", ret); + goto out; + } + +out: + kfree(bb); + return ret; +} + +int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid) +{ + struct acx_aid *acx_aid; + int ret; + + wl1271_debug(DEBUG_ACX, "acx aid"); + + acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); + if (!acx_aid) { + ret = -ENOMEM; + goto out; + } + + acx_aid->role_id = wlvif->role_id; + acx_aid->aid = cpu_to_le16(aid); + + ret = wl1271_cmd_configure(wl, ACX_AID, 
acx_aid, sizeof(*acx_aid)); + if (ret < 0) { + wl1271_warning("failed to set aid: %d", ret); + goto out; + } + +out: + kfree(acx_aid); + return ret; +} + +int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask) +{ + struct acx_event_mask *mask; + int ret; + + wl1271_debug(DEBUG_ACX, "acx event mbox mask"); + + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + ret = -ENOMEM; + goto out; + } + + /* high event mask is unused */ + mask->high_event_mask = cpu_to_le32(0xffffffff); + mask->event_mask = cpu_to_le32(event_mask); + + ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, + mask, sizeof(*mask)); + if (ret < 0) { + wl1271_warning("failed to set acx_event_mbox_mask: %d", ret); + goto out; + } + +out: + kfree(mask); + return ret; +} + +int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_preamble_type preamble) +{ + struct acx_preamble *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx_set_preamble"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->preamble = preamble; + + ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of preamble failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_ctsprotect_type ctsprotect) +{ + struct acx_ctsprotect *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx_set_ctsprotect"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->ctsprotect = ctsprotect; + + ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of ctsprotect failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_statistics(struct wl1271 *wl, void *stats) +{ + int ret; + + wl1271_debug(DEBUG_ACX, "acx statistics"); + + ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, + sizeof(struct acx_header), + wl->stats.fw_stats_len); + if (ret < 0) { + wl1271_warning("acx statistics failed: %d", ret); + return -ENOMEM; + } + + return 0; +} + +int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct acx_rate_policy *acx; + struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx rate policies"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", + wlvif->basic_rate, wlvif->rate_set); + + /* configure one basic rate class */ + acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx); + acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate); + acx->rate_policy.short_retry_limit = c->short_retry_limit; + acx->rate_policy.long_retry_limit = c->long_retry_limit; + acx->rate_policy.aflags = c->aflags; + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of rate policies failed: %d", ret); + goto out; + } + + /* configure one AP supported rate class */ + acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); + + /* the AP policy is HW specific */ + acx->rate_policy.enabled_rates = + cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif)); + acx->rate_policy.short_retry_limit = c->short_retry_limit; + acx->rate_policy.long_retry_limit = c->long_retry_limit; + 
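wl1271_acx_sta_rate_policies(), in progress here, issues the same ACX_RATE_POLICY command three times, once per policy index: the basic rate class, the AP-supported rate class and the fixed OFDM class used for P2P. The calls differ only in rate_policy_idx and enabled_rates, so they can be read as repeated applications of a helper like the hypothetical one below (example_set_rate_class is not in the patch):

static int example_set_rate_class(struct wl1271 *wl, u32 idx, u32 rates,
				  struct conf_tx_rate_class *c)
{
	struct acx_rate_policy *acx;
	int ret;

	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
	if (!acx)
		return -ENOMEM;

	acx->rate_policy_idx = cpu_to_le32(idx);
	acx->rate_policy.enabled_rates = cpu_to_le32(rates);
	acx->rate_policy.short_retry_limit = c->short_retry_limit;
	acx->rate_policy.long_retry_limit = c->long_retry_limit;
	acx->rate_policy.aflags = c->aflags;

	ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
	kfree(acx);
	return ret;
}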
acx->rate_policy.aflags = c->aflags; + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of rate policies failed: %d", ret); + goto out; + } + + /* + * configure one rate class for basic p2p operations. + * (p2p packets should always go out with OFDM rates, even + * if we are currently connected to 11b AP) + */ + acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx); + acx->rate_policy.enabled_rates = + cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P); + acx->rate_policy.short_retry_limit = c->short_retry_limit; + acx->rate_policy.long_retry_limit = c->long_retry_limit; + acx->rate_policy.aflags = c->aflags; + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of rate policies failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, + u8 idx) +{ + struct acx_rate_policy *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x", + idx, c->enabled_rates); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates); + acx->rate_policy.short_retry_limit = c->short_retry_limit; + acx->rate_policy.long_retry_limit = c->long_retry_limit; + acx->rate_policy.aflags = c->aflags; + + acx->rate_policy_idx = cpu_to_le32(idx); + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of ap rate policy failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop) +{ + struct acx_ac_cfg *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d " + "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->ac = ac; + acx->cw_min = cw_min; + acx->cw_max = cpu_to_le16(cw_max); + acx->aifsn = aifsn; + acx->tx_op_limit = cpu_to_le16(txop); + + ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ac cfg failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue_id, u8 channel_type, + u8 tsid, u8 ps_scheme, u8 ack_policy, + u32 apsd_conf0, u32 apsd_conf1) +{ + struct acx_tid_config *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx tid config"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->queue_id = queue_id; + acx->channel_type = channel_type; + acx->tsid = tsid; + acx->ps_scheme = ps_scheme; + acx->ack_policy = ack_policy; + acx->apsd_conf[0] = cpu_to_le32(apsd_conf0); + acx->apsd_conf[1] = cpu_to_le32(apsd_conf1); + + ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of tid config failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold) +{ + struct acx_frag_threshold *acx; + int ret = 0; + + /* + * If the fragmentation is not configured or out of range, use the + * default value. 
+ */ + if (frag_threshold > IEEE80211_MAX_FRAG_THRESHOLD) + frag_threshold = wl->conf.tx.frag_threshold; + + wl1271_debug(DEBUG_ACX, "acx frag threshold: %d", frag_threshold); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->frag_threshold = cpu_to_le16((u16)frag_threshold); + ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of frag threshold failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_tx_config_options(struct wl1271 *wl) +{ + struct acx_tx_config_options *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx tx config options"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout); + acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold); + ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of tx options failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl12xx_acx_mem_cfg(struct wl1271 *wl) +{ + struct wl12xx_acx_config_memory *mem_conf; + struct conf_memory_settings *mem; + int ret; + + wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); + + mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); + if (!mem_conf) { + ret = -ENOMEM; + goto out; + } + + mem = &wl->conf.mem; + + /* memory config */ + mem_conf->num_stations = mem->num_stations; + mem_conf->rx_mem_block_num = mem->rx_block_num; + mem_conf->tx_min_mem_block_num = mem->tx_min_block_num; + mem_conf->num_ssid_profiles = mem->ssid_profiles; + mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc); + mem_conf->dyn_mem_enable = mem->dynamic_memory; + mem_conf->tx_free_req = mem->min_req_tx_blocks; + mem_conf->rx_free_req = mem->min_req_rx_blocks; + mem_conf->tx_min = mem->tx_min; + mem_conf->fwlog_blocks = wl->conf.fwlog.mem_blocks; + + ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, + sizeof(*mem_conf)); + if (ret < 0) { + wl1271_warning("wl1271 mem config failed: %d", ret); + goto out; + } + +out: + kfree(mem_conf); + return ret; +} +EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg); + +int wl1271_acx_init_mem_config(struct wl1271 *wl) +{ + int ret; + + wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), + GFP_KERNEL); + if (!wl->target_mem_map) { + wl1271_error("couldn't allocate target memory map"); + return -ENOMEM; + } + + /* we now ask for the firmware built memory map */ + ret = wl1271_acx_mem_map(wl, (void *)wl->target_mem_map, + sizeof(struct wl1271_acx_mem_map)); + if (ret < 0) { + wl1271_error("couldn't retrieve firmware memory map"); + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + return ret; + } + + /* initialize TX block book keeping */ + wl->tx_blocks_available = + le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks); + wl1271_debug(DEBUG_TX, "available tx blocks: %d", + wl->tx_blocks_available); + + return 0; +} +EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config); + +int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) +{ + struct wl1271_acx_rx_config_opt *rx_conf; + int ret; + + wl1271_debug(DEBUG_ACX, "wl1271 rx interrupt config"); + + rx_conf = kzalloc(sizeof(*rx_conf), GFP_KERNEL); + if (!rx_conf) { + ret = -ENOMEM; + goto out; + } + + rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold); + rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout); + rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold); + 
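wl1271_acx_init_mem_config() above interrogates ACX_MEM_MAP once at init time, keeps the result in wl->target_mem_map and seeds wl->tx_blocks_available from it. A small hypothetical consumer of that cached map is sketched below; example_log_mem_map is not part of the patch and assumes init_mem_config has already succeeded.

static void example_log_mem_map(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *map = wl->target_mem_map;

	wl1271_debug(DEBUG_TX, "fw tx blocks: %u rx blocks: %u",
		     le32_to_cpu(map->num_tx_mem_blocks),
		     le32_to_cpu(map->num_rx_mem_blocks));
}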
rx_conf->queue_type = wl->conf.rx.queue_type; + + ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, + sizeof(*rx_conf)); + if (ret < 0) { + wl1271_warning("wl1271 rx config opt failed: %d", ret); + goto out; + } + +out: + kfree(rx_conf); + return ret; +} + +int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) +{ + struct wl1271_acx_bet_enable *acx = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx bet enable"); + + if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE) + goto out; + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; + acx->max_consecutive = wl->conf.conn.bet_max_consecutive; + + ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx bet enable failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 enable, __be32 address) +{ + struct wl1271_acx_arp_filter *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->version = ACX_IPV4_VERSION; + acx->enable = enable; + + if (enable) + memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE); + + ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set arp ip filter: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_pm_config(struct wl1271 *wl) +{ + struct wl1271_acx_pm_config *acx = NULL; + struct conf_pm_config_settings *c = &wl->conf.pm_config; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx pm config"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time); + acx->host_fast_wakeup_support = c->host_fast_wakeup_support; + + ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx pm config failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} +EXPORT_SYMBOL_GPL(wl1271_acx_pm_config); + +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) +{ + struct wl1271_acx_keep_alive_mode *acx = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->enabled = enable; + + ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx keep alive mode failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 index, u8 tpl_valid) +{ + struct wl1271_acx_keep_alive_config *acx = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx keep alive config"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); + acx->index = index; + acx->tpl_validation = tpl_valid; + acx->trigger = ACX_KEEP_ALIVE_NO_TX; + + ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG, + acx, sizeof(*acx)); + if (ret 
< 0) { + wl1271_warning("acx keep alive config failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, s16 thold, u8 hyst) +{ + struct wl1271_acx_rssi_snr_trigger *acx = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx rssi snr trigger"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + wlvif->last_rssi_event = -1; + + acx->role_id = wlvif->role_id; + acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); + acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; + acx->type = WL1271_ACX_TRIG_TYPE_EDGE; + if (enable) + acx->enable = WL1271_ACX_TRIG_ENABLE; + else + acx->enable = WL1271_ACX_TRIG_DISABLE; + + acx->index = WL1271_ACX_TRIG_IDX_RSSI; + acx->dir = WL1271_ACX_TRIG_DIR_BIDIR; + acx->threshold = cpu_to_le16(thold); + acx->hysteresis = hyst; + + ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx rssi snr trigger setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct wl1271_acx_rssi_snr_avg_weights *acx = NULL; + struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->rssi_beacon = c->avg_weight_rssi_beacon; + acx->rssi_data = c->avg_weight_rssi_data; + acx->snr_beacon = c->avg_weight_snr_beacon; + acx->snr_data = c->avg_weight_snr_data; + + ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx rssi snr trigger weights failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, u8 hlid) +{ + struct wl1271_acx_ht_capabilities *acx; + int ret = 0; + u32 ht_capabilites = 0; + + wl1271_debug(DEBUG_ACX, "acx ht capabilities setting " + "sta supp: %d sta cap: %d", ht_cap->ht_supported, + ht_cap->cap); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + if (allow_ht_operation && ht_cap->ht_supported) { + /* no need to translate capabilities - use the spec values */ + ht_capabilites = ht_cap->cap; + + /* + * this bit is not employed by the spec but only by FW to + * indicate peer HT support + */ + ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION; + + /* get data from A-MPDU parameters field */ + acx->ampdu_max_length = ht_cap->ampdu_factor; + acx->ampdu_min_spacing = ht_cap->ampdu_density; + } + + acx->hlid = hlid; + acx->ht_capabilites = cpu_to_le32(ht_capabilites); + + ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ht capabilities setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} +EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities); + + +int wl1271_acx_set_ht_information(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u16 ht_operation_mode) +{ + struct wl1271_acx_ht_information *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ht information setting"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + acx->ht_protection = + 
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); + acx->rifs_mode = 0; + acx->gf_protection = + !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + acx->ht_tx_burst_limit = 0; + acx->dual_cts_protection = 0; + + ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx)); + + if (ret < 0) { + wl1271_warning("acx ht information setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +/* Configure BA session initiator/receiver parameters setting in the FW. */ +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct wl1271_acx_ba_initiator_policy *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx ba initiator policy"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* set for the current role */ + acx->role_id = wlvif->role_id; + acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap; + acx->win_size = wl->conf.ht.tx_ba_win_size; + acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; + + ret = wl1271_cmd_configure(wl, + ACX_BA_SESSION_INIT_POLICY, + acx, + sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ba initiator policy failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +/* setup BA session receiver setting in the FW. */ +int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, + u16 ssn, bool enable, u8 peer_hlid) +{ + struct wl1271_acx_ba_receiver_setup *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx ba receiver session setting"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->hlid = peer_hlid; + acx->tid = tid_index; + acx->enable = enable; + acx->win_size = wl->conf.ht.rx_ba_win_size; + acx->ssn = ssn; + + ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx, + sizeof(*acx), + BIT(CMD_STATUS_NO_RX_BA_SESSION)); + if (ret < 0) { + wl1271_warning("acx ba receiver session failed: %d", ret); + goto out; + } + + /* sometimes we can't start the session */ + if (ret == CMD_STATUS_NO_RX_BA_SESSION) { + wl1271_warning("no fw rx ba on tid %d", tid_index); + ret = -EBUSY; + goto out; + } + + ret = 0; +out: + kfree(acx); + return ret; +} + +int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u64 *mactime) +{ + struct wl12xx_acx_fw_tsf_information *tsf_info; + int ret; + + tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL); + if (!tsf_info) { + ret = -ENOMEM; + goto out; + } + + tsf_info->role_id = wlvif->role_id; + + ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info, + sizeof(struct acx_header), sizeof(*tsf_info)); + if (ret < 0) { + wl1271_warning("acx tsf info interrogate failed"); + goto out; + } + + *mactime = le32_to_cpu(tsf_info->current_tsf_low) | + ((u64) le32_to_cpu(tsf_info->current_tsf_high) << 32); + +out: + kfree(tsf_info); + return ret; +} + +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) +{ + struct wl1271_acx_ps_rx_streaming *rx_streaming; + u32 conf_queues, enable_queues; + int i, ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ps rx streaming"); + + rx_streaming = kzalloc(sizeof(*rx_streaming), GFP_KERNEL); + if (!rx_streaming) { + ret = -ENOMEM; + goto out; + } + + conf_queues = wl->conf.rx_streaming.queues; + if (enable) + enable_queues = conf_queues; + else + enable_queues = 0; + + for (i = 0; i < 8; i++) { + /* + * Skip non-changed queues, to avoid redundant acxs. 
+ * this check assumes conf.rx_streaming.queues can't + * be changed while rx_streaming is enabled. + */ + if (!(conf_queues & BIT(i))) + continue; + + rx_streaming->role_id = wlvif->role_id; + rx_streaming->tid = i; + rx_streaming->enable = enable_queues & BIT(i); + rx_streaming->period = wl->conf.rx_streaming.interval; + rx_streaming->timeout = wl->conf.rx_streaming.interval; + + ret = wl1271_cmd_configure(wl, ACX_PS_RX_STREAMING, + rx_streaming, + sizeof(*rx_streaming)); + if (ret < 0) { + wl1271_warning("acx ps rx streaming failed: %d", ret); + goto out; + } + } +out: + kfree(rx_streaming); + return ret; +} + +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl1271_acx_ap_max_tx_retry *acx = NULL; + int ret; + + wl1271_debug(DEBUG_ACX, "acx ap max tx retry"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + acx->role_id = wlvif->role_id; + acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); + + ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ap max tx retry failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl1271_acx_config_ps *config_ps; + int ret; + + wl1271_debug(DEBUG_ACX, "acx config ps"); + + config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL); + if (!config_ps) { + ret = -ENOMEM; + goto out; + } + + config_ps->exit_retries = wl->conf.conn.psm_exit_retries; + config_ps->enter_retries = wl->conf.conn.psm_entry_retries; + config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate); + + ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, + sizeof(*config_ps)); + + if (ret < 0) { + wl1271_warning("acx config ps failed: %d", ret); + goto out; + } + +out: + kfree(config_ps); + return ret; +} + +int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 *addr) +{ + struct wl1271_acx_inconnection_sta *acx = NULL; + int ret; + + wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + memcpy(acx->addr, addr, ETH_ALEN); + acx->role_id = wlvif->role_id; + + ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx set inconnaction sta failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_fm_coex(struct wl1271 *wl) +{ + struct wl1271_acx_fm_coex *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx fm coex setting"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->enable = wl->conf.fm_coex.enable; + acx->swallow_period = wl->conf.fm_coex.swallow_period; + acx->n_divider_fref_set_1 = wl->conf.fm_coex.n_divider_fref_set_1; + acx->n_divider_fref_set_2 = wl->conf.fm_coex.n_divider_fref_set_2; + acx->m_divider_fref_set_1 = + cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_1); + acx->m_divider_fref_set_2 = + cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_2); + acx->coex_pll_stabilization_time = + cpu_to_le32(wl->conf.fm_coex.coex_pll_stabilization_time); + acx->ldo_stabilization_time = + cpu_to_le16(wl->conf.fm_coex.ldo_stabilization_time); + acx->fm_disturbed_band_margin = + wl->conf.fm_coex.fm_disturbed_band_margin; + acx->swallow_clk_diff = wl->conf.fm_coex.swallow_clk_diff; + + ret = wl1271_cmd_configure(wl, ACX_FM_COEX_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx 
fm coex setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl) +{ + struct wl12xx_acx_set_rate_mgmt_params *acx = NULL; + struct conf_rate_policy_settings *conf = &wl->conf.rate; + int ret; + + wl1271_debug(DEBUG_ACX, "acx set rate mgmt params"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + acx->index = ACX_RATE_MGMT_ALL_PARAMS; + acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score); + acx->per_add = cpu_to_le16(conf->per_add); + acx->per_th1 = cpu_to_le16(conf->per_th1); + acx->per_th2 = cpu_to_le16(conf->per_th2); + acx->max_per = cpu_to_le16(conf->max_per); + acx->inverse_curiosity_factor = conf->inverse_curiosity_factor; + acx->tx_fail_low_th = conf->tx_fail_low_th; + acx->tx_fail_high_th = conf->tx_fail_high_th; + acx->per_alpha_shift = conf->per_alpha_shift; + acx->per_add_shift = conf->per_add_shift; + acx->per_beta1_shift = conf->per_beta1_shift; + acx->per_beta2_shift = conf->per_beta2_shift; + acx->rate_check_up = conf->rate_check_up; + acx->rate_check_down = conf->rate_check_down; + memcpy(acx->rate_retry_policy, conf->rate_retry_policy, + sizeof(acx->rate_retry_policy)); + + ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx set rate mgmt params failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl12xx_acx_config_hangover(struct wl1271 *wl) +{ + struct wl12xx_acx_config_hangover *acx; + struct conf_hangover_settings *conf = &wl->conf.hangover; + int ret; + + wl1271_debug(DEBUG_ACX, "acx config hangover"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->recover_time = cpu_to_le32(conf->recover_time); + acx->hangover_period = conf->hangover_period; + acx->dynamic_mode = conf->dynamic_mode; + acx->early_termination_mode = conf->early_termination_mode; + acx->max_period = conf->max_period; + acx->min_period = conf->min_period; + acx->increase_delta = conf->increase_delta; + acx->decrease_delta = conf->decrease_delta; + acx->quiet_time = conf->quiet_time; + acx->increase_time = conf->increase_time; + acx->window_size = conf->window_size; + + ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx, + sizeof(*acx)); + + if (ret < 0) { + wl1271_warning("acx config hangover failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; + +} + +int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif, + s8 *avg_rssi) +{ + struct acx_roaming_stats *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx roaming statistics"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->role_id = wlvif->role_id; + ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL, + acx, sizeof(*acx), sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx roaming statistics failed: %d", ret); + ret = -ENOMEM; + goto out; + } + + *avg_rssi = acx->rssi_beacon; +out: + kfree(acx); + return ret; +} + +#ifdef CONFIG_PM +/* Set the global behaviour of RX filters - On/Off + default action */ +int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, + enum rx_filter_action action) +{ + struct acx_default_rx_filter *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx default rx filter en: %d act: %d", + enable, action); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + acx->enable = enable; + acx->default_action = action; + + ret = 
wl1271_cmd_configure(wl, ACX_ENABLE_RX_DATA_FILTER, acx, + sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx default rx filter enable failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +/* Configure or disable a specific RX filter pattern */ +int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable, + struct wl12xx_rx_filter *filter) +{ + struct acx_rx_filter_cfg *acx; + int fields_size = 0; + int acx_size; + int ret; + + WARN_ON(enable && !filter); + WARN_ON(index >= WL1271_MAX_RX_FILTERS); + + wl1271_debug(DEBUG_ACX, + "acx set rx filter idx: %d enable: %d filter: %p", + index, enable, filter); + + if (enable) { + fields_size = wl1271_rx_filter_get_fields_size(filter); + + wl1271_debug(DEBUG_ACX, "act: %d num_fields: %d field_size: %d", + filter->action, filter->num_fields, fields_size); + } + + acx_size = ALIGN(sizeof(*acx) + fields_size, 4); + acx = kzalloc(acx_size, GFP_KERNEL); + + if (!acx) + return -ENOMEM; + + acx->enable = enable; + acx->index = index; + + if (enable) { + acx->num_fields = filter->num_fields; + acx->action = filter->action; + wl1271_rx_filter_flatten_fields(filter, acx->fields); + } + + wl1271_dump(DEBUG_ACX, "RX_FILTER: ", acx, acx_size); + + ret = wl1271_cmd_configure(wl, ACX_SET_RX_DATA_FILTER, acx, acx_size); + if (ret < 0) { + wl1271_warning("setting rx filter failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} +#endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h new file mode 100644 index 000000000..954d57ec9 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -0,0 +1,1136 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
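The two CONFIG_PM helpers that close acx.c above are meant to be used together: set the global default action first, then program the individual pattern slots. A hypothetical caller is sketched below; example_arm_rx_filters is invented, FILTER_DROP is assumed to be one of the enum rx_filter_action values defined elsewhere in the driver, and the wl12xx_rx_filter is assumed to have been built beforehand (for instance from a wowlan pattern).

static int example_arm_rx_filters(struct wl1271 *wl,
				  struct wl12xx_rx_filter *filter)
{
	int ret;

	/* frames matched by no pattern slot follow the default action */
	ret = wl1271_acx_default_rx_filter_enable(wl, true, FILTER_DROP);
	if (ret < 0)
		return ret;

	/* program pattern slot 0 with the prepared filter */
	return wl1271_acx_set_rx_filter(wl, 0, true, filter);
}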
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __ACX_H__ +#define __ACX_H__ + +#include "wlcore.h" +#include "cmd.h" + +/************************************************************************* + + Host Interrupt Register (WiLink -> Host) + +**************************************************************************/ +/* HW Initiated interrupt Watchdog timer expiration */ +#define WL1271_ACX_INTR_WATCHDOG BIT(0) +/* Init sequence is done (masked interrupt, detection through polling only ) */ +#define WL1271_ACX_INTR_INIT_COMPLETE BIT(1) +/* Event was entered to Event MBOX #A*/ +#define WL1271_ACX_INTR_EVENT_A BIT(2) +/* Event was entered to Event MBOX #B*/ +#define WL1271_ACX_INTR_EVENT_B BIT(3) +/* Command processing completion*/ +#define WL1271_ACX_INTR_CMD_COMPLETE BIT(4) +/* Signaling the host on HW wakeup */ +#define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) +/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ +#define WL1271_ACX_INTR_DATA BIT(6) +/* Trace message on MBOX #A */ +#define WL1271_ACX_INTR_TRACE_A BIT(7) +/* Trace message on MBOX #B */ +#define WL1271_ACX_INTR_TRACE_B BIT(8) +/* SW FW Initiated interrupt Watchdog timer expiration */ +#define WL1271_ACX_SW_INTR_WATCHDOG BIT(9) + +#define WL1271_ACX_INTR_ALL 0xFFFFFFFF + +/* all possible interrupts - only appropriate ones will be masked in */ +#define WLCORE_ALL_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \ + WL1271_ACX_INTR_EVENT_A | \ + WL1271_ACX_INTR_EVENT_B | \ + WL1271_ACX_INTR_HW_AVAILABLE | \ + WL1271_ACX_INTR_DATA | \ + WL1271_ACX_SW_INTR_WATCHDOG) + +/* Target's information element */ +struct acx_header { + struct wl1271_cmd_header cmd; + + /* acx (or information element) header */ + __le16 id; + + /* payload length (not including headers */ + __le16 len; +} __packed; + +struct acx_error_counter { + struct acx_header header; + + /* The number of PLCP errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + __le32 PLCP_error; + + /* The number of FCS errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + __le32 FCS_error; + + /* The number of MPDUs without PLCP header errors received*/ + /* since the last time this information element was interrogated. */ + /* This field is automatically cleared when it is interrogated.*/ + __le32 valid_frame; + + /* the number of missed sequence numbers in the squentially */ + /* values of frames seq numbers */ + __le32 seq_num_miss; +} __packed; + +enum wl12xx_role { + WL1271_ROLE_STA = 0, + WL1271_ROLE_IBSS, + WL1271_ROLE_AP, + WL1271_ROLE_DEVICE, + WL1271_ROLE_P2P_CL, + WL1271_ROLE_P2P_GO, + + WL12XX_INVALID_ROLE_TYPE = 0xff +}; + +enum wl1271_psm_mode { + /* Active mode */ + WL1271_PSM_CAM = 0, + + /* Power save mode */ + WL1271_PSM_PS = 1, + + /* Extreme low power */ + WL1271_PSM_ELP = 2, + + WL1271_PSM_MAX = WL1271_PSM_ELP, + + /* illegal out of band value of PSM mode */ + WL1271_PSM_ILLEGAL = 0xff +}; + +struct acx_sleep_auth { + struct acx_header header; + + /* The sleep level authorization of the device. 
*/ + /* 0 - Always active*/ + /* 1 - Power down mode: light / fast sleep*/ + /* 2 - ELP mode: Deep / Max sleep*/ + u8 sleep_auth; + u8 padding[3]; +} __packed; + +enum { + HOSTIF_PCI_MASTER_HOST_INDIRECT, + HOSTIF_PCI_MASTER_HOST_DIRECT, + HOSTIF_SLAVE, + HOSTIF_PKT_RING, + HOSTIF_DONTCARE = 0xFF +}; + +#define DEFAULT_UCAST_PRIORITY 0 +#define DEFAULT_RX_Q_PRIORITY 0 +#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */ +#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ +#define TRACE_BUFFER_MAX_SIZE 256 + +#define DP_RX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_TX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_RX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_COMPLETE_TIME_OUT 20 + +#define TX_MSDU_LIFETIME_MIN 0 +#define TX_MSDU_LIFETIME_MAX 3000 +#define TX_MSDU_LIFETIME_DEF 512 +#define RX_MSDU_LIFETIME_MIN 0 +#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF +#define RX_MSDU_LIFETIME_DEF 512000 + +struct acx_rx_msdu_lifetime { + struct acx_header header; + + /* + * The maximum amount of time, in TU, before the + * firmware discards the MSDU. + */ + __le32 lifetime; +} __packed; + +enum acx_slot_type { + SLOT_TIME_LONG = 0, + SLOT_TIME_SHORT = 1, + DEFAULT_SLOT_TIME = SLOT_TIME_SHORT, + MAX_SLOT_TIMES = 0xFF +}; + +#define STATION_WONE_INDEX 0 + +struct acx_slot { + struct acx_header header; + + u8 role_id; + u8 wone_index; /* Reserved */ + u8 slot_time; + u8 reserved[5]; +} __packed; + + +#define ACX_MC_ADDRESS_GROUP_MAX (8) +#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX) + +struct acx_dot11_grp_addr_tbl { + struct acx_header header; + + u8 role_id; + u8 enabled; + u8 num_groups; + u8 pad[1]; + u8 mac_table[ADDRESS_GROUP_MAX_LEN]; +} __packed; + +struct acx_rx_timeout { + struct acx_header header; + + u8 role_id; + u8 reserved; + __le16 ps_poll_timeout; + __le16 upsd_timeout; + u8 padding[2]; +} __packed; + +struct acx_rts_threshold { + struct acx_header header; + + u8 role_id; + u8 reserved; + __le16 threshold; +} __packed; + +struct acx_beacon_filter_option { + struct acx_header header; + + u8 role_id; + u8 enable; + /* + * The number of beacons without the unicast TIM + * bit set that the firmware buffers before + * signaling the host about ready frames. + * When set to 0 and the filter is enabled, beacons + * without the unicast TIM bit set are dropped. + */ + u8 max_num_beacons; + u8 pad[1]; +} __packed; + +/* + * ACXBeaconFilterEntry (not 221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * + * ACXBeaconFilterEntry (221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * 2 3 OUI + * 5 1 Type + * 6 2 Version + * + * + * Treatment bit mask - The information element handling: + * bit 0 - The information element is compared and transferred + * in case of change. + * bit 1 - The information element is transferred to the host + * with each appearance or disappearance. + * Note that both bits can be set at the same time. 
+ */ +#define BEACON_FILTER_TABLE_MAX_IE_NUM (32) +#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6) +#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2) +#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6) +#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \ + BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \ + (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ + BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) + +struct acx_beacon_filter_ie_table { + struct acx_header header; + + u8 role_id; + u8 num_ie; + u8 pad[2]; + u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; +} __packed; + +struct acx_conn_monit_params { + struct acx_header header; + + u8 role_id; + u8 padding[3]; + __le32 synch_fail_thold; /* number of beacons missed */ + __le32 bss_lose_timeout; /* number of TU's from synch fail */ +} __packed; + +struct acx_bt_wlan_coex { + struct acx_header header; + + u8 enable; + u8 pad[3]; +} __packed; + +struct acx_bt_wlan_coex_param { + struct acx_header header; + + __le32 params[CONF_SG_PARAMS_MAX]; + u8 param_idx; + u8 padding[3]; +} __packed; + +struct acx_dco_itrim_params { + struct acx_header header; + + u8 enable; + u8 padding[3]; + __le32 timeout; +} __packed; + +struct acx_energy_detection { + struct acx_header header; + + /* The RX Clear Channel Assessment threshold in the PHY */ + __le16 rx_cca_threshold; + u8 tx_energy_detection; + u8 pad; +} __packed; + +struct acx_beacon_broadcast { + struct acx_header header; + + u8 role_id; + /* Enables receiving of broadcast packets in PS mode */ + u8 rx_broadcast_in_ps; + + __le16 beacon_rx_timeout; + __le16 broadcast_timeout; + + /* Consecutive PS Poll failures before updating the host */ + u8 ps_poll_threshold; + u8 pad[1]; +} __packed; + +struct acx_event_mask { + struct acx_header header; + + __le32 event_mask; + __le32 high_event_mask; /* Unused */ +} __packed; + +#define SCAN_PASSIVE BIT(0) +#define SCAN_5GHZ_BAND BIT(1) +#define SCAN_TRIGGERED BIT(2) +#define SCAN_PRIORITY_HIGH BIT(3) + +/* When set, disable HW encryption */ +#define DF_ENCRYPTION_DISABLE 0x01 +#define DF_SNIFF_MODE_ENABLE 0x80 + +struct acx_feature_config { + struct acx_header header; + + u8 role_id; + u8 padding[3]; + __le32 options; + __le32 data_flow_options; +} __packed; + +struct acx_current_tx_power { + struct acx_header header; + + u8 role_id; + u8 current_tx_power; + u8 padding[2]; +} __packed; + +struct acx_wake_up_condition { + struct acx_header header; + + u8 role_id; + u8 wake_up_event; /* Only one bit can be set */ + u8 listen_interval; + u8 pad[1]; +} __packed; + +struct acx_aid { + struct acx_header header; + + /* + * To be set when associated with an AP. + */ + u8 role_id; + u8 reserved; + __le16 aid; +} __packed; + +enum acx_preamble_type { + ACX_PREAMBLE_LONG = 0, + ACX_PREAMBLE_SHORT = 1 +}; + +struct acx_preamble { + struct acx_header header; + + /* + * When set, the WiLink transmits the frames with a short preamble and + * when cleared, the WiLink transmits the frames with a long preamble. 
+ */ + u8 role_id; + u8 preamble; + u8 padding[2]; +} __packed; + +enum acx_ctsprotect_type { + CTSPROTECT_DISABLE = 0, + CTSPROTECT_ENABLE = 1 +}; + +struct acx_ctsprotect { + struct acx_header header; + u8 role_id; + u8 ctsprotect; + u8 padding[2]; +} __packed; + +struct acx_rate_class { + __le32 enabled_rates; + u8 short_retry_limit; + u8 long_retry_limit; + u8 aflags; + u8 reserved; +}; + +struct acx_rate_policy { + struct acx_header header; + + __le32 rate_policy_idx; + struct acx_rate_class rate_policy; +} __packed; + +struct acx_ac_cfg { + struct acx_header header; + u8 role_id; + u8 ac; + u8 aifsn; + u8 cw_min; + __le16 cw_max; + __le16 tx_op_limit; +} __packed; + +struct acx_tid_config { + struct acx_header header; + u8 role_id; + u8 queue_id; + u8 channel_type; + u8 tsid; + u8 ps_scheme; + u8 ack_policy; + u8 padding[2]; + __le32 apsd_conf[2]; +} __packed; + +struct acx_frag_threshold { + struct acx_header header; + __le16 frag_threshold; + u8 padding[2]; +} __packed; + +struct acx_tx_config_options { + struct acx_header header; + __le16 tx_compl_timeout; /* msec */ + __le16 tx_compl_threshold; /* number of packets */ +} __packed; + +struct wl12xx_acx_config_memory { + struct acx_header header; + + u8 rx_mem_block_num; + u8 tx_min_mem_block_num; + u8 num_stations; + u8 num_ssid_profiles; + __le32 total_tx_descriptors; + u8 dyn_mem_enable; + u8 tx_free_req; + u8 rx_free_req; + u8 tx_min; + u8 fwlog_blocks; + u8 padding[3]; +} __packed; + +struct wl1271_acx_mem_map { + struct acx_header header; + + __le32 code_start; + __le32 code_end; + + __le32 wep_defkey_start; + __le32 wep_defkey_end; + + __le32 sta_table_start; + __le32 sta_table_end; + + __le32 packet_template_start; + __le32 packet_template_end; + + /* Address of the TX result interface (control block) */ + __le32 tx_result; + __le32 tx_result_queue_start; + + __le32 queue_memory_start; + __le32 queue_memory_end; + + __le32 packet_memory_pool_start; + __le32 packet_memory_pool_end; + + __le32 debug_buffer1_start; + __le32 debug_buffer1_end; + + __le32 debug_buffer2_start; + __le32 debug_buffer2_end; + + /* Number of blocks FW allocated for TX packets */ + __le32 num_tx_mem_blocks; + + /* Number of blocks FW allocated for RX packets */ + __le32 num_rx_mem_blocks; + + /* the following 4 fields are valid in SLAVE mode only */ + u8 *tx_cbuf; + u8 *rx_cbuf; + __le32 rx_ctrl; + __le32 tx_ctrl; +} __packed; + +struct wl1271_acx_rx_config_opt { + struct acx_header header; + + __le16 mblk_threshold; + __le16 threshold; + __le16 timeout; + u8 queue_type; + u8 reserved; +} __packed; + + +struct wl1271_acx_bet_enable { + struct acx_header header; + + u8 role_id; + u8 enable; + u8 max_consecutive; + u8 padding[1]; +} __packed; + +#define ACX_IPV4_VERSION 4 +#define ACX_IPV6_VERSION 6 +#define ACX_IPV4_ADDR_SIZE 4 + +/* bitmap of enabled arp_filter features */ +#define ACX_ARP_FILTER_ARP_FILTERING BIT(0) +#define ACX_ARP_FILTER_AUTO_ARP BIT(1) + +struct wl1271_acx_arp_filter { + struct acx_header header; + u8 role_id; + u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */ + u8 enable; /* bitmap of enabled ARP filtering features */ + u8 padding[1]; + u8 address[16]; /* The configured device IP address - all ARP + requests directed to this IP address will pass + through. For IPv4, the first four bytes are + used. 
*/ +} __packed; + +struct wl1271_acx_pm_config { + struct acx_header header; + + __le32 host_clk_settling_time; + u8 host_fast_wakeup_support; + u8 padding[3]; +} __packed; + +struct wl1271_acx_keep_alive_mode { + struct acx_header header; + + u8 role_id; + u8 enabled; + u8 padding[2]; +} __packed; + +enum { + ACX_KEEP_ALIVE_NO_TX = 0, + ACX_KEEP_ALIVE_PERIOD_ONLY +}; + +enum { + ACX_KEEP_ALIVE_TPL_INVALID = 0, + ACX_KEEP_ALIVE_TPL_VALID +}; + +struct wl1271_acx_keep_alive_config { + struct acx_header header; + + u8 role_id; + u8 index; + u8 tpl_validation; + u8 trigger; + __le32 period; +} __packed; + +/* TODO: maybe this needs to be moved somewhere else? */ +#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) +#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) +#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) +#define HOST_IF_CFG_RX_PAD_TO_SDIO_BLK BIT(4) +#define HOST_IF_CFG_ADD_RX_ALIGNMENT BIT(6) + +enum { + WL1271_ACX_TRIG_TYPE_LEVEL = 0, + WL1271_ACX_TRIG_TYPE_EDGE, +}; + +enum { + WL1271_ACX_TRIG_DIR_LOW = 0, + WL1271_ACX_TRIG_DIR_HIGH, + WL1271_ACX_TRIG_DIR_BIDIR, +}; + +enum { + WL1271_ACX_TRIG_ENABLE = 1, + WL1271_ACX_TRIG_DISABLE, +}; + +enum { + WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0, + WL1271_ACX_TRIG_METRIC_RSSI_DATA, + WL1271_ACX_TRIG_METRIC_SNR_BEACON, + WL1271_ACX_TRIG_METRIC_SNR_DATA, +}; + +enum { + WL1271_ACX_TRIG_IDX_RSSI = 0, + WL1271_ACX_TRIG_COUNT = 8, +}; + +struct wl1271_acx_rssi_snr_trigger { + struct acx_header header; + + u8 role_id; + u8 metric; + u8 type; + u8 dir; + __le16 threshold; + __le16 pacing; /* 0 - 60000 ms */ + u8 hysteresis; + u8 index; + u8 enable; + u8 padding[1]; +}; + +struct wl1271_acx_rssi_snr_avg_weights { + struct acx_header header; + + u8 role_id; + u8 padding[3]; + u8 rssi_beacon; + u8 rssi_data; + u8 snr_beacon; + u8 snr_data; +}; + + +/* special capability bit (not employed by the 802.11n spec) */ +#define WL12XX_HT_CAP_HT_OPERATION BIT(16) + +/* + * ACX_PEER_HT_CAP + * Configure HT capabilities - declare the capabilities of the peer + * we are connected to. + */ +struct wl1271_acx_ht_capabilities { + struct acx_header header; + + /* bitmask of capability bits supported by the peer */ + __le32 ht_capabilites; + + /* Indicates to which link these capabilities apply. */ + u8 hlid; + + /* + * This the maximum A-MPDU length supported by the AP. The FW may not + * exceed this length when sending A-MPDUs + */ + u8 ampdu_max_length; + + /* This is the minimal spacing required when sending A-MPDUs to the AP*/ + u8 ampdu_min_spacing; + + u8 padding; +} __packed; + +/* + * ACX_HT_BSS_OPERATION + * Configure HT capabilities - AP rules for behavior in the BSS. + */ +struct wl1271_acx_ht_information { + struct acx_header header; + + u8 role_id; + + /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */ + u8 rifs_mode; + + /* Values: 0 - 3 like in spec */ + u8 ht_protection; + + /* Values: 0 - GF protection not required, 1 - GF protection required */ + u8 gf_protection; + + /*Values: 0 - TX Burst limit not required, 1 - TX Burst Limit required*/ + u8 ht_tx_burst_limit; + + /* + * Values: 0 - Dual CTS protection not required, + * 1 - Dual CTS Protection required + * Note: When this value is set to 1 FW will protect all TXOP with RTS + * frame and will not use CTS-to-self regardless of the value of the + * ACX_CTS_PROTECTION information element + */ + u8 dual_cts_protection; + + u8 padding[2]; +} __packed; + +struct wl1271_acx_ba_initiator_policy { + struct acx_header header; + + /* Specifies role Id, Range 0-7, 0xFF means ANY role. 
*/ + u8 role_id; + + /* + * Per TID setting for allowing TX BA. Set a bit to 1 to allow + * TX BA sessions for the corresponding TID. + */ + u8 tid_bitmap; + + /* Windows size in number of packets */ + u8 win_size; + + u8 padding1[1]; + + /* As initiator inactivity timeout in time units(TU) of 1024us */ + u16 inactivity_timeout; + + u8 padding[2]; +} __packed; + +struct wl1271_acx_ba_receiver_setup { + struct acx_header header; + + /* Specifies link id, range 0-31 */ + u8 hlid; + + u8 tid; + + u8 enable; + + /* Windows size in number of packets */ + u8 win_size; + + /* BA session starting sequence number. RANGE 0-FFF */ + u16 ssn; + + u8 padding[2]; +} __packed; + +struct wl12xx_acx_fw_tsf_information { + struct acx_header header; + + u8 role_id; + u8 padding1[3]; + __le32 current_tsf_high; + __le32 current_tsf_low; + __le32 last_bttt_high; + __le32 last_tbtt_low; + u8 last_dtim_count; + u8 padding2[3]; +} __packed; + +struct wl1271_acx_ps_rx_streaming { + struct acx_header header; + + u8 role_id; + u8 tid; + u8 enable; + + /* interval between triggers (10-100 msec) */ + u8 period; + + /* timeout before first trigger (0-200 msec) */ + u8 timeout; + u8 padding[3]; +} __packed; + +struct wl1271_acx_ap_max_tx_retry { + struct acx_header header; + + u8 role_id; + u8 padding_1; + + /* + * the number of frames transmission failures before + * issuing the aging event. + */ + __le16 max_tx_retry; +} __packed; + +struct wl1271_acx_config_ps { + struct acx_header header; + + u8 exit_retries; + u8 enter_retries; + u8 padding[2]; + __le32 null_data_rate; +} __packed; + +struct wl1271_acx_inconnection_sta { + struct acx_header header; + + u8 addr[ETH_ALEN]; + u8 role_id; + u8 padding; +} __packed; + +/* + * ACX_FM_COEX_CFG + * set the FM co-existence parameters. + */ +struct wl1271_acx_fm_coex { + struct acx_header header; + /* enable(1) / disable(0) the FM Coex feature */ + u8 enable; + /* + * Swallow period used in COEX PLL swallowing mechanism. + * 0xFF = use FW default + */ + u8 swallow_period; + /* + * The N divider used in COEX PLL swallowing mechanism for Fref of + * 38.4/19.2 Mhz. 0xFF = use FW default + */ + u8 n_divider_fref_set_1; + /* + * The N divider used in COEX PLL swallowing mechanism for Fref of + * 26/52 Mhz. 0xFF = use FW default + */ + u8 n_divider_fref_set_2; + /* + * The M divider used in COEX PLL swallowing mechanism for Fref of + * 38.4/19.2 Mhz. 0xFFFF = use FW default + */ + __le16 m_divider_fref_set_1; + /* + * The M divider used in COEX PLL swallowing mechanism for Fref of + * 26/52 Mhz. 0xFFFF = use FW default + */ + __le16 m_divider_fref_set_2; + /* + * The time duration in uSec required for COEX PLL to stabilize. + * 0xFFFFFFFF = use FW default + */ + __le32 coex_pll_stabilization_time; + /* + * The time duration in uSec required for LDO to stabilize. + * 0xFFFFFFFF = use FW default + */ + __le16 ldo_stabilization_time; + /* + * The disturbed frequency band margin around the disturbed frequency + * center (single sided). + * For example, if 2 is configured, the following channels will be + * considered disturbed channel: + * 80 +- 0.1 MHz, 91 +- 0.1 MHz, 98 +- 0.1 MHz, 102 +- 0.1 MH + * 0xFF = use FW default + */ + u8 fm_disturbed_band_margin; + /* + * The swallow clock difference of the swallowing mechanism. 
+ * 0xFF = use FW default + */ + u8 swallow_clk_diff; +} __packed; + +#define ACX_RATE_MGMT_ALL_PARAMS 0xff +struct wl12xx_acx_set_rate_mgmt_params { + struct acx_header header; + + u8 index; /* 0xff to configure all params */ + u8 padding1; + __le16 rate_retry_score; + __le16 per_add; + __le16 per_th1; + __le16 per_th2; + __le16 max_per; + u8 inverse_curiosity_factor; + u8 tx_fail_low_th; + u8 tx_fail_high_th; + u8 per_alpha_shift; + u8 per_add_shift; + u8 per_beta1_shift; + u8 per_beta2_shift; + u8 rate_check_up; + u8 rate_check_down; + u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES]; + u8 padding2[2]; +} __packed; + +struct wl12xx_acx_config_hangover { + struct acx_header header; + + __le32 recover_time; + u8 hangover_period; + u8 dynamic_mode; + u8 early_termination_mode; + u8 max_period; + u8 min_period; + u8 increase_delta; + u8 decrease_delta; + u8 quiet_time; + u8 increase_time; + u8 window_size; + u8 padding[2]; +} __packed; + + +struct acx_default_rx_filter { + struct acx_header header; + u8 enable; + + /* action of type FILTER_XXX */ + u8 default_action; + + u8 pad[2]; +} __packed; + + +struct acx_rx_filter_cfg { + struct acx_header header; + + u8 enable; + + /* 0 - WL1271_MAX_RX_FILTERS-1 */ + u8 index; + + u8 action; + + u8 num_fields; + u8 fields[0]; +} __packed; + +struct acx_roaming_stats { + struct acx_header header; + + u8 role_id; + u8 pad[3]; + u32 missed_beacons; + u8 snr_data; + u8 snr_bacon; + s8 rssi_data; + s8 rssi_beacon; +} __packed; + +enum { + ACX_WAKE_UP_CONDITIONS = 0x0000, + ACX_MEM_CFG = 0x0001, + ACX_SLOT = 0x0002, + ACX_AC_CFG = 0x0003, + ACX_MEM_MAP = 0x0004, + ACX_AID = 0x0005, + ACX_MEDIUM_USAGE = 0x0006, + ACX_STATISTICS = 0x0007, + ACX_PWR_CONSUMPTION_STATISTICS = 0x0008, + ACX_TID_CFG = 0x0009, + ACX_PS_RX_STREAMING = 0x000A, + ACX_BEACON_FILTER_OPT = 0x000B, + ACX_NOISE_HIST = 0x000C, + ACX_HDK_VERSION = 0x000D, + ACX_PD_THRESHOLD = 0x000E, + ACX_TX_CONFIG_OPT = 0x000F, + ACX_CCA_THRESHOLD = 0x0010, + ACX_EVENT_MBOX_MASK = 0x0011, + ACX_CONN_MONIT_PARAMS = 0x0012, + ACX_DISABLE_BROADCASTS = 0x0013, + ACX_BCN_DTIM_OPTIONS = 0x0014, + ACX_SG_ENABLE = 0x0015, + ACX_SG_CFG = 0x0016, + ACX_FM_COEX_CFG = 0x0017, + ACX_BEACON_FILTER_TABLE = 0x0018, + ACX_ARP_IP_FILTER = 0x0019, + ACX_ROAMING_STATISTICS_TBL = 0x001A, + ACX_RATE_POLICY = 0x001B, + ACX_CTS_PROTECTION = 0x001C, + ACX_SLEEP_AUTH = 0x001D, + ACX_PREAMBLE_TYPE = 0x001E, + ACX_ERROR_CNT = 0x001F, + ACX_IBSS_FILTER = 0x0020, + ACX_SERVICE_PERIOD_TIMEOUT = 0x0021, + ACX_TSF_INFO = 0x0022, + ACX_CONFIG_PS_WMM = 0x0023, + ACX_ENABLE_RX_DATA_FILTER = 0x0024, + ACX_SET_RX_DATA_FILTER = 0x0025, + ACX_GET_DATA_FILTER_STATISTICS = 0x0026, + ACX_RX_CONFIG_OPT = 0x0027, + ACX_FRAG_CFG = 0x0028, + ACX_BET_ENABLE = 0x0029, + ACX_RSSI_SNR_TRIGGER = 0x002A, + ACX_RSSI_SNR_WEIGHTS = 0x002B, + ACX_KEEP_ALIVE_MODE = 0x002C, + ACX_SET_KEEP_ALIVE_CONFIG = 0x002D, + ACX_BA_SESSION_INIT_POLICY = 0x002E, + ACX_BA_SESSION_RX_SETUP = 0x002F, + ACX_PEER_HT_CAP = 0x0030, + ACX_HT_BSS_OPERATION = 0x0031, + ACX_COEX_ACTIVITY = 0x0032, + ACX_BURST_MODE = 0x0033, + ACX_SET_RATE_MGMT_PARAMS = 0x0034, + ACX_GET_RATE_MGMT_PARAMS = 0x0035, + ACX_SET_RATE_ADAPT_PARAMS = 0x0036, + ACX_SET_DCO_ITRIM_PARAMS = 0x0037, + ACX_GEN_FW_CMD = 0x0038, + ACX_HOST_IF_CFG_BITMAP = 0x0039, + ACX_MAX_TX_FAILURE = 0x003A, + ACX_UPDATE_INCONNECTION_STA_LIST = 0x003B, + DOT11_RX_MSDU_LIFE_TIME = 0x003C, + DOT11_CUR_TX_PWR = 0x003D, + DOT11_RTS_THRESHOLD = 0x003E, + DOT11_GROUP_ADDRESS_TBL = 0x003F, + ACX_PM_CONFIG = 0x0040, + ACX_CONFIG_PS = 0x0041, 
+ ACX_CONFIG_HANGOVER = 0x0042, + ACX_FEATURE_CFG = 0x0043, + ACX_PROTECTION_CFG = 0x0044, +}; + + +int wl1271_acx_wake_up_conditions(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 wake_up_event, u8 listen_interval); +int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); +int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, + int power); +int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_mem_map(struct wl1271 *wl, + struct acx_header *mem_map, size_t len); +int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); +int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_slot_type slot_time); +int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, void *mc_list, u32 mc_list_len); +int wl1271_acx_service_period_timeout(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u32 rts_threshold); +int wl1271_acx_dco_itrim_params(struct wl1271 *wl); +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable_filter); +int wl1271_acx_beacon_filter_table(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); +int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); +int wl12xx_acx_sg_cfg(struct wl1271 *wl); +int wl1271_acx_cca_threshold(struct wl1271 *wl); +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid); +int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); +int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_preamble_type preamble); +int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum acx_ctsprotect_type ctsprotect); +int wl1271_acx_statistics(struct wl1271 *wl, void *stats); +int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, + u8 idx); +int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop); +int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue_id, u8 channel_type, + u8 tsid, u8 ps_scheme, u8 ack_policy, + u32 apsd_conf0, u32 apsd_conf1); +int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); +int wl1271_acx_tx_config_options(struct wl1271 *wl); +int wl12xx_acx_mem_cfg(struct wl1271 *wl); +int wl1271_acx_init_mem_config(struct wl1271 *wl); +int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); +int wl1271_acx_smart_reflex(struct wl1271 *wl); +int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 enable, __be32 address); +int wl1271_acx_pm_config(struct wl1271 *wl); +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif, + bool enable); +int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 index, u8 tpl_valid); +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable, s16 thold, u8 hyst); +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, u8 hlid); +int 
wl1271_acx_set_ht_information(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u16 ht_operation_mode); +int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, + u16 ssn, bool enable, u8 peer_hlid); +int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u64 *mactime); +int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable); +int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 *addr); +int wl1271_acx_fm_coex(struct wl1271 *wl); +int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); +int wl12xx_acx_config_hangover(struct wl1271 *wl); +int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif, + s8 *avg_rssi); + +#ifdef CONFIG_PM +int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, + enum rx_filter_action action); +int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable, + struct wl12xx_rx_filter *filter); +#endif /* CONFIG_PM */ +#endif /* __WL1271_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c new file mode 100644 index 000000000..19b7ec7b6 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -0,0 +1,532 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "debug.h" +#include "acx.h" +#include "boot.h" +#include "io.h" +#include "event.h" +#include "rx.h" +#include "hw_ops.h" + +static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) +{ + u32 cpu_ctrl; + int ret; + + /* 10.5.0 run the firmware (I) */ + ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl); + if (ret < 0) + goto out; + + /* 10.5.1 run the firmware (II) */ + cpu_ctrl |= flag; + ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl); + +out: + return ret; +} + +static int wlcore_boot_parse_fw_ver(struct wl1271 *wl, + struct wl1271_static_data *static_data) +{ + int ret; + + strncpy(wl->chip.fw_ver_str, static_data->fw_version, + sizeof(wl->chip.fw_ver_str)); + + /* make sure the string is NULL-terminated */ + wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; + + ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", + &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], + &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], + &wl->chip.fw_ver[4]); + + if (ret != 5) { + wl1271_warning("fw version incorrect value"); + memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); + ret = -EINVAL; + goto out; + } + + ret = wlcore_identify_fw(wl); + if (ret < 0) + goto out; +out: + return ret; +} + +static int wlcore_validate_fw_ver(struct wl1271 *wl) +{ + unsigned int *fw_ver = wl->chip.fw_ver; + unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ? + wl->min_mr_fw_ver : wl->min_sr_fw_ver; + char min_fw_str[32] = ""; + int i; + + /* the chip must be exactly equal */ + if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])) + goto fail; + + /* the firmware type must be equal */ + if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE])) + goto fail; + + /* the project number must be equal */ + if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE])) + goto fail; + + /* the API version must be greater or equal */ + if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])) + goto fail; + + /* if the API version is equal... 
*/ + if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) || + (min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) && + /* ...the minor must be greater or equal */ + ((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) && + (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR]))) + goto fail; + + return 0; + +fail: + for (i = 0; i < NUM_FW_VER; i++) + if (min_ver[i] == WLCORE_FW_VER_IGNORE) + snprintf(min_fw_str, sizeof(min_fw_str), + "%s*.", min_fw_str); + else + snprintf(min_fw_str, sizeof(min_fw_str), + "%s%u.", min_fw_str, min_ver[i]); + + wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n" + "Please use at least FW %s\n" + "You can get the latest firmwares at:\n" + "git://github.com/TI-OpenLink/firmwares.git", + fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE], + fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE], + fw_ver[FW_VER_MINOR], min_fw_str); + return -EINVAL; +} + +static int wlcore_boot_static_data(struct wl1271 *wl) +{ + struct wl1271_static_data *static_data; + size_t len = sizeof(*static_data) + wl->static_data_priv_len; + int ret; + + static_data = kmalloc(len, GFP_KERNEL); + if (!static_data) { + ret = -ENOMEM; + goto out; + } + + ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false); + if (ret < 0) + goto out_free; + + ret = wlcore_boot_parse_fw_ver(wl, static_data); + if (ret < 0) + goto out_free; + + ret = wlcore_validate_fw_ver(wl); + if (ret < 0) + goto out_free; + + ret = wlcore_handle_static_data(wl, static_data); + if (ret < 0) + goto out_free; + +out_free: + kfree(static_data); +out: + return ret; +} + +static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, + size_t fw_data_len, u32 dest) +{ + struct wlcore_partition_set partition; + int addr, chunk_num, partition_limit; + u8 *p, *chunk; + int ret; + + /* whal_FwCtrl_LoadFwImageSm() */ + + wl1271_debug(DEBUG_BOOT, "starting firmware upload"); + + wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", + fw_data_len, CHUNK_SIZE); + + if ((fw_data_len % 4) != 0) { + wl1271_error("firmware length not multiple of four"); + return -EIO; + } + + chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL); + if (!chunk) { + wl1271_error("allocation for firmware upload chunk failed"); + return -ENOMEM; + } + + memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition)); + partition.mem.start = dest; + ret = wlcore_set_partition(wl, &partition); + if (ret < 0) + goto out; + + /* 10.1 set partition limit and chunk num */ + chunk_num = 0; + partition_limit = wl->ptable[PART_DOWN].mem.size; + + while (chunk_num < fw_data_len / CHUNK_SIZE) { + /* 10.2 update partition, if needed */ + addr = dest + (chunk_num + 2) * CHUNK_SIZE; + if (addr > partition_limit) { + addr = dest + chunk_num * CHUNK_SIZE; + partition_limit = chunk_num * CHUNK_SIZE + + wl->ptable[PART_DOWN].mem.size; + partition.mem.start = addr; + ret = wlcore_set_partition(wl, &partition); + if (ret < 0) + goto out; + } + + /* 10.3 upload the chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", + p, addr); + ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false); + if (ret < 0) + goto out; + + chunk_num++; + } + + /* 10.4 upload the last chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, fw_data_len % CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", + fw_data_len % CHUNK_SIZE, p, addr); + ret = wlcore_write(wl, addr, chunk, fw_data_len % 
CHUNK_SIZE, false); + +out: + kfree(chunk); + return ret; +} + +int wlcore_boot_upload_firmware(struct wl1271 *wl) +{ + u32 chunks, addr, len; + int ret = 0; + u8 *fw; + + fw = wl->fw; + chunks = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); + + while (chunks--) { + addr = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + len = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + if (len > 300000) { + wl1271_info("firmware chunk too long: %u", len); + return -EINVAL; + } + wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", + chunks, addr, len); + ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); + if (ret != 0) + break; + fw += len; + } + + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware); + +int wlcore_boot_upload_nvs(struct wl1271 *wl) +{ + size_t nvs_len, burst_len; + int i; + u32 dest_addr, val; + u8 *nvs_ptr, *nvs_aligned; + int ret; + + if (wl->nvs == NULL) { + wl1271_error("NVS file is needed during boot"); + return -ENODEV; + } + + if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) { + struct wl1271_nvs_file *nvs = + (struct wl1271_nvs_file *)wl->nvs; + /* + * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz + * band configurations) can be removed when those NVS files stop + * floating around. + */ + if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || + wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { + if (nvs->general_params.dual_mode_select) + wl->enable_11a = true; + } + + if (wl->nvs_len != sizeof(struct wl1271_nvs_file) && + (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE || + wl->enable_11a)) { + wl1271_error("nvs size is not as expected: %zu != %zu", + wl->nvs_len, sizeof(struct wl1271_nvs_file)); + kfree(wl->nvs); + wl->nvs = NULL; + wl->nvs_len = 0; + return -EILSEQ; + } + + /* only the first part of the NVS needs to be uploaded */ + nvs_len = sizeof(nvs->nvs); + nvs_ptr = (u8 *) nvs->nvs; + } else { + struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs; + + if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) { + if (nvs->general_params.dual_mode_select) + wl->enable_11a = true; + } else { + wl1271_error("nvs size is not as expected: %zu != %zu", + wl->nvs_len, + sizeof(struct wl128x_nvs_file)); + kfree(wl->nvs); + wl->nvs = NULL; + wl->nvs_len = 0; + return -EILSEQ; + } + + /* only the first part of the NVS needs to be uploaded */ + nvs_len = sizeof(nvs->nvs); + nvs_ptr = (u8 *)nvs->nvs; + } + + /* update current MAC address to NVS */ + nvs_ptr[11] = wl->addresses[0].addr[0]; + nvs_ptr[10] = wl->addresses[0].addr[1]; + nvs_ptr[6] = wl->addresses[0].addr[2]; + nvs_ptr[5] = wl->addresses[0].addr[3]; + nvs_ptr[4] = wl->addresses[0].addr[4]; + nvs_ptr[3] = wl->addresses[0].addr[5]; + + /* + * Layout before the actual NVS tables: + * 1 byte : burst length. + * 2 bytes: destination address. + * n bytes: data to burst copy. + * + * This is ended by a 0 length, then the NVS tables. + */ + + /* FIXME: Do we need to check here whether the LSB is 1? 
*/ + while (nvs_ptr[0]) { + burst_len = nvs_ptr[0]; + dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); + + /* + * Due to our new wl1271_translate_reg_addr function, + * we need to add the register partition start address + * to the destination + */ + dest_addr += wl->curr_part.reg.start; + + /* We move our pointer to the data */ + nvs_ptr += 3; + + for (i = 0; i < burst_len; i++) { + if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) + | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); + + wl1271_debug(DEBUG_BOOT, + "nvs burst write 0x%x: 0x%x", + dest_addr, val); + ret = wlcore_write32(wl, dest_addr, val); + if (ret < 0) + return ret; + + nvs_ptr += 4; + dest_addr += 4; + } + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + } + + /* + * We've reached the first zero length, the first NVS table + * is located at an aligned offset which is at least 7 bytes further. + * NOTE: The wl->nvs->nvs element must be first, in order to + * simplify the casting, we assume it is at the beginning of + * the wl->nvs structure. + */ + nvs_ptr = (u8 *)wl->nvs + + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + + nvs_len -= nvs_ptr - (u8 *)wl->nvs; + + /* Now we must set the partition correctly */ + ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]); + if (ret < 0) + return ret; + + /* Copy the NVS tables to a new block to ensure alignment */ + nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); + if (!nvs_aligned) + return -ENOMEM; + + /* And finally we upload the NVS tables */ + ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, + false); + + kfree(nvs_aligned); + return ret; + +out_badnvs: + wl1271_error("nvs data is malformed"); + return -EILSEQ; +} +EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs); + +int wlcore_boot_run_firmware(struct wl1271 *wl) +{ + int loop, ret; + u32 chip_id, intr; + + /* Make sure we have the boot partition */ + ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); + if (ret < 0) + return ret; + + ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); + if (ret < 0) + return ret; + + ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id); + if (ret < 0) + return ret; + + wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); + + if (chip_id != wl->chip.id) { + wl1271_error("chip id doesn't match after firmware boot"); + return -EIO; + } + + /* wait for init to complete */ + loop = 0; + while (loop++ < INIT_LOOP) { + udelay(INIT_LOOP_DELAY); + ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr); + if (ret < 0) + return ret; + + if (intr == 0xffffffff) { + wl1271_error("error reading hardware complete " + "init indication"); + return -EIO; + } + /* check that ACX_INTR_INIT_COMPLETE is enabled */ + else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) { + ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK, + WL1271_ACX_INTR_INIT_COMPLETE); + if (ret < 0) + return ret; + break; + } + } + + if (loop > INIT_LOOP) { + wl1271_error("timeout waiting for the hardware to " + "complete initialization"); + return -EIO; + } + + /* get hardware config command mail box */ + ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr); + if (ret < 0) + return ret; + + wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr); + + /* get hardware config event mail box */ + ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]); + if (ret < 0) + return ret; + + wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size; + 
+ wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x", + wl->mbox_ptr[0], wl->mbox_ptr[1]); + + ret = wlcore_boot_static_data(wl); + if (ret < 0) { + wl1271_error("error getting static data"); + return ret; + } + + /* + * in case of full asynchronous mode the firmware event must be + * ready to receive event from the command mailbox + */ + + /* unmask required mbox events */ + ret = wl1271_event_unmask(wl); + if (ret < 0) { + wl1271_error("EVENT mask setting failed"); + return ret; + } + + /* set the working partition to its "running" mode offset */ + ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]); + + /* firmware startup completed */ + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware); diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h new file mode 100644 index 000000000..a525225f9 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/boot.h @@ -0,0 +1,55 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BOOT_H__ +#define __BOOT_H__ + +#include "wlcore.h" + +int wlcore_boot_upload_firmware(struct wl1271 *wl); +int wlcore_boot_upload_nvs(struct wl1271 *wl); +int wlcore_boot_run_firmware(struct wl1271 *wl); + +#define WL1271_NO_SUBBANDS 8 +#define WL1271_NO_POWER_LEVELS 4 +#define WL1271_FW_VERSION_MAX_LEN 20 + +struct wl1271_static_data { + u8 mac_address[ETH_ALEN]; + u8 padding[2]; + u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; + u32 hw_version; + u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; + u8 priv[0]; +}; + +/* number of times we try to read the INIT interrupt */ +#define INIT_LOOP 20000 + +/* delay between retries */ +#define INIT_LOOP_DELAY 50 + +#define WU_COUNTER_PAUSE_VAL 0x3FF +#define WELP_ARM_COMMAND_VAL 0x4 + +#endif diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c new file mode 100644 index 000000000..68919f8d4 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -0,0 +1,2061 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "io.h" +#include "acx.h" +#include "wl12xx_80211.h" +#include "cmd.h" +#include "event.h" +#include "tx.h" +#include "hw_ops.h" + +#define WL1271_CMD_FAST_POLL_COUNT 50 +#define WL1271_WAIT_EVENT_FAST_POLL_COUNT 20 + +/* + * send command to firmware + * + * @wl: wl struct + * @id: command id + * @buf: buffer containing the command, must work with dma + * @len: length of the buffer + * return the cmd status code on success. + */ +static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf, + size_t len, size_t res_len) +{ + struct wl1271_cmd_header *cmd; + unsigned long timeout; + u32 intr; + int ret; + u16 status; + u16 poll_count = 0; + + if (unlikely(wl->state == WLCORE_STATE_RESTARTING && + id != CMD_STOP_FWLOGGER)) + return -EIO; + + if (WARN_ON_ONCE(len < sizeof(*cmd))) + return -EIO; + + cmd = buf; + cmd->id = cpu_to_le16(id); + cmd->status = 0; + + WARN_ON(len % 4 != 0); + WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags)); + + ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false); + if (ret < 0) + return ret; + + /* + * TODO: we just need this because one bit is in a different + * place. Is there any better way? + */ + ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len); + if (ret < 0) + return ret; + + timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); + + ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr); + if (ret < 0) + return ret; + + while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { + if (time_after(jiffies, timeout)) { + wl1271_error("command complete timeout"); + return -ETIMEDOUT; + } + + poll_count++; + if (poll_count < WL1271_CMD_FAST_POLL_COUNT) + udelay(10); + else + msleep(1); + + ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr); + if (ret < 0) + return ret; + } + + /* read back the status code of the command */ + if (res_len == 0) + res_len = sizeof(struct wl1271_cmd_header); + + ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false); + if (ret < 0) + return ret; + + status = le16_to_cpu(cmd->status); + + ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK, + WL1271_ACX_INTR_CMD_COMPLETE); + if (ret < 0) + return ret; + + return status; +} + +/* + * send command to fw and return cmd status on success + * valid_rets contains a bitmap of allowed error codes + */ +static int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, size_t res_len, + unsigned long valid_rets) +{ + int ret = __wlcore_cmd_send(wl, id, buf, len, res_len); + + if (ret < 0) + goto fail; + + /* success is always a valid status */ + valid_rets |= BIT(CMD_STATUS_SUCCESS); + + if (ret >= MAX_COMMAND_STATUS || + !test_bit(ret, &valid_rets)) { + wl1271_error("command execute failure %d", ret); + ret = -EIO; + goto fail; + } + return ret; +fail: + wl12xx_queue_recovery_work(wl); + return ret; +} + +/* + * wrapper for wlcore_cmd_send that accept only CMD_STATUS_SUCCESS + * return 0 on success. 
+ */ +int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len) +{ + int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0); + + if (ret < 0) + return ret; + return 0; +} +EXPORT_SYMBOL_GPL(wl1271_cmd_send); + +/* + * Poll the mailbox event field until any of the bits in the mask is set or a + * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) + */ +int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, + u32 mask, bool *timeout) +{ + u32 *events_vector; + u32 event; + unsigned long timeout_time; + u16 poll_count = 0; + int ret = 0; + + *timeout = false; + + events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA); + if (!events_vector) + return -ENOMEM; + + timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); + + do { + if (time_after(jiffies, timeout_time)) { + wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", + (int)mask); + *timeout = true; + goto out; + } + + poll_count++; + if (poll_count < WL1271_WAIT_EVENT_FAST_POLL_COUNT) + usleep_range(50, 51); + else + usleep_range(1000, 5000); + + /* read from both event fields */ + ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector, + sizeof(*events_vector), false); + if (ret < 0) + goto out; + + event = *events_vector & mask; + + ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector, + sizeof(*events_vector), false); + if (ret < 0) + goto out; + + event |= *events_vector & mask; + } while (!event); + +out: + kfree(events_vector); + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout); + +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, + u8 *role_id) +{ + struct wl12xx_cmd_role_enable *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd role enable"); + + if (WARN_ON(*role_id != WL12XX_INVALID_ROLE_ID)) + return -EBUSY; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + /* get role id */ + cmd->role_id = find_first_zero_bit(wl->roles_map, WL12XX_MAX_ROLES); + if (cmd->role_id >= WL12XX_MAX_ROLES) { + ret = -EBUSY; + goto out_free; + } + + memcpy(cmd->mac_address, addr, ETH_ALEN); + cmd->role_type = role_type; + + ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role enable"); + goto out_free; + } + + __set_bit(cmd->role_id, wl->roles_map); + *role_id = cmd->role_id; + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id) +{ + struct wl12xx_cmd_role_disable *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd role disable"); + + if (WARN_ON(*role_id == WL12XX_INVALID_ROLE_ID)) + return -ENOENT; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + cmd->role_id = *role_id; + + ret = wl1271_cmd_send(wl, CMD_ROLE_DISABLE, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role disable"); + goto out_free; + } + + __clear_bit(*role_id, wl->roles_map); + *role_id = WL12XX_INVALID_ROLE_ID; + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid) +{ + if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX) + wl->session_ids[hlid] = 0; + + wl->session_ids[hlid]++; + + return wl->session_ids[hlid]; +} + +int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) +{ + unsigned long flags; + u8 link = find_first_zero_bit(wl->links_map, wl->num_links); + if (link >= wl->num_links) + return -EBUSY; + + wl->session_ids[link] = 
wlcore_get_new_session_id(wl, link); + + /* these bits are used by op_tx */ + spin_lock_irqsave(&wl->wl_lock, flags); + __set_bit(link, wl->links_map); + __set_bit(link, wlvif->links_map); + spin_unlock_irqrestore(&wl->wl_lock, flags); + + /* + * take the last "freed packets" value from the current FW status. + * on recovery, we might not have fw_status yet, and + * tx_lnk_free_pkts will be NULL. check for it. + */ + if (wl->fw_status->counters.tx_lnk_free_pkts) + wl->links[link].prev_freed_pkts = + wl->fw_status->counters.tx_lnk_free_pkts[link]; + wl->links[link].wlvif = wlvif; + + /* + * Take saved value for total freed packets from wlvif, in case this is + * recovery/resume + */ + if (wlvif->bss_type != BSS_TYPE_AP_BSS) + wl->links[link].total_freed_pkts = wlvif->total_freed_pkts; + + *hlid = link; + + wl->active_link_count++; + return 0; +} + +void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) +{ + unsigned long flags; + + if (*hlid == WL12XX_INVALID_LINK_ID) + return; + + /* these bits are used by op_tx */ + spin_lock_irqsave(&wl->wl_lock, flags); + __clear_bit(*hlid, wl->links_map); + __clear_bit(*hlid, wlvif->links_map); + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl->links[*hlid].allocated_pkts = 0; + wl->links[*hlid].prev_freed_pkts = 0; + wl->links[*hlid].ba_bitmap = 0; + eth_zero_addr(wl->links[*hlid].addr); + + /* + * At this point op_tx() will not add more packets to the queues. We + * can purge them. + */ + wl1271_tx_reset_link_queues(wl, *hlid); + wl->links[*hlid].wlvif = NULL; + + if (wlvif->bss_type == BSS_TYPE_AP_BSS && + *hlid == wlvif->ap.bcast_hlid) { + u32 sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING; + /* + * save the total freed packets in the wlvif, in case this is + * recovery or suspend + */ + wlvif->total_freed_pkts = wl->links[*hlid].total_freed_pkts; + + /* + * increment the initial seq number on recovery to account for + * transmitted packets that we haven't yet got in the FW status + */ + if (wlvif->encryption_type == KEY_GEM) + sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM; + + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) + wlvif->total_freed_pkts += sqn_padding; + } + + wl->links[*hlid].total_freed_pkts = 0; + + *hlid = WL12XX_INVALID_LINK_ID; + wl->active_link_count--; + WARN_ON_ONCE(wl->active_link_count < 0); +} + +u8 wlcore_get_native_channel_type(u8 nl_channel_type) +{ + switch (nl_channel_type) { + case NL80211_CHAN_NO_HT: + return WLCORE_CHAN_NO_HT; + case NL80211_CHAN_HT20: + return WLCORE_CHAN_HT20; + case NL80211_CHAN_HT40MINUS: + return WLCORE_CHAN_HT40MINUS; + case NL80211_CHAN_HT40PLUS: + return WLCORE_CHAN_HT40PLUS; + default: + WARN_ON(1); + return WLCORE_CHAN_NO_HT; + } +} +EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type); + +static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + enum ieee80211_band band, + int channel) +{ + struct wl12xx_cmd_role_start *cmd; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id); + + cmd->role_id = wlvif->dev_role_id; + if (band == IEEE80211_BAND_5GHZ) + cmd->band = WLCORE_BAND_5GHZ; + cmd->channel = channel; + + if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid); + if (ret) + goto out_free; + } + cmd->device.hlid = wlvif->dev_hlid; + cmd->device.session = wl->session_ids[wlvif->dev_hlid]; + + wl1271_debug(DEBUG_CMD, "role start: roleid=%d, 
hlid=%d, session=%d", + cmd->role_id, cmd->device.hlid, cmd->device.session); + + ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role enable"); + goto err_hlid; + } + + goto out_free; + +err_hlid: + /* clear links on error */ + wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct wl12xx_cmd_role_stop *cmd; + int ret; + + if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID)) + return -EINVAL; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role stop dev"); + + cmd->role_id = wlvif->dev_role_id; + cmd->disc_type = DISCONNECT_IMMEDIATE; + cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); + + ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role stop"); + goto out_free; + } + + wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wl12xx_cmd_role_start *cmd; + u32 supported_rates; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id); + + cmd->role_id = wlvif->role_id; + if (wlvif->band == IEEE80211_BAND_5GHZ) + cmd->band = WLCORE_BAND_5GHZ; + cmd->channel = wlvif->channel; + cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY; + cmd->sta.ssid_len = wlvif->ssid_len; + memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len); + memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN); + + supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | + wlcore_hw_sta_get_ap_rate_mask(wl, wlvif); + if (wlvif->p2p) + supported_rates &= ~CONF_TX_CCK_RATES; + + cmd->sta.local_rates = cpu_to_le32(supported_rates); + + cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type); + + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); + if (ret) + goto out_free; + } + cmd->sta.hlid = wlvif->sta.hlid; + cmd->sta.session = wl->session_ids[wlvif->sta.hlid]; + /* + * We don't have the correct remote rates in this stage. The + * rates will be reconfigured later, after association, if the + * firmware supports ACX_PEER_CAP. Otherwise, there's nothing + * we can do, so use all supported_rates here. + */ + cmd->sta.remote_rates = cpu_to_le32(supported_rates); + + wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " + "basic_rate_set: 0x%x, remote_rates: 0x%x", + wlvif->role_id, cmd->sta.hlid, cmd->sta.session, + wlvif->basic_rate_set, wlvif->rate_set); + + ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role start sta"); + goto err_hlid; + } + + wlvif->sta.role_chan_type = wlvif->channel_type; + goto out_free; + +err_hlid: + /* clear links on error. 
*/ + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +/* use this function to stop ibss as well */ +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl12xx_cmd_role_stop *cmd; + int ret; + + if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)) + return -EINVAL; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id); + + cmd->role_id = wlvif->role_id; + cmd->disc_type = DISCONNECT_IMMEDIATE; + cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); + + ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role stop sta"); + goto out_free; + } + + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl12xx_cmd_role_start *cmd; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + u32 supported_rates; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); + + /* trying to use hidden SSID with an old hostapd version */ + if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) { + wl1271_error("got a null SSID from beacon/bss"); + ret = -EINVAL; + goto out; + } + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid); + if (ret < 0) + goto out_free; + + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid); + if (ret < 0) + goto out_free_global; + + /* use the previous security seq, if this is a recovery/resume */ + wl->links[wlvif->ap.bcast_hlid].total_freed_pkts = + wlvif->total_freed_pkts; + + cmd->role_id = wlvif->role_id; + cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); + cmd->ap.bss_index = WL1271_AP_BSS_INDEX; + cmd->ap.global_hlid = wlvif->ap.global_hlid; + cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid; + cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid]; + cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid]; + cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->ap.dtim_interval = bss_conf->dtim_period; + cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; + /* FIXME: Change when adding DFS */ + cmd->ap.reset_tsf = 1; /* By default reset AP TSF */ + cmd->ap.wmm = wlvif->wmm_enabled; + cmd->channel = wlvif->channel; + cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type); + + if (!bss_conf->hidden_ssid) { + /* take the SSID from the beacon for backward compatibility */ + cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC; + cmd->ap.ssid_len = wlvif->ssid_len; + memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len); + } else { + cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN; + cmd->ap.ssid_len = bss_conf->ssid_len; + memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len); + } + + supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | + wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif); + if (wlvif->p2p) + supported_rates &= ~CONF_TX_CCK_RATES; + + wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x", + supported_rates); + + cmd->ap.local_rates = cpu_to_le32(supported_rates); + + switch (wlvif->band) { + case IEEE80211_BAND_2GHZ: + cmd->band = WLCORE_BAND_2_4GHZ; 
+ break; + case IEEE80211_BAND_5GHZ: + cmd->band = WLCORE_BAND_5GHZ; + break; + default: + wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); + cmd->band = WLCORE_BAND_2_4GHZ; + break; + } + + ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role start ap"); + goto out_free_bcast; + } + + goto out_free; + +out_free_bcast: + wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); + +out_free_global: + wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl12xx_cmd_role_stop *cmd; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id); + + cmd->role_id = wlvif->role_id; + + ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role stop ap"); + goto out_free; + } + + wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid); + wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wl12xx_cmd_role_start *cmd; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + int ret; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id); + + cmd->role_id = wlvif->role_id; + if (wlvif->band == IEEE80211_BAND_5GHZ) + cmd->band = WLCORE_BAND_5GHZ; + cmd->channel = wlvif->channel; + cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); + cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); + cmd->ibss.dtim_interval = bss_conf->dtim_period; + cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY; + cmd->ibss.ssid_len = wlvif->ssid_len; + memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len); + memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN); + cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set); + + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) { + ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid); + if (ret) + goto out_free; + } + cmd->ibss.hlid = wlvif->sta.hlid; + cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set); + + wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " + "basic_rate_set: 0x%x, remote_rates: 0x%x", + wlvif->role_id, cmd->sta.hlid, cmd->sta.session, + wlvif->basic_rate_set, wlvif->rate_set); + + wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM", + vif->bss_conf.bssid); + + ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd role enable"); + goto err_hlid; + } + + goto out_free; + +err_hlid: + /* clear links on error. 
*/ + wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid); + +out_free: + kfree(cmd); + +out: + return ret; +} + + +/** + * send test command to firmware + * + * @wl: wl struct + * @buf: buffer containing the command, with all headers, must work with dma + * @len: length of the buffer + * @answer: is answer needed + */ +int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) +{ + int ret; + size_t res_len = 0; + + wl1271_debug(DEBUG_CMD, "cmd test"); + + if (answer) + res_len = buf_len; + + ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len); + + if (ret < 0) { + wl1271_warning("TEST command failed"); + return ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(wl1271_cmd_test); + +/** + * read acx from firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer for the response, including all headers, must work with dma + * @len: length of buf + */ +int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, + size_t cmd_len, size_t res_len) +{ + struct acx_header *acx = buf; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd interrogate"); + + acx->id = cpu_to_le16(id); + + /* response payload length, does not include any headers */ + acx->len = cpu_to_le16(res_len - sizeof(*acx)); + + ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, cmd_len, res_len); + if (ret < 0) + wl1271_error("INTERROGATE command failed"); + + return ret; +} + +/** + * write acx value to firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer containing acx, including all headers, must work with dma + * @len: length of buf + * @valid_rets: bitmap of valid cmd status codes (i.e. return values). + * return the cmd status on success. + */ +int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, unsigned long valid_rets) +{ + struct acx_header *acx = buf; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id); + + if (WARN_ON_ONCE(len < sizeof(*acx))) + return -EIO; + + acx->id = cpu_to_le16(id); + + /* payload length, does not include any headers */ + acx->len = cpu_to_le16(len - sizeof(*acx)); + + ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0, + valid_rets); + if (ret < 0) { + wl1271_warning("CONFIGURE command NOK"); + return ret; + } + + return ret; +} + +/* + * wrapper for wlcore_cmd_configure that accepts only success status. + * return 0 on success + */ +int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) +{ + int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0); + + if (ret < 0) + return ret; + return 0; +} +EXPORT_SYMBOL_GPL(wl1271_cmd_configure); + +int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) +{ + struct cmd_enabledisable_path *cmd; + int ret; + u16 cmd_rx, cmd_tx; + + wl1271_debug(DEBUG_CMD, "cmd data path"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + /* the channel here is only used for calibration, so hardcoded to 1 */ + cmd->channel = 1; + + if (enable) { + cmd_rx = CMD_ENABLE_RX; + cmd_tx = CMD_ENABLE_TX; + } else { + cmd_rx = CMD_DISABLE_RX; + cmd_tx = CMD_DISABLE_TX; + } + + ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("rx %s cmd for channel %d failed", + enable ? "start" : "stop", cmd->channel); + goto out; + } + + wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", + enable ? "start" : "stop", cmd->channel); + + ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("tx %s cmd for channel %d failed", + enable ? 
"start" : "stop", cmd->channel); + goto out; + } + + wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", + enable ? "start" : "stop", cmd->channel); + +out: + kfree(cmd); + return ret; +} +EXPORT_SYMBOL_GPL(wl1271_cmd_data_path); + +int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ps_mode, u16 auto_ps_timeout) +{ + struct wl1271_cmd_ps_params *ps_params = NULL; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd set ps mode"); + + ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); + if (!ps_params) { + ret = -ENOMEM; + goto out; + } + + ps_params->role_id = wlvif->role_id; + ps_params->ps_mode = ps_mode; + ps_params->auto_ps_timeout = auto_ps_timeout; + + ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, + sizeof(*ps_params), 0); + if (ret < 0) { + wl1271_error("cmd set_ps_mode failed"); + goto out; + } + +out: + kfree(ps_params); + return ret; +} + +int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id, + u16 template_id, void *buf, size_t buf_len, + int index, u32 rates) +{ + struct wl1271_cmd_template_set *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd template_set %d (role %d)", + template_id, role_id); + + WARN_ON(buf_len > WL1271_CMD_TEMPL_MAX_SIZE); + buf_len = min_t(size_t, buf_len, WL1271_CMD_TEMPL_MAX_SIZE); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + /* during initialization wlvif is NULL */ + cmd->role_id = role_id; + cmd->len = cpu_to_le16(buf_len); + cmd->template_type = template_id; + cmd->enabled_rates = cpu_to_le32(rates); + cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit; + cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit; + cmd->index = index; + + if (buf) + memcpy(cmd->template_data, buf, buf_len); + + ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("cmd set_template failed: %d", ret); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct sk_buff *skb = NULL; + int size; + void *ptr; + int ret = -ENOMEM; + + + if (wlvif->bss_type == BSS_TYPE_IBSS) { + size = sizeof(struct wl12xx_null_data_template); + ptr = NULL; + } else { + skb = ieee80211_nullfunc_get(wl->hw, + wl12xx_wlvif_to_vif(wlvif)); + if (!skb) + goto out; + size = skb->len; + ptr = skb->data; + } + + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_NULL_DATA, ptr, size, 0, + wlvif->basic_rate); + +out: + dev_kfree_skb(skb); + if (ret) + wl1271_warning("cmd buld null data failed %d", ret); + + return ret; + +} + +int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb = NULL; + int ret = -ENOMEM; + + skb = ieee80211_nullfunc_get(wl->hw, vif); + if (!skb) + goto out; + + ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV, + skb->data, skb->len, + wlvif->sta.klv_template_id, + wlvif->basic_rate); + +out: + dev_kfree_skb(skb); + if (ret) + wl1271_warning("cmd build klv null data failed %d", ret); + + return ret; + +} + +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 aid) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb; + int ret = 0; + + skb = ieee80211_pspoll_get(wl->hw, vif); + if (!skb) + goto out; + + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_PS_POLL, skb->data, + skb->len, 0, wlvif->basic_rate_set); + +out: + 
dev_kfree_skb(skb); + return ret; +} + +int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 role_id, u8 band, + const u8 *ssid, size_t ssid_len, + const u8 *ie0, size_t ie0_len, const u8 *ie1, + size_t ie1_len, bool sched_scan) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb; + int ret; + u32 rate; + u16 template_id_2_4 = wl->scan_templ_id_2_4; + u16 template_id_5 = wl->scan_templ_id_5; + + wl1271_debug(DEBUG_SCAN, "build probe request band %d", band); + + skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len, + ie0_len + ie1_len); + if (!skb) { + ret = -ENOMEM; + goto out; + } + if (ie0_len) + memcpy(skb_put(skb, ie0_len), ie0, ie0_len); + if (ie1_len) + memcpy(skb_put(skb, ie1_len), ie1, ie1_len); + + if (sched_scan && + (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) { + template_id_2_4 = wl->sched_scan_templ_id_2_4; + template_id_5 = wl->sched_scan_templ_id_5; + } + + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]); + if (band == IEEE80211_BAND_2GHZ) + ret = wl1271_cmd_template_set(wl, role_id, + template_id_2_4, + skb->data, skb->len, 0, rate); + else + ret = wl1271_cmd_template_set(wl, role_id, + template_id_5, + skb->data, skb->len, 0, rate); + +out: + dev_kfree_skb(skb); + return ret; +} +EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req); + +struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct sk_buff *skb) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + int ret; + u32 rate; + + if (!skb) + skb = ieee80211_ap_probereq_get(wl->hw, vif); + if (!skb) + goto out; + + wl1271_debug(DEBUG_SCAN, "set ap probe request template"); + + rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]); + if (wlvif->band == IEEE80211_BAND_2GHZ) + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_CFG_PROBE_REQ_2_4, + skb->data, skb->len, 0, rate); + else + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_CFG_PROBE_REQ_5, + skb->data, skb->len, 0, rate); + + if (ret < 0) + wl1271_error("Unable to set ap probe request template."); + +out: + return skb; +} + +int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret, extra = 0; + u16 fc; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb; + struct wl12xx_arp_rsp_template *tmpl; + struct ieee80211_hdr_3addr *hdr; + struct arphdr *arp_hdr; + + skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) + + WL1271_EXTRA_SPACE_MAX); + if (!skb) { + wl1271_error("failed to allocate buffer for arp rsp template"); + return -ENOMEM; + } + + skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX); + + tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl)); + memset(tmpl, 0, sizeof(*tmpl)); + + /* llc layer */ + memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); + tmpl->llc_type = cpu_to_be16(ETH_P_ARP); + + /* arp header */ + arp_hdr = &tmpl->arp_hdr; + arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER); + arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP); + arp_hdr->ar_hln = ETH_ALEN; + arp_hdr->ar_pln = 4; + arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY); + + /* arp payload */ + memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN); + tmpl->sender_ip = wlvif->ip_addr; + + /* encryption space */ + switch (wlvif->encryption_type) { + case KEY_TKIP: + if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) + extra = WL1271_EXTRA_SPACE_TKIP; + break; + case KEY_AES: + extra = WL1271_EXTRA_SPACE_AES; + break; + case KEY_NONE: + case 
KEY_WEP: + case KEY_GEM: + extra = 0; + break; + default: + wl1271_warning("Unknown encryption type: %d", + wlvif->encryption_type); + ret = -EINVAL; + goto out; + } + + if (extra) { + u8 *space = skb_push(skb, extra); + memset(space, 0, extra); + } + + /* QoS header - BE */ + if (wlvif->sta.qos) + memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16)); + + /* mac80211 header */ + hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr)); + memset(hdr, 0, sizeof(*hdr)); + fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS; + if (wlvif->sta.qos) + fc |= IEEE80211_STYPE_QOS_DATA; + else + fc |= IEEE80211_STYPE_DATA; + if (wlvif->encryption_type != KEY_NONE) + fc |= IEEE80211_FCTL_PROTECTED; + + hdr->frame_control = cpu_to_le16(fc); + memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); + memcpy(hdr->addr2, vif->addr, ETH_ALEN); + eth_broadcast_addr(hdr->addr3); + + ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP, + skb->data, skb->len, 0, + wlvif->basic_rate); +out: + dev_kfree_skb(skb); + return ret; +} + +int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_qos_hdr template; + + memset(&template, 0, sizeof(template)); + + memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN); + memcpy(template.addr2, vif->addr, ETH_ALEN); + memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN); + + template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_TODS); + + /* FIXME: not sure what priority to use here */ + template.qos_ctrl = cpu_to_le16(0); + + return wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_QOS_NULL_DATA, &template, + sizeof(template), 0, + wlvif->basic_rate); +} + +int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid) +{ + struct wl1271_cmd_set_keys *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = hlid; + cmd->key_id = id; + cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; + cmd->key_action = cpu_to_le16(KEY_SET_ID); + cmd->key_type = KEY_WEP; + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("cmd set_default_wep_key failed: %d", ret); + goto out; + } + +out: + kfree(cmd); + + return ret; +} + +int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, const u8 *addr, + u32 tx_seq_32, u16 tx_seq_16) +{ + struct wl1271_cmd_set_keys *cmd; + int ret = 0; + + /* hlid might have already been deleted */ + if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) + return 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = wlvif->sta.hlid; + + if (key_type == KEY_WEP) + cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; + else if (is_broadcast_ether_addr(addr)) + cmd->lid_key_type = BROADCAST_LID_TYPE; + else + cmd->lid_key_type = UNICAST_LID_TYPE; + + cmd->key_action = cpu_to_le16(action); + cmd->key_size = key_size; + cmd->key_type = key_type; + + cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16); + cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32); + + cmd->key_id = id; + + if (key_type == KEY_TKIP) { + /* + * We get the key in the following form: + * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) + * but the target is expecting: + * TKIP - RX MIC - TX MIC + */ + memcpy(cmd->key, key, 16); + 
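+ /* copy RX MIC (key bytes 24..31) to offset 16, then TX MIC (bytes 16..23) to offset 24 */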
memcpy(cmd->key + 16, key + 24, 8); + memcpy(cmd->key + 24, key + 16, 8); + + } else { + memcpy(cmd->key, key, key_size); + } + + wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("could not set keys"); + goto out; + } + +out: + kfree(cmd); + + return ret; +} + +/* + * TODO: merge with sta/ibss into 1 set_key function. + * note there are slight diffs + */ +int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) +{ + struct wl1271_cmd_set_keys *cmd; + int ret = 0; + u8 lid_type; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + if (hlid == wlvif->ap.bcast_hlid) { + if (key_type == KEY_WEP) + lid_type = WEP_DEFAULT_LID_TYPE; + else + lid_type = BROADCAST_LID_TYPE; + } else { + lid_type = UNICAST_LID_TYPE; + } + + wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d" + " hlid: %d", (int)action, (int)id, (int)lid_type, + (int)key_type, (int)hlid); + + cmd->lid_key_type = lid_type; + cmd->hlid = hlid; + cmd->key_action = cpu_to_le16(action); + cmd->key_size = key_size; + cmd->key_type = key_type; + cmd->key_id = id; + cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16); + cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32); + + if (key_type == KEY_TKIP) { + /* + * We get the key in the following form: + * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) + * but the target is expecting: + * TKIP - RX MIC - TX MIC + */ + memcpy(cmd->key, key, 16); + memcpy(cmd->key + 16, key + 24, 8); + memcpy(cmd->key + 24, key + 16, 8); + } else { + memcpy(cmd->key, key, key_size); + } + + wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("could not set ap keys"); + goto out; + } + +out: + kfree(cmd); + return ret; +} + +int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid) +{ + struct wl12xx_cmd_set_peer_state *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd set peer state (hlid=%d)", hlid); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = hlid; + cmd->state = WL1271_CMD_STA_STATE_CONNECTED; + + /* wmm param is valid only for station role */ + if (wlvif->bss_type == BSS_TYPE_STA_BSS) + cmd->wmm = wlvif->wmm_enabled; + + ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send set peer state command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u8 hlid) +{ + struct wl12xx_cmd_add_peer *cmd; + int i, ret; + u32 sta_rates; + + wl1271_debug(DEBUG_CMD, "cmd add peer %d", (int)hlid); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + memcpy(cmd->addr, sta->addr, ETH_ALEN); + cmd->bss_index = WL1271_AP_BSS_INDEX; + cmd->aid = sta->aid; + cmd->hlid = hlid; + cmd->sp_len = sta->max_sp; + cmd->wmm = sta->wme ? 
1 : 0; + cmd->session_id = wl->session_ids[hlid]; + cmd->role_id = wlvif->role_id; + + for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++) + if (sta->wme && (sta->uapsd_queues & BIT(i))) + cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] = + WL1271_PSD_UPSD_TRIGGER; + else + cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] = + WL1271_PSD_LEGACY; + + + sta_rates = sta->supp_rates[wlvif->band]; + if (sta->ht_cap.ht_supported) + sta_rates |= + (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) | + (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET); + + cmd->supported_rates = + cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, + wlvif->band)); + + wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", + cmd->supported_rates, sta->uapsd_queues); + + ret = wl1271_cmd_send(wl, CMD_ADD_PEER, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd add peer"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid) +{ + struct wl12xx_cmd_remove_peer *cmd; + int ret; + bool timeout = false; + + wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = hlid; + /* We never send a deauth, mac80211 is in charge of this */ + cmd->reason_opcode = 0; + cmd->send_deauth_flag = 0; + cmd->role_id = wlvif->role_id; + + ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd remove peer"); + goto out_free; + } + + ret = wl->ops->wait_for_event(wl, + WLCORE_EVENT_PEER_REMOVE_COMPLETE, + &timeout); + + /* + * We are ok with a timeout here. The event is sometimes not sent + * due to a firmware bug. In case of another error (like SDIO timeout) + * queue a recovery. + */ + if (ret) + wl12xx_queue_recovery_work(wl); + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch) +{ + /* + * map the given band/channel to the respective predefined + * bit expected by the fw + */ + switch (band) { + case IEEE80211_BAND_2GHZ: + /* channels 1..14 are mapped to 0..13 */ + if (ch >= 1 && ch <= 14) + return ch - 1; + break; + case IEEE80211_BAND_5GHZ: + switch (ch) { + case 8 ... 16: + /* channels 8,12,16 are mapped to 18,19,20 */ + return 18 + (ch-8)/4; + case 34 ... 48: + /* channels 34,36..48 are mapped to 21..28 */ + return 21 + (ch-34)/2; + case 52 ... 64: + /* channels 52,56..64 are mapped to 29..32 */ + return 29 + (ch-52)/4; + case 100 ... 140: + /* channels 100,104..140 are mapped to 33..43 */ + return 33 + (ch-100)/4; + case 149 ... 
165: + /* channels 149,153..165 are mapped to 44..48 */ + return 44 + (ch-149)/4; + default: + break; + } + break; + default: + break; + } + + wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch); + return -1; +} + +void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, + enum ieee80211_band band) +{ + int ch_bit_idx = 0; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return; + + ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel); + + if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS) + set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending); +} + +int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl) +{ + struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL; + int ret = 0, i, b, ch_bit_idx; + u32 tmp_ch_bitmap[2]; + struct wiphy *wiphy = wl->hw->wiphy; + struct ieee80211_supported_band *band; + bool timeout = false; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return 0; + + wl1271_debug(DEBUG_CMD, "cmd reg domain config"); + + memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap)); + + for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) { + band = wiphy->bands[b]; + for (i = 0; i < band->n_channels; i++) { + struct ieee80211_channel *channel = &band->channels[i]; + u16 ch = channel->hw_value; + u32 flags = channel->flags; + + if (flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_NO_IR)) + continue; + + if ((flags & IEEE80211_CHAN_RADAR) && + channel->dfs_state != NL80211_DFS_AVAILABLE) + continue; + + ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch); + if (ch_bit_idx < 0) + continue; + + set_bit(ch_bit_idx, (long *)tmp_ch_bitmap); + } + } + + tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0]; + tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1]; + + if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap))) + goto out; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]); + cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]); + cmd->dfs_region = wl->dfs_region; + + wl1271_debug(DEBUG_CMD, + "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x", + cmd->ch_bit_map1, cmd->ch_bit_map2); + + ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send reg domain dfs config"); + goto out; + } + + ret = wl->ops->wait_for_event(wl, + WLCORE_EVENT_DFS_CONFIG_COMPLETE, + &timeout); + if (ret < 0 || timeout) { + wl1271_error("reg domain conf %serror", + timeout ? "completion " : ""); + ret = timeout ? 
-ETIMEDOUT : ret; + goto out; + } + + memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap)); + memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending)); + +out: + kfree(cmd); + return ret; +} + +int wl12xx_cmd_config_fwlog(struct wl1271 *wl) +{ + struct wl12xx_cmd_config_fwlog *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd config firmware logger"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->logger_mode = wl->conf.fwlog.mode; + cmd->log_severity = wl->conf.fwlog.severity; + cmd->timestamp = wl->conf.fwlog.timestamp; + cmd->output = wl->conf.fwlog.output; + cmd->threshold = wl->conf.fwlog.threshold; + + ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send config firmware logger command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_start_fwlog(struct wl1271 *wl) +{ + struct wl12xx_cmd_start_fwlog *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd start firmware logger"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send start firmware logger command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_cmd_stop_fwlog(struct wl1271 *wl) +{ + struct wl12xx_cmd_stop_fwlog *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd stop firmware logger"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send stop firmware logger command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 role_id, enum ieee80211_band band, u8 channel) +{ + struct wl12xx_cmd_roc *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id); + + if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID)) + return -EINVAL; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = role_id; + cmd->channel = channel; + switch (band) { + case IEEE80211_BAND_2GHZ: + cmd->band = WLCORE_BAND_2_4GHZ; + break; + case IEEE80211_BAND_5GHZ: + cmd->band = WLCORE_BAND_5GHZ; + break; + default: + wl1271_error("roc - unknown band: %d", (int)wlvif->band); + ret = -EINVAL; + goto out_free; + } + + + ret = wl1271_cmd_send(wl, CMD_REMAIN_ON_CHANNEL, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send ROC command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id) +{ + struct wl12xx_cmd_croc *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd croc (%d)", role_id); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + cmd->role_id = role_id; + + ret = wl1271_cmd_send(wl, CMD_CANCEL_REMAIN_ON_CHANNEL, cmd, + sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send ROC command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, + enum ieee80211_band band, u8 channel) +{ + int ret = 0; + + if (WARN_ON(test_bit(role_id, wl->roc_map))) + return 0; + + ret = wl12xx_cmd_roc(wl, wlvif, 
role_id, band, channel); + if (ret < 0) + goto out; + + __set_bit(role_id, wl->roc_map); +out: + return ret; +} + +int wl12xx_croc(struct wl1271 *wl, u8 role_id) +{ + int ret = 0; + + if (WARN_ON(!test_bit(role_id, wl->roc_map))) + return 0; + + ret = wl12xx_cmd_croc(wl, role_id); + if (ret < 0) + goto out; + + __clear_bit(role_id, wl->roc_map); + + /* + * Rearm the tx watchdog when removing the last ROC. This prevents + * recoveries due to just finished ROCs - when Tx hasn't yet had + * a chance to get out. + */ + if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) + wl12xx_rearm_tx_watchdog_locked(wl); +out: + return ret; +} + +int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct wl12xx_cmd_stop_channel_switch *cmd; + int ret; + + wl1271_debug(DEBUG_ACX, "cmd stop channel switch"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + + ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to stop channel switch command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +/* start dev role and roc on its channel */ +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum ieee80211_band band, int channel) +{ + int ret; + + if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS))) + return -EINVAL; + + ret = wl12xx_cmd_role_enable(wl, + wl12xx_wlvif_to_vif(wlvif)->addr, + WL1271_ROLE_DEVICE, + &wlvif->dev_role_id); + if (ret < 0) + goto out; + + ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel); + if (ret < 0) + goto out_disable; + + ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel); + if (ret < 0) + goto out_stop; + + return 0; + +out_stop: + wl12xx_cmd_role_stop_dev(wl, wlvif); +out_disable: + wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); +out: + return ret; +} + +/* croc dev hlid, and stop the role */ +int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS))) + return -EINVAL; + + /* flush all pending packets */ + ret = wlcore_tx_work_locked(wl); + if (ret < 0) + goto out; + + if (test_bit(wlvif->dev_role_id, wl->roc_map)) { + ret = wl12xx_croc(wl, wlvif->dev_role_id); + if (ret < 0) + goto out; + } + + ret = wl12xx_cmd_role_stop_dev(wl, wlvif); + if (ret < 0) + goto out; + + ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (ret < 0) + goto out; + +out: + return ret; +} diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h new file mode 100644 index 000000000..e14cd407a --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -0,0 +1,712 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __CMD_H__ +#define __CMD_H__ + +#include "wlcore.h" + +struct acx_header; + +int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len); +int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, + u8 *role_id); +int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); +int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum ieee80211_band band, int channel); +int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); +int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, + size_t cmd_len, size_t res_len); +int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); +int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf, + size_t len, unsigned long valid_rets); +int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); +int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 ps_mode, u16 auto_ps_timeout); +int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, + size_t len); +int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id, + u16 template_id, void *buf, size_t buf_len, + int index, u32 rates); +int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 aid); +int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 role_id, u8 band, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, const u8 *common_ie, + size_t common_ie_len, bool sched_scan); +struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct sk_buff *skb); +int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif); +int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid); +int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, const u8 *addr, + u32 tx_seq_32, u16 tx_seq_16); +int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16); +int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid); +int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id, + enum ieee80211_band band, u8 channel); +int wl12xx_croc(struct wl1271 *wl, u8 role_id); +int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, u8 hlid); +int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid); +void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, 
u16 channel, + enum ieee80211_band band); +int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); +int wl12xx_cmd_config_fwlog(struct wl1271 *wl); +int wl12xx_cmd_start_fwlog(struct wl1271 *wl); +int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); +int wl12xx_cmd_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch); +int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, + struct wl12xx_vif *wlvif); +int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 *hlid); +void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); +int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, + u32 mask, bool *timeout); +u8 wlcore_get_native_channel_type(u8 nl_channel_type); + +enum wl1271_commands { + CMD_INTERROGATE = 1, /* use this to read information elements */ + CMD_CONFIGURE = 2, /* use this to write information elements */ + CMD_ENABLE_RX = 3, + CMD_ENABLE_TX = 4, + CMD_DISABLE_RX = 5, + CMD_DISABLE_TX = 6, + CMD_SCAN = 7, + CMD_STOP_SCAN = 8, + CMD_SET_KEYS = 9, + CMD_READ_MEMORY = 10, + CMD_WRITE_MEMORY = 11, + CMD_SET_TEMPLATE = 12, + CMD_TEST = 13, + CMD_NOISE_HIST = 14, + CMD_QUIET_ELEMENT_SET_STATE = 15, + CMD_SET_BCN_MODE = 16, + + CMD_MEASUREMENT = 17, + CMD_STOP_MEASUREMENT = 18, + CMD_SET_PS_MODE = 19, + CMD_CHANNEL_SWITCH = 20, + CMD_STOP_CHANNEL_SWICTH = 21, + CMD_AP_DISCOVERY = 22, + CMD_STOP_AP_DISCOVERY = 23, + CMD_HEALTH_CHECK = 24, + CMD_DEBUG = 25, + CMD_TRIGGER_SCAN_TO = 26, + CMD_CONNECTION_SCAN_CFG = 27, + CMD_CONNECTION_SCAN_SSID_CFG = 28, + CMD_START_PERIODIC_SCAN = 29, + CMD_STOP_PERIODIC_SCAN = 30, + CMD_SET_PEER_STATE = 31, + CMD_REMAIN_ON_CHANNEL = 32, + CMD_CANCEL_REMAIN_ON_CHANNEL = 33, + CMD_CONFIG_FWLOGGER = 34, + CMD_START_FWLOGGER = 35, + CMD_STOP_FWLOGGER = 36, + + /* Access point commands */ + CMD_ADD_PEER = 37, + CMD_REMOVE_PEER = 38, + + /* Role API */ + CMD_ROLE_ENABLE = 39, + CMD_ROLE_DISABLE = 40, + CMD_ROLE_START = 41, + CMD_ROLE_STOP = 42, + + /* DFS */ + CMD_START_RADAR_DETECTION = 43, + CMD_STOP_RADAR_DETECTION = 44, + + /* WIFI Direct */ + CMD_WFD_START_DISCOVERY = 45, + CMD_WFD_STOP_DISCOVERY = 46, + CMD_WFD_ATTRIBUTE_CONFIG = 47, + CMD_GENERIC_CFG = 48, + CMD_NOP = 49, + + /* start of 18xx specific commands */ + CMD_DFS_CHANNEL_CONFIG = 60, + CMD_SMART_CONFIG_START = 61, + CMD_SMART_CONFIG_STOP = 62, + CMD_SMART_CONFIG_SET_GROUP_KEY = 63, + + CMD_CAC_START = 64, + CMD_CAC_STOP = 65, + CMD_DFS_MASTER_RESTART = 66, + CMD_DFS_RADAR_DETECTION_DEBUG = 67, + + MAX_COMMAND_ID = 0xFFFF, +}; + +#define MAX_CMD_PARAMS 572 + +enum cmd_templ { + CMD_TEMPL_NULL_DATA = 0, + CMD_TEMPL_BEACON, + CMD_TEMPL_CFG_PROBE_REQ_2_4, + CMD_TEMPL_CFG_PROBE_REQ_5, + CMD_TEMPL_PROBE_RESPONSE, + CMD_TEMPL_QOS_NULL_DATA, + CMD_TEMPL_PS_POLL, + CMD_TEMPL_KLV, + CMD_TEMPL_DISCONNECT, + CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY, + CMD_TEMPL_APP_PROBE_REQ_5_LEGACY, + CMD_TEMPL_BAR, /* for firmware internal use only */ + CMD_TEMPL_CTS, /* + * For CTS-to-self (FastCTS) mechanism + * for BT/WLAN coexistence (SoftGemini). 
*/ + CMD_TEMPL_AP_BEACON, + CMD_TEMPL_AP_PROBE_RESPONSE, + CMD_TEMPL_ARP_RSP, + CMD_TEMPL_DEAUTH_AP, + CMD_TEMPL_TEMPORARY, + CMD_TEMPL_LINK_MEASUREMENT_REPORT, + CMD_TEMPL_PROBE_REQ_2_4_PERIODIC, + CMD_TEMPL_PROBE_REQ_5_PERIODIC, + + CMD_TEMPL_MAX = 0xff +}; + +/* unit ms */ +#define WL1271_COMMAND_TIMEOUT 2000 +#define WL1271_CMD_TEMPL_DFLT_SIZE 252 +#define WL1271_CMD_TEMPL_MAX_SIZE 512 +#define WL1271_EVENT_TIMEOUT 5000 + +struct wl1271_cmd_header { + __le16 id; + __le16 status; + /* payload */ + u8 data[0]; +} __packed; + +#define WL1271_CMD_MAX_PARAMS 572 + +struct wl1271_command { + struct wl1271_cmd_header header; + u8 parameters[WL1271_CMD_MAX_PARAMS]; +} __packed; + +enum { + CMD_MAILBOX_IDLE = 0, + CMD_STATUS_SUCCESS = 1, + CMD_STATUS_UNKNOWN_CMD = 2, + CMD_STATUS_UNKNOWN_IE = 3, + CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11, + CMD_STATUS_RX_BUSY = 13, + CMD_STATUS_INVALID_PARAM = 14, + CMD_STATUS_TEMPLATE_TOO_LARGE = 15, + CMD_STATUS_OUT_OF_MEMORY = 16, + CMD_STATUS_STA_TABLE_FULL = 17, + CMD_STATUS_RADIO_ERROR = 18, + CMD_STATUS_WRONG_NESTING = 19, + CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ + CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ + CMD_STATUS_TEMPLATE_OOM = 23, + CMD_STATUS_NO_RX_BA_SESSION = 24, + + MAX_COMMAND_STATUS +}; + +#define CMDMBOX_HEADER_LEN 4 +#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 + +enum { + BSS_TYPE_IBSS = 0, + BSS_TYPE_STA_BSS = 2, + BSS_TYPE_AP_BSS = 3, + MAX_BSS_TYPE = 0xFF +}; + +#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ +#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 +#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10 + +struct wl12xx_cmd_role_enable { + struct wl1271_cmd_header header; + + u8 role_id; + u8 role_type; + u8 mac_address[ETH_ALEN]; +} __packed; + +struct wl12xx_cmd_role_disable { + struct wl1271_cmd_header header; + + u8 role_id; + u8 padding[3]; +} __packed; + +enum wlcore_band { + WLCORE_BAND_2_4GHZ = 0, + WLCORE_BAND_5GHZ = 1, + WLCORE_BAND_JAPAN_4_9_GHZ = 2, + WLCORE_BAND_DEFAULT = WLCORE_BAND_2_4GHZ, + WLCORE_BAND_INVALID = 0x7E, + WLCORE_BAND_MAX_RADIO = 0x7F, +}; + +enum wlcore_channel_type { + WLCORE_CHAN_NO_HT, + WLCORE_CHAN_HT20, + WLCORE_CHAN_HT40MINUS, + WLCORE_CHAN_HT40PLUS +}; + +struct wl12xx_cmd_role_start { + struct wl1271_cmd_header header; + + u8 role_id; + u8 band; + u8 channel; + + /* enum wlcore_channel_type */ + u8 channel_type; + + union { + struct { + u8 hlid; + u8 session; + u8 padding_1[54]; + } __packed device; + /* sta & p2p_cli use the same struct */ + struct { + u8 bssid[ETH_ALEN]; + u8 hlid; /* data hlid */ + u8 session; + __le32 remote_rates; /* remote supported rates */ + + /* + * The target uses this field to determine the rate at + * which to transmit control frame responses (such as + * ACK or CTS frames). 
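+ * The driver fills the basic_rate_set fields from wlvif->basic_rate_set when starting a role.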
+ */ + __le32 basic_rate_set; + __le32 local_rates; /* local supported rates */ + + u8 ssid_type; + u8 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + + __le16 beacon_interval; /* in TBTTs */ + } __packed sta; + struct { + u8 bssid[ETH_ALEN]; + u8 hlid; /* data hlid */ + u8 dtim_interval; + __le32 remote_rates; /* remote supported rates */ + + __le32 basic_rate_set; + __le32 local_rates; /* local supported rates */ + + u8 ssid_type; + u8 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + + __le16 beacon_interval; /* in TBTTs */ + + u8 padding_1[4]; + } __packed ibss; + /* ap & p2p_go use the same struct */ + struct { + __le16 aging_period; /* in secs */ + u8 beacon_expiry; /* in ms */ + u8 bss_index; + /* The host link id for the AP's global queue */ + u8 global_hlid; + /* The host link id for the AP's broadcast queue */ + u8 broadcast_hlid; + + __le16 beacon_interval; /* in TBTTs */ + + __le32 basic_rate_set; + __le32 local_rates; /* local supported rates */ + + u8 dtim_interval; + + u8 ssid_type; + u8 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + + u8 reset_tsf; + + /* + * ap supports wmm (note that there is additional + * per-sta wmm configuration) + */ + u8 wmm; + + u8 bcast_session_id; + u8 global_session_id; + u8 padding_1[1]; + } __packed ap; + }; +} __packed; + +struct wl12xx_cmd_role_stop { + struct wl1271_cmd_header header; + + u8 role_id; + u8 disc_type; /* only STA and P2P_CLI */ + __le16 reason; /* only STA and P2P_CLI */ +} __packed; + +struct cmd_enabledisable_path { + struct wl1271_cmd_header header; + + u8 channel; + u8 padding[3]; +} __packed; + +#define WL1271_RATE_AUTOMATIC 0 + +struct wl1271_cmd_template_set { + struct wl1271_cmd_header header; + + u8 role_id; + u8 template_type; + __le16 len; + u8 index; /* relevant only for KLV_TEMPLATE type */ + u8 padding[3]; + + __le32 enabled_rates; + u8 short_retry_limit; + u8 long_retry_limit; + u8 aflags; + u8 reserved; + + u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; +} __packed; + +#define TIM_ELE_ID 5 +#define PARTIAL_VBM_MAX 251 + +struct wl1271_tim { + u8 identity; + u8 length; + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ +} __packed; + +enum wl1271_cmd_ps_mode { + STATION_AUTO_PS_MODE, /* Dynamic Power Save */ + STATION_ACTIVE_MODE, + STATION_POWER_SAVE_MODE +}; + +struct wl1271_cmd_ps_params { + struct wl1271_cmd_header header; + + u8 role_id; + u8 ps_mode; /* STATION_* */ + u16 auto_ps_timeout; +} __packed; + +/* HW encryption keys */ +#define NUM_ACCESS_CATEGORIES_COPY 4 + +enum wl1271_cmd_key_action { + KEY_ADD_OR_REPLACE = 1, + KEY_REMOVE = 2, + KEY_SET_ID = 3, + MAX_KEY_ACTION = 0xffff, +}; + +enum wl1271_cmd_lid_key_type { + UNICAST_LID_TYPE = 0, + BROADCAST_LID_TYPE = 1, + WEP_DEFAULT_LID_TYPE = 2 +}; + +enum wl1271_cmd_key_type { + KEY_NONE = 0, + KEY_WEP = 1, + KEY_TKIP = 2, + KEY_AES = 3, + KEY_GEM = 4, +}; + +struct wl1271_cmd_set_keys { + struct wl1271_cmd_header header; + + /* + * Indicates whether the HLID is a unicast key set + * or broadcast key set. A special value 0xFF is + * used to indicate that the HLID is on WEP-default + * (multi-hlids). of type wl1271_cmd_lid_key_type. + */ + u8 hlid; + + /* + * In WEP-default network (hlid == 0xFF) used to + * indicate which network STA/IBSS/AP role should be + * changed + */ + u8 lid_key_type; + + /* + * Key ID - For TKIP and AES key types, this field + * indicates the value that should be inserted into + * the KeyID field of frames transmitted using this + * key entry. 
For broadcast keys the index use as a + * marker for TX/RX key. + * For WEP default network (HLID=0xFF), this field + * indicates the ID of the key to add or remove. + */ + u8 key_id; + u8 reserved_1; + + /* key_action_e */ + __le16 key_action; + + /* key size in bytes */ + u8 key_size; + + /* key_type_e */ + u8 key_type; + + /* This field holds the security key data to add to the STA table */ + u8 key[MAX_KEY_SIZE]; + __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; + __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; +} __packed; + +struct wl1271_cmd_test_header { + u8 id; + u8 padding[3]; +} __packed; + +enum wl1271_channel_tune_bands { + WL1271_CHANNEL_TUNE_BAND_2_4, + WL1271_CHANNEL_TUNE_BAND_5, + WL1271_CHANNEL_TUNE_BAND_4_9 +}; + +#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 + +/* + * There are three types of disconnections: + * + * DISCONNECT_IMMEDIATE: the fw doesn't send any frames + * DISCONNECT_DEAUTH: the fw generates a DEAUTH request with the reason + * we have passed + * DISCONNECT_DISASSOC: the fw generates a DESASSOC request with the reason + * we have passed + */ +enum wl1271_disconnect_type { + DISCONNECT_IMMEDIATE, + DISCONNECT_DEAUTH, + DISCONNECT_DISASSOC +}; + +#define WL1271_CMD_STA_STATE_CONNECTED 1 + +struct wl12xx_cmd_set_peer_state { + struct wl1271_cmd_header header; + + u8 hlid; + u8 state; + + /* + * wmm is relevant for sta role only. + * ap role configures the per-sta wmm params in + * the add_peer command. + */ + u8 wmm; + u8 padding[1]; +} __packed; + +struct wl12xx_cmd_roc { + struct wl1271_cmd_header header; + + u8 role_id; + u8 channel; + u8 band; + u8 padding; +}; + +struct wl12xx_cmd_croc { + struct wl1271_cmd_header header; + + u8 role_id; + u8 padding[3]; +}; + +enum wl12xx_ssid_type { + WL12XX_SSID_TYPE_PUBLIC = 0, + WL12XX_SSID_TYPE_HIDDEN = 1, + WL12XX_SSID_TYPE_ANY = 2, +}; + +enum wl1271_psd_type { + WL1271_PSD_LEGACY = 0, + WL1271_PSD_UPSD_TRIGGER = 1, + WL1271_PSD_LEGACY_PSPOLL = 2, + WL1271_PSD_SAPSD = 3 +}; + +struct wl12xx_cmd_add_peer { + struct wl1271_cmd_header header; + + u8 addr[ETH_ALEN]; + u8 hlid; + u8 aid; + u8 psd_type[NUM_ACCESS_CATEGORIES_COPY]; + __le32 supported_rates; + u8 bss_index; + u8 sp_len; + u8 wmm; + u8 session_id; + u8 role_id; + u8 padding[3]; +} __packed; + +struct wl12xx_cmd_remove_peer { + struct wl1271_cmd_header header; + + u8 hlid; + u8 reason_opcode; + u8 send_deauth_flag; + u8 role_id; +} __packed; + +/* + * Continuous mode - packets are transferred to the host periodically + * via the data path. 
+ * On demand - Log messages are stored in a cyclic buffer in the + * firmware, and only transferred to the host when explicitly requested + */ +enum wl12xx_fwlogger_log_mode { + WL12XX_FWLOG_CONTINUOUS, + WL12XX_FWLOG_ON_DEMAND +}; + +/* Include/exclude timestamps from the log messages */ +enum wl12xx_fwlogger_timestamp { + WL12XX_FWLOG_TIMESTAMP_DISABLED, + WL12XX_FWLOG_TIMESTAMP_ENABLED +}; + +/* + * Logs can be routed to the debug pinouts (where available), to the host bus + * (SDIO/SPI), or dropped + */ +enum wl12xx_fwlogger_output { + WL12XX_FWLOG_OUTPUT_NONE, + WL12XX_FWLOG_OUTPUT_DBG_PINS, + WL12XX_FWLOG_OUTPUT_HOST, +}; + +struct wl12xx_cmd_regdomain_dfs_config { + struct wl1271_cmd_header header; + + __le32 ch_bit_map1; + __le32 ch_bit_map2; + u8 dfs_region; + u8 padding[3]; +} __packed; + +struct wl12xx_cmd_config_fwlog { + struct wl1271_cmd_header header; + + /* See enum wl12xx_fwlogger_log_mode */ + u8 logger_mode; + + /* Minimum log level threshold */ + u8 log_severity; + + /* Include/exclude timestamps from the log messages */ + u8 timestamp; + + /* See enum wl1271_fwlogger_output */ + u8 output; + + /* Regulates the frequency of log messages */ + u8 threshold; + + u8 padding[3]; +} __packed; + +struct wl12xx_cmd_start_fwlog { + struct wl1271_cmd_header header; +} __packed; + +struct wl12xx_cmd_stop_fwlog { + struct wl1271_cmd_header header; +} __packed; + +struct wl12xx_cmd_stop_channel_switch { + struct wl1271_cmd_header header; + + u8 role_id; + u8 padding[3]; +} __packed; + +/* Used to check radio status after calibration */ +#define MAX_TLV_LENGTH 500 +#define TEST_CMD_P2G_CAL 2 /* TX BiP */ + +struct wl1271_cmd_cal_p2g { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + __le32 ver; + __le16 len; + u8 buf[MAX_TLV_LENGTH]; + u8 type; + u8 padding; + + __le16 radio_status; + + u8 sub_band_mask; + u8 padding2; +} __packed; + +#endif /* __WL1271_CMD_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h new file mode 100644 index 000000000..166add00b --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/conf.h @@ -0,0 +1,1392 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __CONF_H__ +#define __CONF_H__ + +enum { + CONF_HW_BIT_RATE_1MBPS = BIT(0), + CONF_HW_BIT_RATE_2MBPS = BIT(1), + CONF_HW_BIT_RATE_5_5MBPS = BIT(2), + CONF_HW_BIT_RATE_6MBPS = BIT(3), + CONF_HW_BIT_RATE_9MBPS = BIT(4), + CONF_HW_BIT_RATE_11MBPS = BIT(5), + CONF_HW_BIT_RATE_12MBPS = BIT(6), + CONF_HW_BIT_RATE_18MBPS = BIT(7), + CONF_HW_BIT_RATE_22MBPS = BIT(8), + CONF_HW_BIT_RATE_24MBPS = BIT(9), + CONF_HW_BIT_RATE_36MBPS = BIT(10), + CONF_HW_BIT_RATE_48MBPS = BIT(11), + CONF_HW_BIT_RATE_54MBPS = BIT(12), + CONF_HW_BIT_RATE_MCS_0 = BIT(13), + CONF_HW_BIT_RATE_MCS_1 = BIT(14), + CONF_HW_BIT_RATE_MCS_2 = BIT(15), + CONF_HW_BIT_RATE_MCS_3 = BIT(16), + CONF_HW_BIT_RATE_MCS_4 = BIT(17), + CONF_HW_BIT_RATE_MCS_5 = BIT(18), + CONF_HW_BIT_RATE_MCS_6 = BIT(19), + CONF_HW_BIT_RATE_MCS_7 = BIT(20), + CONF_HW_BIT_RATE_MCS_8 = BIT(21), + CONF_HW_BIT_RATE_MCS_9 = BIT(22), + CONF_HW_BIT_RATE_MCS_10 = BIT(23), + CONF_HW_BIT_RATE_MCS_11 = BIT(24), + CONF_HW_BIT_RATE_MCS_12 = BIT(25), + CONF_HW_BIT_RATE_MCS_13 = BIT(26), + CONF_HW_BIT_RATE_MCS_14 = BIT(27), + CONF_HW_BIT_RATE_MCS_15 = BIT(28), +}; + +enum { + CONF_HW_RATE_INDEX_1MBPS = 0, + CONF_HW_RATE_INDEX_2MBPS = 1, + CONF_HW_RATE_INDEX_5_5MBPS = 2, + CONF_HW_RATE_INDEX_11MBPS = 3, + CONF_HW_RATE_INDEX_6MBPS = 4, + CONF_HW_RATE_INDEX_9MBPS = 5, + CONF_HW_RATE_INDEX_12MBPS = 6, + CONF_HW_RATE_INDEX_18MBPS = 7, + CONF_HW_RATE_INDEX_24MBPS = 8, + CONF_HW_RATE_INDEX_36MBPS = 9, + CONF_HW_RATE_INDEX_48MBPS = 10, + CONF_HW_RATE_INDEX_54MBPS = 11, + CONF_HW_RATE_INDEX_MCS0 = 12, + CONF_HW_RATE_INDEX_MCS1 = 13, + CONF_HW_RATE_INDEX_MCS2 = 14, + CONF_HW_RATE_INDEX_MCS3 = 15, + CONF_HW_RATE_INDEX_MCS4 = 16, + CONF_HW_RATE_INDEX_MCS5 = 17, + CONF_HW_RATE_INDEX_MCS6 = 18, + CONF_HW_RATE_INDEX_MCS7 = 19, + CONF_HW_RATE_INDEX_MCS7_SGI = 20, + CONF_HW_RATE_INDEX_MCS0_40MHZ = 21, + CONF_HW_RATE_INDEX_MCS1_40MHZ = 22, + CONF_HW_RATE_INDEX_MCS2_40MHZ = 23, + CONF_HW_RATE_INDEX_MCS3_40MHZ = 24, + CONF_HW_RATE_INDEX_MCS4_40MHZ = 25, + CONF_HW_RATE_INDEX_MCS5_40MHZ = 26, + CONF_HW_RATE_INDEX_MCS6_40MHZ = 27, + CONF_HW_RATE_INDEX_MCS7_40MHZ = 28, + CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI = 29, + + /* MCS8+ rates overlap with 40Mhz rates */ + CONF_HW_RATE_INDEX_MCS8 = 21, + CONF_HW_RATE_INDEX_MCS9 = 22, + CONF_HW_RATE_INDEX_MCS10 = 23, + CONF_HW_RATE_INDEX_MCS11 = 24, + CONF_HW_RATE_INDEX_MCS12 = 25, + CONF_HW_RATE_INDEX_MCS13 = 26, + CONF_HW_RATE_INDEX_MCS14 = 27, + CONF_HW_RATE_INDEX_MCS15 = 28, + CONF_HW_RATE_INDEX_MCS15_SGI = 29, + + CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_MCS7_40MHZ_SGI, +}; + +#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff + +enum { + CONF_SG_DISABLE = 0, + CONF_SG_PROTECTIVE, + CONF_SG_OPPORTUNISTIC +}; + +enum { + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT master basic rate + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_BT_MASTER_MIN_BR = 0, + CONF_SG_ACL_BT_MASTER_MAX_BR, + + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT slave basic rate + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_BT_SLAVE_MIN_BR, + CONF_SG_ACL_BT_SLAVE_MAX_BR, + + /* + * Configure the min and max time BT gains the antenna + * in WLAN / BT master EDR + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_BT_MASTER_MIN_EDR, + CONF_SG_ACL_BT_MASTER_MAX_EDR, + + /* + * Configure the min and max time BT 
gains the antenna + * in WLAN / BT slave EDR + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_BT_SLAVE_MIN_EDR, + CONF_SG_ACL_BT_SLAVE_MAX_EDR, + + /* + * The maximum time WLAN can gain the antenna + * in WLAN PSM / BT master/slave BR + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_WLAN_PS_MASTER_BR, + CONF_SG_ACL_WLAN_PS_SLAVE_BR, + + /* + * The maximum time WLAN can gain the antenna + * in WLAN PSM / BT master/slave EDR + * + * Range: 0 - 255 (ms) + */ + CONF_SG_ACL_WLAN_PS_MASTER_EDR, + CONF_SG_ACL_WLAN_PS_SLAVE_EDR, + + /* TODO: explain these values */ + CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR, + CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR, + CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR, + CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR, + CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR, + CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR, + CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR, + CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR, + + CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR, + CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR, + CONF_SG_ACL_PASSIVE_SCAN_BT_BR, + CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR, + CONF_SG_ACL_PASSIVE_SCAN_BT_EDR, + CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR, + + /* + * Compensation percentage of probe requests when scan initiated + * during BT voice/ACL link. + * + * Range: 0 - 255 (%) + */ + CONF_SG_AUTO_SCAN_PROBE_REQ, + + /* + * Compensation percentage of probe requests when active scan initiated + * during BT voice + * + * Range: 0 - 255 (%) + */ + CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3, + + /* + * Compensation percentage of WLAN active scan window if initiated + * during BT A2DP + * + * Range: 0 - 1000 (%) + */ + CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT A2DP BR + * + * Range: 0 - 1000 (%) + */ + CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT A2DP EDR + * + * Range: 0 - 1000 (%) + */ + CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR, + + /* + * Compensation percentage of WLAN passive scan window if initiated + * during BT voice + * + * Range: 0 - 1000 (%) + */ + CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3, + + /* TODO: explain these values */ + CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN, + CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN, + CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN, + + /* + * Defines whether the SG will force WLAN host to enter/exit PSM + * + * Range: 1 - SG can force, 0 - host handles PSM + */ + CONF_SG_STA_FORCE_PS_IN_BT_SCO, + + /* + * Defines antenna configuration (single/dual antenna) + * + * Range: 0 - single antenna, 1 - dual antenna + */ + CONF_SG_ANTENNA_CONFIGURATION, + + /* + * The threshold (percent) of max consecutive beacon misses before + * increasing priority of beacon reception. + * + * Range: 0 - 100 (%) + */ + CONF_SG_BEACON_MISS_PERCENT, + + /* + * Protection time of the DHCP procedure. + * + * Range: 0 - 100000 (ms) + */ + CONF_SG_DHCP_TIME, + + /* + * RX guard time before the beginning of a new BT voice frame during + * which no new WLAN trigger frame is transmitted. + * + * Range: 0 - 100000 (us) + */ + CONF_SG_RXT, + + /* + * TX guard time before the beginning of a new BT voice frame during + * which no new WLAN frame is transmitted. + * + * Range: 0 - 100000 (us) + */ + + CONF_SG_TXT, + + /* + * Enable adaptive RXT/TXT algorithm. If disabled, the host values + * will be utilized. 
+ * + * Range: 0 - disable, 1 - enable + */ + CONF_SG_ADAPTIVE_RXT_TXT, + + /* TODO: explain this value */ + CONF_SG_GENERAL_USAGE_BIT_MAP, + + /* + * Number of consecutive BT voice frames not interrupted by WLAN + * + * Range: 0 - 100 + */ + CONF_SG_HV3_MAX_SERVED, + + /* + * The used WLAN legacy service period during active BT ACL link + * + * Range: 0 - 255 (ms) + */ + CONF_SG_PS_POLL_TIMEOUT, + + /* + * The used WLAN UPSD service period during active BT ACL link + * + * Range: 0 - 255 (ms) + */ + CONF_SG_UPSD_TIMEOUT, + + CONF_SG_CONSECUTIVE_CTS_THRESHOLD, + CONF_SG_STA_RX_WINDOW_AFTER_DTIM, + CONF_SG_STA_CONNECTION_PROTECTION_TIME, + + /* AP params */ + CONF_AP_BEACON_MISS_TX, + CONF_AP_RX_WINDOW_AFTER_BEACON, + CONF_AP_BEACON_WINDOW_INTERVAL, + CONF_AP_CONNECTION_PROTECTION_TIME, + CONF_AP_BT_ACL_VAL_BT_SERVE_TIME, + CONF_AP_BT_ACL_VAL_WL_SERVE_TIME, + + /* CTS Diluting params */ + CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH, + CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER, + + CONF_SG_TEMP_PARAM_1, + CONF_SG_TEMP_PARAM_2, + CONF_SG_TEMP_PARAM_3, + CONF_SG_TEMP_PARAM_4, + CONF_SG_TEMP_PARAM_5, + CONF_SG_TEMP_PARAM_6, + CONF_SG_TEMP_PARAM_7, + CONF_SG_TEMP_PARAM_8, + CONF_SG_TEMP_PARAM_9, + CONF_SG_TEMP_PARAM_10, + + CONF_SG_PARAMS_MAX, + CONF_SG_PARAMS_ALL = 0xff +}; + +struct conf_sg_settings { + u32 params[CONF_SG_PARAMS_MAX]; + u8 state; +} __packed; + +enum conf_rx_queue_type { + CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */ + CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */ +}; + +struct conf_rx_settings { + /* + * The maximum amount of time, in TU, before the + * firmware discards the MSDU. + * + * Range: 0 - 0xFFFFFFFF + */ + u32 rx_msdu_life_time; + + /* + * Packet detection threshold in the PHY. + * + * FIXME: details unknown. + */ + u32 packet_detection_threshold; + + /* + * The longest time the STA will wait to receive traffic from the AP + * after a PS-poll has been transmitted. + * + * Range: 0 - 200000 + */ + u16 ps_poll_timeout; + /* + * The longest time the STA will wait to receive traffic from the AP + * after a frame has been sent from an UPSD enabled queue. + * + * Range: 0 - 200000 + */ + u16 upsd_timeout; + + /* + * The number of octets in an MPDU, below which an RTS/CTS + * handshake is not performed. + * + * Range: 0 - 4096 + */ + u16 rts_threshold; + + /* + * The RX Clear Channel Assessment threshold in the PHY + * (the energy threshold). + * + * Range: ENABLE_ENERGY_D == 0x140A + * DISABLE_ENERGY_D == 0xFFEF + */ + u16 rx_cca_threshold; + + /* + * Occupied Rx mem-blocks number which requires interrupting the host + * (0 = no buffering, 0xffff = disabled). + * + * Range: u16 + */ + u16 irq_blk_threshold; + + /* + * Rx packets number which requires interrupting the host + * (0 = no buffering). + * + * Range: u16 + */ + u16 irq_pkt_threshold; + + /* + * Max time in msec the FW may delay RX-Complete interrupt. + * + * Range: 1 - 100 + */ + u16 irq_timeout; + + /* + * The RX queue type. + * + * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY, + */ + u8 queue_type; +} __packed; + +#define CONF_TX_MAX_RATE_CLASSES 10 + +#define CONF_TX_RATE_MASK_UNSPECIFIED 0 +#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS) +#define CONF_TX_RATE_RETRY_LIMIT 10 + +/* basic rates for p2p operations (probe req/resp, etc.) */ +#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS + +/* + * Rates supported for data packets when operating as STA/AP. Note the absence + * of the 22Mbps rate. 
There is a FW limitation on 12 rates so we must drop + * one. The rate dropped is not mandatory under any operating mode. + */ +#define CONF_TX_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ + CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \ + CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \ + CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \ + CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ + CONF_HW_BIT_RATE_54MBPS) + +#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ + CONF_HW_BIT_RATE_11MBPS) + +#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \ + CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \ + CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ + CONF_HW_BIT_RATE_54MBPS) + +#define CONF_TX_MCS_RATES (CONF_HW_BIT_RATE_MCS_0 | \ + CONF_HW_BIT_RATE_MCS_1 | CONF_HW_BIT_RATE_MCS_2 | \ + CONF_HW_BIT_RATE_MCS_3 | CONF_HW_BIT_RATE_MCS_4 | \ + CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \ + CONF_HW_BIT_RATE_MCS_7) + +#define CONF_TX_MIMO_RATES (CONF_HW_BIT_RATE_MCS_8 | \ + CONF_HW_BIT_RATE_MCS_9 | CONF_HW_BIT_RATE_MCS_10 | \ + CONF_HW_BIT_RATE_MCS_11 | CONF_HW_BIT_RATE_MCS_12 | \ + CONF_HW_BIT_RATE_MCS_13 | CONF_HW_BIT_RATE_MCS_14 | \ + CONF_HW_BIT_RATE_MCS_15) + +/* + * Default rates for management traffic when operating in AP mode. This + * should be configured according to the basic rate set of the AP + */ +#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS) + +/* default rates for working as IBSS (11b and OFDM) */ +#define CONF_TX_IBSS_DEFAULT_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ + CONF_HW_BIT_RATE_11MBPS | CONF_TX_OFDM_RATES); + +struct conf_tx_rate_class { + + /* + * The rates enabled for this rate class. + * + * Range: CONF_HW_BIT_RATE_* bit mask + */ + u32 enabled_rates; + + /* + * The dot11 short retry limit used for TX retries. + * + * Range: u8 + */ + u8 short_retry_limit; + + /* + * The dot11 long retry limit used for TX retries. + * + * Range: u8 + */ + u8 long_retry_limit; + + /* + * Flags controlling the attributes of TX transmission. + * + * Range: bit 0: Truncate - when set, FW attempts to send a frame stop + * when the total valid per-rate attempts have + * been exhausted; otherwise transmissions + * will continue at the lowest available rate + * until the appropriate one of the + * short_retry_limit, long_retry_limit, + * dot11_max_transmit_msdu_life_time, or + * max_tx_life_time, is exhausted. + * 1: Preamble Override - indicates if the preamble type + * should be used in TX. + * 2: Preamble Type - the type of the preamble to be used by + * the policy (0 - long preamble, 1 - short preamble. + */ + u8 aflags; +} __packed; + +#define CONF_TX_MAX_AC_COUNT 4 + +/* Slot number setting to start transmission at PIFS interval */ +#define CONF_TX_AIFS_PIFS 1 +/* Slot number setting to start transmission at DIFS interval normal + * DCF access */ +#define CONF_TX_AIFS_DIFS 2 + + +enum conf_tx_ac { + CONF_TX_AC_BE = 0, /* best effort / legacy */ + CONF_TX_AC_BK = 1, /* background */ + CONF_TX_AC_VI = 2, /* video */ + CONF_TX_AC_VO = 3, /* voice */ + CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */ + CONF_TX_AC_ANY_TID = 0xff +}; + +struct conf_tx_ac_category { + /* + * The AC class identifier. 
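A rate class ties one of the CONF_HW_BIT_RATE_* masks above to retry behaviour; conf_tx_settings further down embeds one such class (sta_rc_conf) for station mode. The initializer below is a hedged illustration using only symbols defined in this header — example_sta_rc is not a real driver object:

/* Illustrative station-mode rate class, built from the masks above. */
static const struct conf_tx_rate_class example_sta_rc = {
	.enabled_rates     = CONF_TX_ENABLED_RATES,   /* the 12 allowed rates */
	.short_retry_limit = CONF_TX_RATE_RETRY_LIMIT,
	.long_retry_limit  = CONF_TX_RATE_RETRY_LIMIT,
	.aflags            = 0,  /* no truncation, no preamble override */
};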
+ * + * Range: enum conf_tx_ac + */ + u8 ac; + + /* + * The contention window minimum size (in slots) for the access + * class. + * + * Range: u8 + */ + u8 cw_min; + + /* + * The contention window maximum size (in slots) for the access + * class. + * + * Range: u8 + */ + u16 cw_max; + + /* + * The AIF value (in slots) for the access class. + * + * Range: u8 + */ + u8 aifsn; + + /* + * The TX Op Limit (in microseconds) for the access class. + * + * Range: u16 + */ + u16 tx_op_limit; +} __packed; + +#define CONF_TX_MAX_TID_COUNT 8 + +/* Allow TX BA on all TIDs but 6,7. These are currently reserved in the FW */ +#define CONF_TX_BA_ENABLED_TID_BITMAP 0x3F + +enum { + CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/ + CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/ + CONF_CHANNEL_TYPE_HCCA = 2, /* HCCA*/ +}; + +enum { + CONF_PS_SCHEME_LEGACY = 0, + CONF_PS_SCHEME_UPSD_TRIGGER = 1, + CONF_PS_SCHEME_LEGACY_PSPOLL = 2, + CONF_PS_SCHEME_SAPSD = 3, +}; + +enum { + CONF_ACK_POLICY_LEGACY = 0, + CONF_ACK_POLICY_NO_ACK = 1, + CONF_ACK_POLICY_BLOCK = 2, +}; + + +struct conf_tx_tid { + u8 queue_id; + u8 channel_type; + u8 tsid; + u8 ps_scheme; + u8 ack_policy; + u32 apsd_conf[2]; +} __packed; + +struct conf_tx_settings { + /* + * The TX ED value for TELEC Enable/Disable. + * + * Range: 0, 1 + */ + u8 tx_energy_detection; + + /* + * Configuration for rate classes for TX (currently only one + * rate class supported). Used in non-AP mode. + */ + struct conf_tx_rate_class sta_rc_conf; + + /* + * Configuration for access categories for TX rate control. + */ + u8 ac_conf_count; + struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; + + /* + * AP-mode - allow this number of TX retries to a station before an + * event is triggered from FW. + * In AP-mode the hlids of unreachable stations are given in the + * "sta_tx_retry_exceeded" member in the event mailbox. + */ + u8 max_tx_retries; + + /* + * AP-mode - after this number of seconds a connected station is + * considered inactive. + */ + u16 ap_aging_period; + + /* + * Configuration for TID parameters. + */ + u8 tid_conf_count; + struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT]; + + /* + * The TX fragmentation threshold. + * + * Range: u16 + */ + u16 frag_threshold; + + /* + * Max time in msec the FW may delay frame TX-Complete interrupt. + * + * Range: u16 + */ + u16 tx_compl_timeout; + + /* + * Completed TX packet count which requires to issue the TX-Complete + * interrupt. 
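The ac_conf[] table in struct conf_tx_settings normally carries 802.11 EDCA parameters, one conf_tx_ac_category entry per access class. The table below is an illustrative sketch: example_ac_conf is a made-up name, the TXOP values are the usual WMM 3008/1504 µs for video/voice, and none of the numbers are asserted to match any shipping configuration:

/* Illustrative EDCA table; values are examples, not a real configuration. */
static const struct conf_tx_ac_category example_ac_conf[CONF_TX_MAX_AC_COUNT] = {
	[CONF_TX_AC_BE] = { .ac = CONF_TX_AC_BE, .cw_min = 15, .cw_max = 63,
			    .aifsn = 3, .tx_op_limit = 0 },
	[CONF_TX_AC_BK] = { .ac = CONF_TX_AC_BK, .cw_min = 15, .cw_max = 63,
			    .aifsn = 7, .tx_op_limit = 0 },
	[CONF_TX_AC_VI] = { .ac = CONF_TX_AC_VI, .cw_min = 15, .cw_max = 63,
			    .aifsn = CONF_TX_AIFS_PIFS, .tx_op_limit = 3008 },
	[CONF_TX_AC_VO] = { .ac = CONF_TX_AC_VO, .cw_min = 15, .cw_max = 63,
			    .aifsn = CONF_TX_AIFS_PIFS, .tx_op_limit = 1504 },
};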
+ * + * Range: u16 + */ + u16 tx_compl_threshold; + + /* + * The rate used for control messages and scanning on the 2.4GHz band + * + * Range: CONF_HW_BIT_RATE_* bit mask + */ + u32 basic_rate; + + /* + * The rate used for control messages and scanning on the 5GHz band + * + * Range: CONF_HW_BIT_RATE_* bit mask + */ + u32 basic_rate_5; + + /* + * TX retry limits for templates + */ + u8 tmpl_short_retry_limit; + u8 tmpl_long_retry_limit; + + /* Time in ms for Tx watchdog timer to expire */ + u32 tx_watchdog_timeout; + + /* + * when a slow link has this much packets pending, it becomes a low + * priority link, scheduling-wise + */ + u8 slow_link_thold; + + /* + * when a fast link has this much packets pending, it becomes a low + * priority link, scheduling-wise + */ + u8 fast_link_thold; +} __packed; + +enum { + CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/ + CONF_WAKE_UP_EVENT_DTIM = 0x02, /* Wake on every DTIM*/ + CONF_WAKE_UP_EVENT_N_DTIM = 0x04, /* Wake every Nth DTIM */ + CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */ + CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F +}; + +#define CONF_MAX_BCN_FILT_IE_COUNT 32 + +#define CONF_BCN_RULE_PASS_ON_CHANGE BIT(0) +#define CONF_BCN_RULE_PASS_ON_APPEARANCE BIT(1) + +#define CONF_BCN_IE_OUI_LEN 3 +#define CONF_BCN_IE_VER_LEN 2 + +struct conf_bcn_filt_rule { + /* + * IE number to which to associate a rule. + * + * Range: u8 + */ + u8 ie; + + /* + * Rule to associate with the specific ie. + * + * Range: CONF_BCN_RULE_PASS_ON_* + */ + u8 rule; + + /* + * OUI for the vendor specifie IE (221) + */ + u8 oui[CONF_BCN_IE_OUI_LEN]; + + /* + * Type for the vendor specifie IE (221) + */ + u8 type; + + /* + * Version for the vendor specifie IE (221) + */ + u8 version[CONF_BCN_IE_VER_LEN]; +} __packed; + +#define CONF_MAX_RSSI_SNR_TRIGGERS 8 + +enum { + CONF_TRIG_METRIC_RSSI_BEACON = 0, + CONF_TRIG_METRIC_RSSI_DATA, + CONF_TRIG_METRIC_SNR_BEACON, + CONF_TRIG_METRIC_SNR_DATA +}; + +enum { + CONF_TRIG_EVENT_TYPE_LEVEL = 0, + CONF_TRIG_EVENT_TYPE_EDGE +}; + +enum { + CONF_TRIG_EVENT_DIR_LOW = 0, + CONF_TRIG_EVENT_DIR_HIGH, + CONF_TRIG_EVENT_DIR_BIDIR +}; + +struct conf_sig_weights { + + /* + * RSSI from beacons average weight. + * + * Range: u8 + */ + u8 rssi_bcn_avg_weight; + + /* + * RSSI from data average weight. + * + * Range: u8 + */ + u8 rssi_pkt_avg_weight; + + /* + * SNR from beacons average weight. + * + * Range: u8 + */ + u8 snr_bcn_avg_weight; + + /* + * SNR from data average weight. + * + * Range: u8 + */ + u8 snr_pkt_avg_weight; +} __packed; + +enum conf_bcn_filt_mode { + CONF_BCN_FILT_MODE_DISABLED = 0, + CONF_BCN_FILT_MODE_ENABLED = 1 +}; + +enum conf_bet_mode { + CONF_BET_MODE_DISABLE = 0, + CONF_BET_MODE_ENABLE = 1, +}; + +struct conf_conn_settings { + /* + * Firmware wakeup conditions configuration. The host may set only + * one bit. + * + * Range: CONF_WAKE_UP_EVENT_* + */ + u8 wake_up_event; + + /* + * Listen interval for beacons or Dtims. + * + * Range: 0 for beacon and Dtim wakeup + * 1-10 for x Dtims + * 1-255 for x beacons + */ + u8 listen_interval; + + /* + * Firmware wakeup conditions during suspend + * Range: CONF_WAKE_UP_EVENT_* + */ + u8 suspend_wake_up_event; + + /* + * Listen interval during suspend. + * Currently will be in DTIMs (1-10) + * + */ + u8 suspend_listen_interval; + + /* + * Enable or disable the beacon filtering. + * + * Range: CONF_BCN_FILT_MODE_* + */ + u8 bcn_filt_mode; + + /* + * Configure Beacon filter pass-thru rules. 
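The bcn_filt_ie[] array that follows pairs an information-element number with one of the CONF_BCN_RULE_PASS_ON_* flags above; for the vendor-specific IE (221) the oui/type/version fields narrow the match further. A hedged sketch of such a table — the element IDs are the standard ones, but the rule choices, the example_bcn_filt name, and the vendor-matching details are illustrative only:

/* Illustrative beacon-filter rules; a real driver tunes this list itself. */
static const struct conf_bcn_filt_rule example_bcn_filt[] = {
	{
		.ie   = 37,   /* Channel Switch Announcement */
		.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
	},
	{
		.ie   = 61,   /* HT Operation */
		.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
	},
	{
		.ie   = 221,  /* vendor specific, e.g. the WMM parameter IE */
		.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
		.oui  = { 0x00, 0x50, 0xf2 },  /* Microsoft/WMM OUI */
		.type = 2,                     /* WMM parameter element */
		/* .version left zeroed; exact matching is firmware-defined */
	},
};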
+ */ + u8 bcn_filt_ie_count; + struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT]; + + /* + * The number of consecutive beacons to lose, before the firmware + * becomes out of synch. + * + * Range: u32 + */ + u32 synch_fail_thold; + + /* + * After out-of-synch, the number of TU's to wait without a further + * received beacon (or probe response) before issuing the BSS_EVENT_LOSE + * event. + * + * Range: u32 + */ + u32 bss_lose_timeout; + + /* + * Beacon receive timeout. + * + * Range: u32 + */ + u32 beacon_rx_timeout; + + /* + * Broadcast receive timeout. + * + * Range: u32 + */ + u32 broadcast_timeout; + + /* + * Enable/disable reception of broadcast packets in power save mode + * + * Range: 1 - enable, 0 - disable + */ + u8 rx_broadcast_in_ps; + + /* + * Consecutive PS Poll failures before sending event to driver + * + * Range: u8 + */ + u8 ps_poll_threshold; + + /* + * Configuration of signal average weights. + */ + struct conf_sig_weights sig_weights; + + /* + * Specifies if beacon early termination procedure is enabled or + * disabled. + * + * Range: CONF_BET_MODE_* + */ + u8 bet_enable; + + /* + * Specifies the maximum number of consecutive beacons that may be + * early terminated. After this number is reached at least one full + * beacon must be correctly received in FW before beacon ET + * resumes. + * + * Range 0 - 255 + */ + u8 bet_max_consecutive; + + /* + * Specifies the maximum number of times to try PSM entry if it fails + * (if sending the appropriate null-func message fails.) + * + * Range 0 - 255 + */ + u8 psm_entry_retries; + + /* + * Specifies the maximum number of times to try PSM exit if it fails + * (if sending the appropriate null-func message fails.) + * + * Range 0 - 255 + */ + u8 psm_exit_retries; + + /* + * Specifies the maximum number of times to try transmit the PSM entry + * null-func frame for each PSM entry attempt + * + * Range 0 - 255 + */ + u8 psm_entry_nullfunc_retries; + + /* + * Specifies the dynamic PS timeout in ms that will be used + * by the FW when in AUTO_PS mode + */ + u16 dynamic_ps_timeout; + + /* + * Specifies whether dynamic PS should be disabled and PSM forced. + * This is required for certain WiFi certification tests. + */ + u8 forced_ps; + + /* + * + * Specifies the interval of the connection keep-alive null-func + * frame in ms. + * + * Range: 1000 - 3600000 + */ + u32 keep_alive_interval; + + /* + * Maximum listen interval supported by the driver in units of beacons. + * + * Range: u16 + */ + u8 max_listen_interval; + + /* + * Default sleep authorization for a new STA interface. This determines + * whether we can go to ELP. 
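The wake_up_event and listen_interval fields above have to be kept consistent: the interval is counted in DTIMs or beacons depending on the selected event, and the firmware uses a different event type when the interval is one. The dtim_interval debugfs handler later in this patch implements exactly that rule; the helper below is a hedged restatement of it (example_set_dtim_listen_interval is not a real driver function):

/* Illustrative helper mirroring the dtim_interval debugfs logic below. */
static int example_set_dtim_listen_interval(struct wl1271 *wl, u8 n_dtim)
{
	if (n_dtim < 1 || n_dtim > 10)	/* documented range for DTIM wakeup */
		return -ERANGE;

	wl->conf.conn.listen_interval = n_dtim;
	wl->conf.conn.wake_up_event = (n_dtim == 1) ?
		CONF_WAKE_UP_EVENT_DTIM : CONF_WAKE_UP_EVENT_N_DTIM;

	/* takes effect the next time the firmware (re)enters PSM */
	return 0;
}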
+ */ + u8 sta_sleep_auth; + + /* + * Default RX BA Activity filter configuration + */ + u8 suspend_rx_ba_activity; +} __packed; + +enum { + CONF_REF_CLK_19_2_E, + CONF_REF_CLK_26_E, + CONF_REF_CLK_38_4_E, + CONF_REF_CLK_52_E, + CONF_REF_CLK_38_4_M_XTAL, + CONF_REF_CLK_26_M_XTAL, +}; + +enum single_dual_band_enum { + CONF_SINGLE_BAND, + CONF_DUAL_BAND +}; + +#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15 +#define CONF_NUMBER_OF_SUB_BANDS_5 7 +#define CONF_NUMBER_OF_RATE_GROUPS 6 +#define CONF_NUMBER_OF_CHANNELS_2_4 14 +#define CONF_NUMBER_OF_CHANNELS_5 35 + +struct conf_itrim_settings { + /* enable dco itrim */ + u8 enable; + + /* moderation timeout in microsecs from the last TX */ + u32 timeout; +} __packed; + +enum conf_fast_wakeup { + CONF_FAST_WAKEUP_ENABLE, + CONF_FAST_WAKEUP_DISABLE, +}; + +struct conf_pm_config_settings { + /* + * Host clock settling time + * + * Range: 0 - 30000 us + */ + u32 host_clk_settling_time; + + /* + * Host fast wakeup support + * + * Range: enum conf_fast_wakeup + */ + u8 host_fast_wakeup_support; +} __packed; + +struct conf_roam_trigger_settings { + /* + * The minimum interval between two trigger events. + * + * Range: 0 - 60000 ms + */ + u16 trigger_pacing; + + /* + * The weight for rssi/beacon average calculation + * + * Range: 0 - 255 + */ + u8 avg_weight_rssi_beacon; + + /* + * The weight for rssi/data frame average calculation + * + * Range: 0 - 255 + */ + u8 avg_weight_rssi_data; + + /* + * The weight for snr/beacon average calculation + * + * Range: 0 - 255 + */ + u8 avg_weight_snr_beacon; + + /* + * The weight for snr/data frame average calculation + * + * Range: 0 - 255 + */ + u8 avg_weight_snr_data; +} __packed; + +struct conf_scan_settings { + /* + * The minimum time to wait on each channel for active scans + * This value will be used whenever there's a connected interface. + * + * Range: u32 tu/1000 + */ + u32 min_dwell_time_active; + + /* + * The maximum time to wait on each channel for active scans + * This value will be currently used whenever there's a + * connected interface. It shouldn't exceed 30000 (~30ms) to avoid + * possible interference of voip traffic going on while scanning. + * + * Range: u32 tu/1000 + */ + u32 max_dwell_time_active; + + /* The minimum time to wait on each channel for active scans + * when it's possible to have longer scan dwell times. + * Currently this is used whenever we're idle on all interfaces. + * Longer dwell times improve detection of networks within a + * single scan. + * + * Range: u32 tu/1000 + */ + u32 min_dwell_time_active_long; + + /* The maximum time to wait on each channel for active scans + * when it's possible to have longer scan dwell times. + * See min_dwell_time_active_long + * + * Range: u32 tu/1000 + */ + u32 max_dwell_time_active_long; + + /* time to wait on the channel for passive scans (in TU/1000) */ + u32 dwell_time_passive; + + /* time to wait on the channel for DFS scans (in TU/1000) */ + u32 dwell_time_dfs; + + /* + * Number of probe requests to transmit on each active scan channel + * + * Range: u8 + */ + u16 num_probe_reqs; + + /* + * Scan trigger (split scan) timeout. The FW will split the scan + * operation into slices of the given time and allow the FW to schedule + * other tasks in between. + * + * Range: u32 Microsecs + */ + u32 split_scan_timeout; +} __packed; + +struct conf_sched_scan_settings { + /* + * The base time to wait on the channel for active scans (in TU/1000). 
+ * The minimum dwell time is calculated according to this: + * min_dwell_time = base + num_of_probes_to_be_sent * delta_per_probe + * The maximum dwell time is calculated according to this: + * max_dwell_time = min_dwell_time + max_dwell_time_delta + */ + u32 base_dwell_time; + + /* The delta between the min dwell time and max dwell time for + * active scans (in TU/1000s). The max dwell time is used by the FW once + * traffic is detected on the channel. + */ + u32 max_dwell_time_delta; + + /* Delta added to min dwell time per each probe in 2.4 GHz (TU/1000) */ + u32 dwell_time_delta_per_probe; + + /* Delta added to min dwell time per each probe in 5 GHz (TU/1000) */ + u32 dwell_time_delta_per_probe_5; + + /* time to wait on the channel for passive scans (in TU/1000) */ + u32 dwell_time_passive; + + /* time to wait on the channel for DFS scans (in TU/1000) */ + u32 dwell_time_dfs; + + /* number of probe requests to send on each channel in active scans */ + u8 num_probe_reqs; + + /* RSSI threshold to be used for filtering */ + s8 rssi_threshold; + + /* SNR threshold to be used for filtering */ + s8 snr_threshold; +} __packed; + +struct conf_ht_setting { + u8 rx_ba_win_size; + u8 tx_ba_win_size; + u16 inactivity_timeout; + + /* bitmap of enabled TIDs for TX BA sessions */ + u8 tx_ba_tid_bitmap; +} __packed; + +struct conf_memory_settings { + /* Number of stations supported in IBSS mode */ + u8 num_stations; + + /* Number of ssid profiles used in IBSS mode */ + u8 ssid_profiles; + + /* Number of memory buffers allocated to rx pool */ + u8 rx_block_num; + + /* Minimum number of blocks allocated to tx pool */ + u8 tx_min_block_num; + + /* Disable/Enable dynamic memory */ + u8 dynamic_memory; + + /* + * Minimum required free tx memory blocks in order to assure optimum + * performance + * + * Range: 0-120 + */ + u8 min_req_tx_blocks; + + /* + * Minimum required free rx memory blocks in order to assure optimum + * performance + * + * Range: 0-120 + */ + u8 min_req_rx_blocks; + + /* + * Minimum number of mem blocks (free+used) guaranteed for TX + * + * Range: 0-120 + */ + u8 tx_min; +} __packed; + +struct conf_fm_coex { + u8 enable; + u8 swallow_period; + u8 n_divider_fref_set_1; + u8 n_divider_fref_set_2; + u16 m_divider_fref_set_1; + u16 m_divider_fref_set_2; + u32 coex_pll_stabilization_time; + u16 ldo_stabilization_time; + u8 fm_disturbed_band_margin; + u8 swallow_clk_diff; +} __packed; + +struct conf_rx_streaming_settings { + /* + * RX Streaming duration (in msec) from last tx/rx + * + * Range: u32 + */ + u32 duration; + + /* + * Bitmap of tids to be polled during RX streaming. + * (Note: it doesn't look like it really matters) + * + * Range: 0x1-0xff + */ + u8 queues; + + /* + * RX Streaming interval. 
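To make the sched-scan dwell-time formula above concrete: base_dwell_time, dwell_time_delta_per_probe (or its 5 GHz variant) and max_dwell_time_delta are all expressed in TU/1000, so the bounds the firmware ends up with can be computed as below. The helper name and the numbers in the worked example are invented for illustration:

/* Worked example of the dwell-time formula documented above (TU/1000). */
static void example_sched_scan_dwell(const struct conf_sched_scan_settings *c,
				     u8 n_probes,
				     u32 *min_dwell, u32 *max_dwell)
{
	*min_dwell = c->base_dwell_time +
		     n_probes * c->dwell_time_delta_per_probe;
	*max_dwell = *min_dwell + c->max_dwell_time_delta;
}

/*
 * e.g. base = 7500, delta_per_probe = 1000, max_delta = 10000, 2 probes:
 *   min_dwell = 7500 + 2 * 1000  =  9500  (~9.5 TU)
 *   max_dwell = 9500 + 10000     = 19500  (~19.5 TU)
 */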
+ * (Note:this value is also used as the rx streaming timeout) + * Range: 0 (disabled), 10 - 100 + */ + u8 interval; + + /* + * enable rx streaming also when there is no coex activity + */ + u8 always; +} __packed; + +#define CONF_FWLOG_MIN_MEM_BLOCKS 2 +#define CONF_FWLOG_MAX_MEM_BLOCKS 16 + +struct conf_fwlog { + /* Continuous or on-demand */ + u8 mode; + + /* + * Number of memory blocks dedicated for the FW logger + * + * Range: 2-16, or 0 to disable the FW logger + */ + u8 mem_blocks; + + /* Minimum log level threshold */ + u8 severity; + + /* Include/exclude timestamps from the log messages */ + u8 timestamp; + + /* See enum wl1271_fwlogger_output */ + u8 output; + + /* Regulates the frequency of log messages */ + u8 threshold; +} __packed; + +#define ACX_RATE_MGMT_NUM_OF_RATES 13 +struct conf_rate_policy_settings { + u16 rate_retry_score; + u16 per_add; + u16 per_th1; + u16 per_th2; + u16 max_per; + u8 inverse_curiosity_factor; + u8 tx_fail_low_th; + u8 tx_fail_high_th; + u8 per_alpha_shift; + u8 per_add_shift; + u8 per_beta1_shift; + u8 per_beta2_shift; + u8 rate_check_up; + u8 rate_check_down; + u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES]; +} __packed; + +struct conf_hangover_settings { + u32 recover_time; + u8 hangover_period; + u8 dynamic_mode; + u8 early_termination_mode; + u8 max_period; + u8 min_period; + u8 increase_delta; + u8 decrease_delta; + u8 quiet_time; + u8 increase_time; + u8 window_size; +} __packed; + +struct conf_recovery_settings { + /* BUG() on fw recovery */ + u8 bug_on_recovery; + + /* Prevent HW recovery. FW will remain stuck. */ + u8 no_recovery; +} __packed; + +/* + * The conf version consists of 4 bytes. The two MSB are the wlcore + * version, the two LSB are the lower driver's private conf + * version. + */ +#define WLCORE_CONF_VERSION (0x0006 << 16) +#define WLCORE_CONF_MASK 0xffff0000 +#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \ + sizeof(struct wlcore_conf)) + +struct wlcore_conf_header { + __le32 magic; + __le32 version; + __le32 checksum; +} __packed; + +struct wlcore_conf { + struct conf_sg_settings sg; + struct conf_rx_settings rx; + struct conf_tx_settings tx; + struct conf_conn_settings conn; + struct conf_itrim_settings itrim; + struct conf_pm_config_settings pm_config; + struct conf_roam_trigger_settings roam_trigger; + struct conf_scan_settings scan; + struct conf_sched_scan_settings sched_scan; + struct conf_ht_setting ht; + struct conf_memory_settings mem; + struct conf_fm_coex fm_coex; + struct conf_rx_streaming_settings rx_streaming; + struct conf_fwlog fwlog; + struct conf_rate_policy_settings rate; + struct conf_hangover_settings hangover; + struct conf_recovery_settings recovery; +} __packed; + +struct wlcore_conf_file { + struct wlcore_conf_header header; + struct wlcore_conf core; + u8 priv[0]; +} __packed; + +#endif diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h new file mode 100644 index 000000000..27bfb7c11 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/debug.h @@ -0,0 +1,112 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2011 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __DEBUG_H__ +#define __DEBUG_H__ + +#include +#include + +#define DRIVER_NAME "wlcore" +#define DRIVER_PREFIX DRIVER_NAME ": " + +enum { + DEBUG_NONE = 0, + DEBUG_IRQ = BIT(0), + DEBUG_SPI = BIT(1), + DEBUG_BOOT = BIT(2), + DEBUG_MAILBOX = BIT(3), + DEBUG_TESTMODE = BIT(4), + DEBUG_EVENT = BIT(5), + DEBUG_TX = BIT(6), + DEBUG_RX = BIT(7), + DEBUG_SCAN = BIT(8), + DEBUG_CRYPT = BIT(9), + DEBUG_PSM = BIT(10), + DEBUG_MAC80211 = BIT(11), + DEBUG_CMD = BIT(12), + DEBUG_ACX = BIT(13), + DEBUG_SDIO = BIT(14), + DEBUG_FILTERS = BIT(15), + DEBUG_ADHOC = BIT(16), + DEBUG_AP = BIT(17), + DEBUG_PROBE = BIT(18), + DEBUG_IO = BIT(19), + DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), + DEBUG_ALL = ~0, +}; + +extern u32 wl12xx_debug_level; + +#define DEBUG_DUMP_LIMIT 1024 + +#define wl1271_error(fmt, arg...) \ + pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) + +#define wl1271_warning(fmt, arg...) \ + pr_warn(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + +#define wl1271_notice(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_info(fmt, arg...) \ + pr_info(DRIVER_PREFIX fmt "\n", ##arg) + +/* define the debug macro differently if dynamic debug is supported */ +#if defined(CONFIG_DYNAMIC_DEBUG) +#define wl1271_debug(level, fmt, arg...) \ + do { \ + if (unlikely(level & wl12xx_debug_level)) \ + dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \ + } while (0) +#else +#define wl1271_debug(level, fmt, arg...) \ + do { \ + if (unlikely(level & wl12xx_debug_level)) \ + printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \ + ##arg); \ + } while (0) +#endif + +#define wl1271_dump(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump_debug(DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + 0); \ + } while (0) + +#define wl1271_dump_ascii(level, prefix, buf, len) \ + do { \ + if (level & wl12xx_debug_level) \ + print_hex_dump_debug(DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + true); \ + } while (0) + +#endif /* __DEBUG_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c new file mode 100644 index 000000000..eb43f94a1 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -0,0 +1,1339 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
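Returning briefly to the debug.h macros above: wl12xx_debug_level is a global bitmask of the DEBUG_* categories (the driver exposes it as a module parameter), and wl1271_debug()/wl1271_dump() only emit output for categories whose bit is currently set. A hedged usage sketch (example_debug_usage is not a real function):

/* Illustrative use of the logging helpers defined in debug.h above. */
static void example_debug_usage(const void *frame, size_t len)
{
	/* printed only when DEBUG_TX is set in wl12xx_debug_level */
	wl1271_debug(DEBUG_TX, "queueing frame, %zu bytes", len);

	/* hex dump of the frame, truncated to DEBUG_DUMP_LIMIT bytes */
	wl1271_dump(DEBUG_TX, "tx frame: ", frame, len);
}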
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "debugfs.h" + +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "acx.h" +#include "ps.h" +#include "io.h" +#include "tx.h" +#include "hw_ops.h" + +/* ms */ +#define WL1271_DEBUGFS_STATS_LIFETIME 1000 + +#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE)) + +/* debugfs macros idea from mac80211 */ +int wl1271_format_buffer(char __user *userbuf, size_t count, + loff_t *ppos, char *fmt, ...) +{ + va_list args; + char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; + int res; + + va_start(args, fmt); + res = vscnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} +EXPORT_SYMBOL_GPL(wl1271_format_buffer); + +void wl1271_debugfs_update_stats(struct wl1271 *wl) +{ + int ret; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (!wl->plt && + time_after(jiffies, wl->stats.fw_stats_update + + msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) { + wl1271_acx_statistics(wl, wl->stats.fw_stats); + wl->stats.fw_stats_update = jiffies; + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} +EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats); + +DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count); +DEBUGFS_READONLY_FILE(excessive_retries, "%u", + wl->stats.excessive_retries); + +static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + u32 queue_len; + char buf[20]; + int res; + + queue_len = wl1271_tx_total_queue_count(wl); + + res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +static const struct file_operations tx_queue_len_ops = { + .read = tx_queue_len_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static void chip_op_handler(struct wl1271 *wl, unsigned long value, + void *arg) +{ + int ret; + int (*chip_op) (struct wl1271 *wl); + + if (!arg) { + wl1271_warning("debugfs chip_op_handler with no callback"); + return; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + return; + + chip_op = arg; + chip_op(wl); + + wl1271_ps_elp_sleep(wl); +} + + +static inline void no_write_handler(struct wl1271 *wl, + unsigned long value, + unsigned long param) +{ +} + +#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \ + min_val, max_val, write_handler_locked, \ + write_handler_arg) \ + static ssize_t param##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ + { \ + struct wl1271 *wl = file->private_data; \ + return wl1271_format_buffer(user_buf, count, \ + ppos, "%d\n", \ + wl->conf.conf_sub_struct.param); \ + } \ + \ + static ssize_t param##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ + { \ + struct wl1271 *wl = file->private_data; \ + unsigned long value; \ + int ret; \ + \ + ret = kstrtoul_from_user(user_buf, count, 10, &value); \ + if (ret < 0) { \ + wl1271_warning("illegal value for " #param); \ + return -EINVAL; \ + } \ + \ + if (value < min_val || value > max_val) { \ + wl1271_warning(#param " is not in valid range"); \ + return -ERANGE; \ + } \ + \ + mutex_lock(&wl->mutex); \ + 
wl->conf.conf_sub_struct.param = value; \ + \ + write_handler_locked(wl, value, write_handler_arg); \ + \ + mutex_unlock(&wl->mutex); \ + return count; \ + } \ + \ + static const struct file_operations param##_ops = { \ + .read = param##_read, \ + .write = param##_write, \ + .open = simple_open, \ + .llseek = default_llseek, \ + }; + +WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535, + chip_op_handler, wl1271_acx_init_rx_interrupt) +WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535, + chip_op_handler, wl1271_acx_init_rx_interrupt) +WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100, + chip_op_handler, wl1271_acx_init_rx_interrupt) + +static ssize_t gpio_power_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + + int res; + char buf[10]; + + res = scnprintf(buf, sizeof(buf), "%d\n", state); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static ssize_t gpio_power_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in gpio_power"); + return -EINVAL; + } + + mutex_lock(&wl->mutex); + + if (value) + wl1271_power_on(wl); + else + wl1271_power_off(wl); + + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations gpio_power_ops = { + .read = gpio_power_read, + .write = gpio_power_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t start_recovery_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + mutex_lock(&wl->mutex); + wl12xx_queue_recovery_work(wl); + mutex_unlock(&wl->mutex); + + return count; +} + +static const struct file_operations start_recovery_ops = { + .write = start_recovery_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dynamic_ps_timeout_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(user_buf, count, + ppos, "%d\n", + wl->conf.conn.dynamic_ps_timeout); +} + +static ssize_t dynamic_ps_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in dynamic_ps"); + return -EINVAL; + } + + if (value < 1 || value > 65535) { + wl1271_warning("dyanmic_ps_timeout is not in valid range"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.conn.dynamic_ps_timeout = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* In case we're already in PSM, trigger it again to set new timeout + * immediately without waiting for re-association + */ + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) + wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations dynamic_ps_timeout_ops = { + .read = dynamic_ps_timeout_read, + .write = dynamic_ps_timeout_write, + .open 
= simple_open, + .llseek = default_llseek, +}; + +static ssize_t forced_ps_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(user_buf, count, + ppos, "%d\n", + wl->conf.conn.forced_ps); +} + +static ssize_t forced_ps_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret, ps_mode; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in forced_ps"); + return -EINVAL; + } + + if (value != 1 && value != 0) { + wl1271_warning("forced_ps should be either 0 or 1"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + if (wl->conf.conn.forced_ps == value) + goto out; + + wl->conf.conn.forced_ps = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* In case we're already in PSM, trigger it again to switch mode + * immediately without waiting for re-association + */ + + ps_mode = value ? STATION_POWER_SAVE_MODE : STATION_AUTO_PS_MODE; + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) + wl1271_ps_set_mode(wl, wlvif, ps_mode); + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations forced_ps_ops = { + .read = forced_ps_read, + .write = forced_ps_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t split_scan_timeout_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(user_buf, count, + ppos, "%d\n", + wl->conf.scan.split_scan_timeout / 1000); +} + +static ssize_t split_scan_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in split_scan_timeout"); + return -EINVAL; + } + + if (value == 0) + wl1271_info("split scan will be disabled"); + + mutex_lock(&wl->mutex); + + wl->conf.scan.split_scan_timeout = value * 1000; + + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations split_scan_timeout_ops = { + .read = split_scan_timeout_read, + .write = split_scan_timeout_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t driver_state_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + int res = 0; + ssize_t ret; + char *buf; + struct wl12xx_vif *wlvif; + +#define DRIVER_STATE_BUF_LEN 1024 + + buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&wl->mutex); + +#define DRIVER_STATE_PRINT(x, fmt) \ + (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\ + #x " = " fmt "\n", wl->x)) + +#define DRIVER_STATE_PRINT_GENERIC(x, fmt, args...) 
\ + (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\ + #x " = " fmt "\n", args)) + +#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld") +#define DRIVER_STATE_PRINT_INT(x) DRIVER_STATE_PRINT(x, "%d") +#define DRIVER_STATE_PRINT_STR(x) DRIVER_STATE_PRINT(x, "%s") +#define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx") +#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x") + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + continue; + + DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel, + wlvif->p2p ? "P2P-CL" : "STA"); + } + + wl12xx_for_each_wlvif_ap(wl, wlvif) + DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel, + wlvif->p2p ? "P2P-GO" : "AP"); + + DRIVER_STATE_PRINT_INT(tx_blocks_available); + DRIVER_STATE_PRINT_INT(tx_allocated_blocks); + DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]); + DRIVER_STATE_PRINT_INT(tx_allocated_pkts[1]); + DRIVER_STATE_PRINT_INT(tx_allocated_pkts[2]); + DRIVER_STATE_PRINT_INT(tx_allocated_pkts[3]); + DRIVER_STATE_PRINT_INT(tx_frames_cnt); + DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]); + DRIVER_STATE_PRINT_INT(tx_queue_count[0]); + DRIVER_STATE_PRINT_INT(tx_queue_count[1]); + DRIVER_STATE_PRINT_INT(tx_queue_count[2]); + DRIVER_STATE_PRINT_INT(tx_queue_count[3]); + DRIVER_STATE_PRINT_INT(tx_packets_count); + DRIVER_STATE_PRINT_INT(tx_results_count); + DRIVER_STATE_PRINT_LHEX(flags); + DRIVER_STATE_PRINT_INT(tx_blocks_freed); + DRIVER_STATE_PRINT_INT(rx_counter); + DRIVER_STATE_PRINT_INT(state); + DRIVER_STATE_PRINT_INT(band); + DRIVER_STATE_PRINT_INT(power_level); + DRIVER_STATE_PRINT_INT(sg_enabled); + DRIVER_STATE_PRINT_INT(enable_11a); + DRIVER_STATE_PRINT_INT(noise); + DRIVER_STATE_PRINT_LHEX(ap_fw_ps_map); + DRIVER_STATE_PRINT_LHEX(ap_ps_map); + DRIVER_STATE_PRINT_HEX(quirks); + DRIVER_STATE_PRINT_HEX(irq); + /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */ + DRIVER_STATE_PRINT_HEX(hw_pg_ver); + DRIVER_STATE_PRINT_HEX(irq_flags); + DRIVER_STATE_PRINT_HEX(chip.id); + DRIVER_STATE_PRINT_STR(chip.fw_ver_str); + DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str); + DRIVER_STATE_PRINT_INT(recovery_count); + +#undef DRIVER_STATE_PRINT_INT +#undef DRIVER_STATE_PRINT_LONG +#undef DRIVER_STATE_PRINT_HEX +#undef DRIVER_STATE_PRINT_LHEX +#undef DRIVER_STATE_PRINT_STR +#undef DRIVER_STATE_PRINT +#undef DRIVER_STATE_BUF_LEN + + mutex_unlock(&wl->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); + kfree(buf); + return ret; +} + +static const struct file_operations driver_state_ops = { + .read = driver_state_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t vifs_state_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + int ret, res = 0; + const int buf_size = 4096; + char *buf; + char tmp_buf[64]; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&wl->mutex); + +#define VIF_STATE_PRINT(x, fmt) \ + (res += scnprintf(buf + res, buf_size - res, \ + #x " = " fmt "\n", wlvif->x)) + +#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld") +#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d") +#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s") +#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx") +#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx") +#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x") + +#define VIF_STATE_PRINT_NSTR(x, 
len) \ + do { \ + memset(tmp_buf, 0, sizeof(tmp_buf)); \ + memcpy(tmp_buf, wlvif->x, \ + min_t(u8, len, sizeof(tmp_buf) - 1)); \ + res += scnprintf(buf + res, buf_size - res, \ + #x " = %s\n", tmp_buf); \ + } while (0) + + wl12xx_for_each_wlvif(wl, wlvif) { + VIF_STATE_PRINT_INT(role_id); + VIF_STATE_PRINT_INT(bss_type); + VIF_STATE_PRINT_LHEX(flags); + VIF_STATE_PRINT_INT(p2p); + VIF_STATE_PRINT_INT(dev_role_id); + VIF_STATE_PRINT_INT(dev_hlid); + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + VIF_STATE_PRINT_INT(sta.hlid); + VIF_STATE_PRINT_INT(sta.basic_rate_idx); + VIF_STATE_PRINT_INT(sta.ap_rate_idx); + VIF_STATE_PRINT_INT(sta.p2p_rate_idx); + VIF_STATE_PRINT_INT(sta.qos); + } else { + VIF_STATE_PRINT_INT(ap.global_hlid); + VIF_STATE_PRINT_INT(ap.bcast_hlid); + VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]); + VIF_STATE_PRINT_INT(ap.mgmt_rate_idx); + VIF_STATE_PRINT_INT(ap.bcast_rate_idx); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]); + VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]); + } + VIF_STATE_PRINT_INT(last_tx_hlid); + VIF_STATE_PRINT_INT(tx_queue_count[0]); + VIF_STATE_PRINT_INT(tx_queue_count[1]); + VIF_STATE_PRINT_INT(tx_queue_count[2]); + VIF_STATE_PRINT_INT(tx_queue_count[3]); + VIF_STATE_PRINT_LHEX(links_map[0]); + VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len); + VIF_STATE_PRINT_INT(band); + VIF_STATE_PRINT_INT(channel); + VIF_STATE_PRINT_HEX(bitrate_masks[0]); + VIF_STATE_PRINT_HEX(bitrate_masks[1]); + VIF_STATE_PRINT_HEX(basic_rate_set); + VIF_STATE_PRINT_HEX(basic_rate); + VIF_STATE_PRINT_HEX(rate_set); + VIF_STATE_PRINT_INT(beacon_int); + VIF_STATE_PRINT_INT(default_key); + VIF_STATE_PRINT_INT(aid); + VIF_STATE_PRINT_INT(psm_entry_retry); + VIF_STATE_PRINT_INT(power_level); + VIF_STATE_PRINT_INT(rssi_thold); + VIF_STATE_PRINT_INT(last_rssi_event); + VIF_STATE_PRINT_INT(ba_support); + VIF_STATE_PRINT_INT(ba_allowed); + VIF_STATE_PRINT_LLHEX(total_freed_pkts); + } + +#undef VIF_STATE_PRINT_INT +#undef VIF_STATE_PRINT_LONG +#undef VIF_STATE_PRINT_HEX +#undef VIF_STATE_PRINT_LHEX +#undef VIF_STATE_PRINT_LLHEX +#undef VIF_STATE_PRINT_STR +#undef VIF_STATE_PRINT_NSTR +#undef VIF_STATE_PRINT + + mutex_unlock(&wl->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, res); + kfree(buf); + return ret; +} + +static const struct file_operations vifs_state_ops = { + .read = vifs_state_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dtim_interval_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + u8 value; + + if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_DTIM || + wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM) + value = wl->conf.conn.listen_interval; + else + value = 0; + + return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value); +} + +static ssize_t dtim_interval_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value for dtim_interval"); + return -EINVAL; + } + + if (value < 1 || value > 10) { + wl1271_warning("dtim value is not in valid range"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.conn.listen_interval = value; + /* for some reason there are different event types for 1 and >1 */ + 
if (value == 1) + wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_DTIM; + else + wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM; + + /* + * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only + * take effect on the next time we enter psm. + */ + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations dtim_interval_ops = { + .read = dtim_interval_read, + .write = dtim_interval_write, + .open = simple_open, + .llseek = default_llseek, +}; + + + +static ssize_t suspend_dtim_interval_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + u8 value; + + if (wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_DTIM || + wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM) + value = wl->conf.conn.suspend_listen_interval; + else + value = 0; + + return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value); +} + +static ssize_t suspend_dtim_interval_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value for suspend_dtim_interval"); + return -EINVAL; + } + + if (value < 1 || value > 10) { + wl1271_warning("suspend_dtim value is not in valid range"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.conn.suspend_listen_interval = value; + /* for some reason there are different event types for 1 and >1 */ + if (value == 1) + wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_DTIM; + else + wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM; + + mutex_unlock(&wl->mutex); + return count; +} + + +static const struct file_operations suspend_dtim_interval_ops = { + .read = suspend_dtim_interval_read, + .write = suspend_dtim_interval_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t beacon_interval_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + u8 value; + + if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_BEACON || + wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_BEACONS) + value = wl->conf.conn.listen_interval; + else + value = 0; + + return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value); +} + +static ssize_t beacon_interval_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value for beacon_interval"); + return -EINVAL; + } + + if (value < 1 || value > 255) { + wl1271_warning("beacon interval value is not in valid range"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.conn.listen_interval = value; + /* for some reason there are different event types for 1 and >1 */ + if (value == 1) + wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_BEACON; + else + wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_BEACONS; + + /* + * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only + * take effect on the next time we enter psm. 
+ */ + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations beacon_interval_ops = { + .read = beacon_interval_read, + .write = beacon_interval_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t rx_streaming_interval_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in rx_streaming_interval!"); + return -EINVAL; + } + + /* valid values: 0, 10-100 */ + if (value && (value < 10 || value > 100)) { + wl1271_warning("value is not in range!"); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.rx_streaming.interval = value; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_recalc_rx_streaming(wl, wlvif); + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static ssize_t rx_streaming_interval_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + return wl1271_format_buffer(userbuf, count, ppos, + "%d\n", wl->conf.rx_streaming.interval); +} + +static const struct file_operations rx_streaming_interval_ops = { + .read = rx_streaming_interval_read, + .write = rx_streaming_interval_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t rx_streaming_always_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal value in rx_streaming_write!"); + return -EINVAL; + } + + /* valid values: 0, 10-100 */ + if (!(value == 0 || value == 1)) { + wl1271_warning("value is not in valid!"); + return -EINVAL; + } + + mutex_lock(&wl->mutex); + + wl->conf.rx_streaming.always = value; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_recalc_rx_streaming(wl, wlvif); + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static ssize_t rx_streaming_always_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + return wl1271_format_buffer(userbuf, count, ppos, + "%d\n", wl->conf.rx_streaming.always); +} + +static const struct file_operations rx_streaming_always_ops = { + .read = rx_streaming_always_read, + .write = rx_streaming_always_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t beacon_filtering_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &value); + if (ret < 0) { + wl1271_warning("illegal value for beacon_filtering!"); + return -EINVAL; + } + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations beacon_filtering_ops = { + .write = 
beacon_filtering_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t fw_stats_raw_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + wl1271_debugfs_update_stats(wl); + + return simple_read_from_buffer(userbuf, count, ppos, + wl->stats.fw_stats, + wl->stats.fw_stats_len); +} + +static const struct file_operations fw_stats_raw_ops = { + .read = fw_stats_raw_read, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t sleep_auth_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(user_buf, count, + ppos, "%d\n", + wl->sleep_auth); +} + +static ssize_t sleep_auth_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &value); + if (ret < 0) { + wl1271_warning("illegal value in sleep_auth"); + return -EINVAL; + } + + if (value > WL1271_PSM_MAX) { + wl1271_warning("sleep_auth must be between 0 and %d", + WL1271_PSM_MAX); + return -ERANGE; + } + + mutex_lock(&wl->mutex); + + wl->conf.conn.sta_sleep_auth = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + /* this will show up on "read" in case we are off */ + wl->sleep_auth = value; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_acx_sleep_auth(wl, value); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations sleep_auth_ops = { + .read = sleep_auth_read, + .write = sleep_auth_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dev_mem_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wlcore_partition_set part, old_part; + size_t bytes = count; + int ret; + char *buf; + + /* only requests of dword-aligned size and offset are supported */ + if (bytes % 4) + return -EINVAL; + + if (*ppos % 4) + return -EINVAL; + + /* function should return in reasonable time */ + bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE); + + if (bytes == 0) + return -EINVAL; + + memset(&part, 0, sizeof(part)); + part.mem.start = *ppos; + part.mem.size = bytes; + + buf = kmalloc(bytes, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WLCORE_STATE_OFF)) { + ret = -EFAULT; + goto skip_read; + } + + /* + * Don't fail if elp_wakeup returns an error, so the device's memory + * could be read even if the FW crashed + */ + wl1271_ps_elp_wakeup(wl); + + /* store current partition and switch partition */ + memcpy(&old_part, &wl->curr_part, sizeof(old_part)); + ret = wlcore_set_partition(wl, &part); + if (ret < 0) + goto part_err; + + ret = wlcore_raw_read(wl, 0, buf, bytes, false); + if (ret < 0) + goto read_err; + +read_err: + /* recover partition */ + ret = wlcore_set_partition(wl, &old_part); + if (ret < 0) + goto part_err; + +part_err: + wl1271_ps_elp_sleep(wl); + +skip_read: + mutex_unlock(&wl->mutex); + + if (ret == 0) { + ret = copy_to_user(user_buf, buf, bytes); + if (ret < bytes) { + bytes -= ret; + *ppos += bytes; + ret = 0; + } else { + ret = -EFAULT; + } + } + + kfree(buf); + + return ((ret == 0) ? 
bytes : ret); +} + +static ssize_t dev_mem_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wlcore_partition_set part, old_part; + size_t bytes = count; + int ret; + char *buf; + + /* only requests of dword-aligned size and offset are supported */ + if (bytes % 4) + return -EINVAL; + + if (*ppos % 4) + return -EINVAL; + + /* function should return in reasonable time */ + bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE); + + if (bytes == 0) + return -EINVAL; + + memset(&part, 0, sizeof(part)); + part.mem.start = *ppos; + part.mem.size = bytes; + + buf = kmalloc(bytes, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf, user_buf, bytes); + if (ret) { + ret = -EFAULT; + goto err_out; + } + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WLCORE_STATE_OFF)) { + ret = -EFAULT; + goto skip_write; + } + + /* + * Don't fail if elp_wakeup returns an error, so the device's memory + * could be read even if the FW crashed + */ + wl1271_ps_elp_wakeup(wl); + + /* store current partition and switch partition */ + memcpy(&old_part, &wl->curr_part, sizeof(old_part)); + ret = wlcore_set_partition(wl, &part); + if (ret < 0) + goto part_err; + + ret = wlcore_raw_write(wl, 0, buf, bytes, false); + if (ret < 0) + goto write_err; + +write_err: + /* recover partition */ + ret = wlcore_set_partition(wl, &old_part); + if (ret < 0) + goto part_err; + +part_err: + wl1271_ps_elp_sleep(wl); + +skip_write: + mutex_unlock(&wl->mutex); + + if (ret == 0) + *ppos += bytes; + +err_out: + kfree(buf); + + return ((ret == 0) ? bytes : ret); +} + +static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig) +{ + loff_t ret; + + /* only requests of dword-aligned size and offset are supported */ + if (offset % 4) + return -EINVAL; + + switch (orig) { + case SEEK_SET: + file->f_pos = offset; + ret = file->f_pos; + break; + case SEEK_CUR: + file->f_pos += offset; + ret = file->f_pos; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static const struct file_operations dev_mem_ops = { + .open = simple_open, + .read = dev_mem_read, + .write = dev_mem_write, + .llseek = dev_mem_seek, +}; + +static int wl1271_debugfs_add_files(struct wl1271 *wl, + struct dentry *rootdir) +{ + int ret = 0; + struct dentry *entry, *streaming; + + DEBUGFS_ADD(tx_queue_len, rootdir); + DEBUGFS_ADD(retry_count, rootdir); + DEBUGFS_ADD(excessive_retries, rootdir); + + DEBUGFS_ADD(gpio_power, rootdir); + DEBUGFS_ADD(start_recovery, rootdir); + DEBUGFS_ADD(driver_state, rootdir); + DEBUGFS_ADD(vifs_state, rootdir); + DEBUGFS_ADD(dtim_interval, rootdir); + DEBUGFS_ADD(suspend_dtim_interval, rootdir); + DEBUGFS_ADD(beacon_interval, rootdir); + DEBUGFS_ADD(beacon_filtering, rootdir); + DEBUGFS_ADD(dynamic_ps_timeout, rootdir); + DEBUGFS_ADD(forced_ps, rootdir); + DEBUGFS_ADD(split_scan_timeout, rootdir); + DEBUGFS_ADD(irq_pkt_threshold, rootdir); + DEBUGFS_ADD(irq_blk_threshold, rootdir); + DEBUGFS_ADD(irq_timeout, rootdir); + DEBUGFS_ADD(fw_stats_raw, rootdir); + DEBUGFS_ADD(sleep_auth, rootdir); + + streaming = debugfs_create_dir("rx_streaming", rootdir); + if (!streaming || IS_ERR(streaming)) + goto err; + + DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); + DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); + + DEBUGFS_ADD_PREFIX(dev, mem, rootdir); + + return 0; + +err: + if (IS_ERR(entry)) + ret = PTR_ERR(entry); + else + ret = -ENOMEM; + + return ret; +} + +void wl1271_debugfs_reset(struct wl1271 *wl) +{ + if 
(!wl->stats.fw_stats) + return; + + memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len); + wl->stats.retry_count = 0; + wl->stats.excessive_retries = 0; +} + +int wl1271_debugfs_init(struct wl1271 *wl) +{ + int ret; + struct dentry *rootdir; + + rootdir = debugfs_create_dir(KBUILD_MODNAME, + wl->hw->wiphy->debugfsdir); + + if (IS_ERR(rootdir)) { + ret = PTR_ERR(rootdir); + goto out; + } + + wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL); + if (!wl->stats.fw_stats) { + ret = -ENOMEM; + goto out_remove; + } + + wl->stats.fw_stats_update = jiffies; + + ret = wl1271_debugfs_add_files(wl, rootdir); + if (ret < 0) + goto out_exit; + + ret = wlcore_debugfs_init(wl, rootdir); + if (ret < 0) + goto out_exit; + + goto out; + +out_exit: + wl1271_debugfs_exit(wl); + +out_remove: + debugfs_remove_recursive(rootdir); + +out: + return ret; +} + +void wl1271_debugfs_exit(struct wl1271 *wl) +{ + kfree(wl->stats.fw_stats); + wl->stats.fw_stats = NULL; +} diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h new file mode 100644 index 000000000..bf14676e6 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -0,0 +1,120 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __DEBUGFS_H__ +#define __DEBUGFS_H__ + +#include "wlcore.h" + +__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count, + loff_t *ppos, char *fmt, ...); + +int wl1271_debugfs_init(struct wl1271 *wl); +void wl1271_debugfs_exit(struct wl1271 *wl); +void wl1271_debugfs_reset(struct wl1271 *wl); +void wl1271_debugfs_update_stats(struct wl1271 *wl); + +#define DEBUGFS_FORMAT_BUFFER_SIZE 256 + +#define DEBUGFS_READONLY_FILE(name, fmt, value...) 
\ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1271 *wl = file->private_data; \ + return wl1271_format_buffer(userbuf, count, ppos, \ + fmt "\n", ##value); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_ADD(name, parent) \ + do { \ + entry = debugfs_create_file(#name, 0400, parent, \ + wl, &name## _ops); \ + if (!entry || IS_ERR(entry)) \ + goto err; \ + } while (0) + + +#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \ + do { \ + entry = debugfs_create_file(#name, 0400, parent, \ + wl, &prefix## _## name## _ops); \ + if (!entry || IS_ERR(entry)) \ + goto err; \ + } while (0) + +#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \ +static ssize_t sub## _ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1271 *wl = file->private_data; \ + struct struct_type *stats = wl->stats.fw_stats; \ + \ + wl1271_debugfs_update_stats(wl); \ + \ + return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \ + stats->sub.name); \ +} \ + \ +static const struct file_operations sub## _ ##name## _ops = { \ + .read = sub## _ ##name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_FWSTATS_FILE_ARRAY(sub, name, len, struct_type) \ +static ssize_t sub## _ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1271 *wl = file->private_data; \ + struct struct_type *stats = wl->stats.fw_stats; \ + char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \ + int res, i; \ + \ + wl1271_debugfs_update_stats(wl); \ + \ + for (i = 0; i < len; i++) \ + res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \ + buf, i, stats->sub.name[i]); \ + \ + return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \ +} \ + \ +static const struct file_operations sub## _ ##name## _ops = { \ + .read = sub## _ ##name## _read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_FWSTATS_ADD(sub, name) \ + DEBUGFS_ADD(sub## _ ##name, stats) + + +#endif /* WL1271_DEBUGFS_H */ diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c new file mode 100644 index 000000000..c42e78955 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -0,0 +1,311 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wlcore.h" +#include "debug.h" +#include "io.h" +#include "event.h" +#include "ps.h" +#include "scan.h" +#include "wl12xx_80211.h" + +void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr) +{ + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + enum nl80211_cqm_rssi_threshold_event event; + s8 metric = metric_arr[0]; + + wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric); + + /* TODO: check actual multi-role support */ + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (metric <= wlvif->rssi_thold) + event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + else + event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + + vif = wl12xx_wlvif_to_vif(wlvif); + if (event != wlvif->last_rssi_event) + ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL); + wlvif->last_rssi_event = event; + } +} +EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger); + +static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + u8 hlid = wlvif->sta.hlid; + if (!wl->links[hlid].ba_bitmap) + return; + ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap, + vif->bss_conf.bssid); + } else { + u8 hlid; + struct wl1271_link *lnk; + for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, + wl->num_links) { + lnk = &wl->links[hlid]; + if (!lnk->ba_bitmap) + continue; + + ieee80211_stop_rx_ba_session(vif, + lnk->ba_bitmap, + lnk->addr); + } + } +} + +void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable) +{ + struct wl12xx_vif *wlvif; + + if (enable) { + set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); + } else { + clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_recalc_rx_streaming(wl, wlvif); + } + } +} +EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense); + +void wlcore_event_sched_scan_completed(struct wl1271 *wl, + u8 status) +{ + wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)", + status); + + if (wl->sched_vif) { + ieee80211_sched_scan_stopped(wl->hw); + wl->sched_vif = NULL; + } +} +EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed); + +void wlcore_event_ba_rx_constraint(struct wl1271 *wl, + unsigned long roles_bitmap, + unsigned long allowed_bitmap) +{ + struct wl12xx_vif *wlvif; + + wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx", + __func__, roles_bitmap, allowed_bitmap); + + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; + + wlvif->ba_allowed = !!test_bit(wlvif->role_id, + &allowed_bitmap); + if (!wlvif->ba_allowed) + wl1271_stop_ba_event(wl, wlvif); + } +} +EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint); + +void wlcore_event_channel_switch(struct wl1271 *wl, + unsigned long roles_bitmap, + bool success) +{ + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + + wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d", + __func__, roles_bitmap, success); + + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; + + if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, + &wlvif->flags)) + continue; + + vif = wl12xx_wlvif_to_vif(wlvif); + + if (wlvif->bss_type == BSS_TYPE_STA_BSS) { + 
ieee80211_chswitch_done(vif, success); + cancel_delayed_work(&wlvif->channel_switch_work); + } else { + set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags); + ieee80211_csa_finish(vif); + } + } +} +EXPORT_SYMBOL_GPL(wlcore_event_channel_switch); + +void wlcore_event_dummy_packet(struct wl1271 *wl) +{ + if (wl->plt) { + wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring."); + return; + } + + wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); + wl1271_tx_dummy_packet(wl); +} +EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet); + +static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap) +{ + u32 num_packets = wl->conf.tx.max_tx_retries; + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + const u8 *addr; + int h; + + for_each_set_bit(h, &sta_bitmap, wl->num_links) { + bool found = false; + /* find the ap vif connected to this sta */ + wl12xx_for_each_wlvif_ap(wl, wlvif) { + if (!test_bit(h, wlvif->ap.sta_hlid_map)) + continue; + found = true; + break; + } + if (!found) + continue; + + vif = wl12xx_wlvif_to_vif(wlvif); + addr = wl->links[h].addr; + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, addr); + if (sta) { + wl1271_debug(DEBUG_EVENT, "remove sta %d", h); + ieee80211_report_low_ack(sta, num_packets); + } + rcu_read_unlock(); + } +} + +void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap) +{ + wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID"); + wlcore_disconnect_sta(wl, sta_bitmap); +} +EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure); + +void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap) +{ + wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); + wlcore_disconnect_sta(wl, sta_bitmap); +} +EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta); + +void wlcore_event_roc_complete(struct wl1271 *wl) +{ + wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID"); + if (wl->roc_vif) + ieee80211_ready_on_channel(wl->hw); +} +EXPORT_SYMBOL_GPL(wlcore_event_roc_complete); + +void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap) +{ + /* + * We are HW_MONITOR device. On beacon loss - queue + * connection loss work. Cancel it on REGAINED event. + */ + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + int delay = wl->conf.conn.synch_fail_thold * + wl->conf.conn.bss_lose_timeout; + + wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap); + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || + !test_bit(wlvif->role_id , &roles_bitmap)) + continue; + + vif = wl12xx_wlvif_to_vif(wlvif); + + /* don't attempt roaming in case of p2p */ + if (wlvif->p2p) { + ieee80211_connection_loss(vif); + continue; + } + + /* + * if the work is already queued, it should take place. + * We don't want to delay the connection loss + * indication any more. 
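+ * (ieee80211_queue_delayed_work() will not re-arm a work item that is
+ * already pending, so calling it again here cannot push an earlier
+ * timeout further out.)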
+ */ + ieee80211_queue_delayed_work(wl->hw, + &wlvif->connection_loss_work, + msecs_to_jiffies(delay)); + + ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL); + } +} +EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss); + +int wl1271_event_unmask(struct wl1271 *wl) +{ + int ret; + + wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask); + ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask)); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) +{ + int ret; + + wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); + + if (mbox_num > 1) + return -EINVAL; + + /* first we read the mbox descriptor */ + ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, + wl->mbox_size, false); + if (ret < 0) + return ret; + + /* process the descriptor */ + ret = wl->ops->process_mailbox_events(wl); + if (ret < 0) + return ret; + + /* + * TODO: we just need this because one bit is in a different + * place. Is there any better way? + */ + ret = wl->ops->ack_event(wl); + + return ret; +} diff --git a/drivers/net/wireless/ti/wlcore/event.h b/drivers/net/wireless/ti/wlcore/event.h new file mode 100644 index 000000000..acc7a59d3 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/event.h @@ -0,0 +1,87 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __EVENT_H__ +#define __EVENT_H__ + +/* + * Mbox events + * + * The event mechanism is based on a pair of event buffers (buffers A and + * B) at fixed locations in the target's memory. The host processes one + * buffer while the other buffer continues to collect events. If the host + * is not processing events, an interrupt is issued to signal that a buffer + * is ready. Once the host is done with processing events from one buffer, + * it signals the target (with an ACK interrupt) that the event buffer is + * free. 
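+ *
+ * As a rough host-side sketch (the concrete steps live in event.c and in
+ * the chip-family ops), processing one buffer goes:
+ *
+ *   wl1271_event_handle(wl, mbox_num)
+ *     -> wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox, wl->mbox_size, ..)
+ *     -> wl->ops->process_mailbox_events(wl)
+ *     -> wl->ops->ack_event(wl)  (signals the target that the buffer is free)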
+ */ + +enum { + RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0), + RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1), + RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2), + RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3), + RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4), + RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5), + RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6), + RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7), + + EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, +}; + +/* events the driver might want to wait for */ +enum wlcore_wait_event { + WLCORE_EVENT_ROLE_STOP_COMPLETE, + WLCORE_EVENT_PEER_REMOVE_COMPLETE, + WLCORE_EVENT_DFS_CONFIG_COMPLETE +}; + +enum { + EVENT_ENTER_POWER_SAVE_FAIL = 0, + EVENT_ENTER_POWER_SAVE_SUCCESS, +}; + +#define NUM_OF_RSSI_SNR_TRIGGERS 8 + +struct wl1271; + +int wl1271_event_unmask(struct wl1271 *wl); +int wl1271_event_handle(struct wl1271 *wl, u8 mbox); + +void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable); +void wlcore_event_sched_scan_completed(struct wl1271 *wl, + u8 status); +void wlcore_event_ba_rx_constraint(struct wl1271 *wl, + unsigned long roles_bitmap, + unsigned long allowed_bitmap); +void wlcore_event_channel_switch(struct wl1271 *wl, + unsigned long roles_bitmap, + bool success); +void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap); +void wlcore_event_dummy_packet(struct wl1271 *wl); +void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap); +void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap); +void wlcore_event_roc_complete(struct wl1271 *wl); +void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr); +#endif diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h new file mode 100644 index 000000000..eec56935b --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/hw_ops.h @@ -0,0 +1,332 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WLCORE_HW_OPS_H__ +#define __WLCORE_HW_OPS_H__ + +#include "wlcore.h" +#include "rx.h" + +static inline u32 +wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks) +{ + if (!wl->ops->calc_tx_blocks) + BUG_ON(1); + + return wl->ops->calc_tx_blocks(wl, len, spare_blks); +} + +static inline void +wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, + u32 blks, u32 spare_blks) +{ + if (!wl->ops->set_tx_desc_blocks) + BUG_ON(1); + + return wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks); +} + +static inline void +wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb) +{ + if (!wl->ops->set_tx_desc_data_len) + BUG_ON(1); + + wl->ops->set_tx_desc_data_len(wl, desc, skb); +} + +static inline enum wl_rx_buf_align +wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc) +{ + + if (!wl->ops->get_rx_buf_align) + BUG_ON(1); + + return wl->ops->get_rx_buf_align(wl, rx_desc); +} + +static inline int +wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len) +{ + if (wl->ops->prepare_read) + return wl->ops->prepare_read(wl, rx_desc, len); + + return 0; +} + +static inline u32 +wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len) +{ + if (!wl->ops->get_rx_packet_len) + BUG_ON(1); + + return wl->ops->get_rx_packet_len(wl, rx_data, data_len); +} + +static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl) +{ + if (wl->ops->tx_delayed_compl) + return wl->ops->tx_delayed_compl(wl); + + return 0; +} + +static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl) +{ + if (wl->ops->tx_immediate_compl) + wl->ops->tx_immediate_compl(wl); +} + +static inline int +wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (wl->ops->init_vif) + return wl->ops->init_vif(wl, wlvif); + + return 0; +} + +static inline void +wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status, + struct wl_fw_status *fw_status) +{ + BUG_ON(!wl->ops->convert_fw_status); + + wl->ops->convert_fw_status(wl, raw_fw_status, fw_status); +} + +static inline u32 +wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (!wl->ops->sta_get_ap_rate_mask) + BUG_ON(1); + + return wl->ops->sta_get_ap_rate_mask(wl, wlvif); +} + +static inline int wlcore_identify_fw(struct wl1271 *wl) +{ + if (wl->ops->identify_fw) + return wl->ops->identify_fw(wl); + + return 0; +} + +static inline void +wlcore_hw_set_tx_desc_csum(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb) +{ + if (!wl->ops->set_tx_desc_csum) + BUG_ON(1); + + wl->ops->set_tx_desc_csum(wl, desc, skb); +} + +static inline void +wlcore_hw_set_rx_csum(struct wl1271 *wl, + struct wl1271_rx_descriptor *desc, + struct sk_buff *skb) +{ + if (wl->ops->set_rx_csum) + wl->ops->set_rx_csum(wl, desc, skb); +} + +static inline u32 +wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + if (wl->ops->ap_get_mimo_wide_rate_mask) + return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif); + + return 0; +} + +static inline int +wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir) +{ + if (wl->ops->debugfs_init) + return wl->ops->debugfs_init(wl, rootdir); + + return 0; +} + +static inline int 
+wlcore_handle_static_data(struct wl1271 *wl, void *static_data) +{ + if (wl->ops->handle_static_data) + return wl->ops->handle_static_data(wl, static_data); + + return 0; +} + +static inline int +wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem) +{ + if (!wl->ops->get_spare_blocks) + BUG_ON(1); + + return wl->ops->get_spare_blocks(wl, is_gem); +} + +static inline int +wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf) +{ + if (!wl->ops->set_key) + BUG_ON(1); + + return wl->ops->set_key(wl, cmd, vif, sta, key_conf); +} + +static inline u32 +wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len) +{ + if (wl->ops->pre_pkt_send) + return wl->ops->pre_pkt_send(wl, buf_offset, last_len); + + return buf_offset; +} + +static inline void +wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (wl->ops->sta_rc_update) + wl->ops->sta_rc_update(wl, wlvif); +} + +static inline int +wlcore_hw_interrupt_notify(struct wl1271 *wl, bool action) +{ + if (wl->ops->interrupt_notify) + return wl->ops->interrupt_notify(wl, action); + return 0; +} + +static inline int +wlcore_hw_rx_ba_filter(struct wl1271 *wl, bool action) +{ + if (wl->ops->rx_ba_filter) + return wl->ops->rx_ba_filter(wl, action); + return 0; +} + +static inline int +wlcore_hw_ap_sleep(struct wl1271 *wl) +{ + if (wl->ops->ap_sleep) + return wl->ops->ap_sleep(wl); + + return 0; +} + +static inline int +wlcore_hw_set_peer_cap(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid) +{ + if (wl->ops->set_peer_cap) + return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation, + rate_set, hlid); + + return 0; +} + +static inline u32 +wlcore_hw_convert_hwaddr(struct wl1271 *wl, u32 hwaddr) +{ + if (!wl->ops->convert_hwaddr) + BUG_ON(1); + + return wl->ops->convert_hwaddr(wl, hwaddr); +} + +static inline bool +wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + if (!wl->ops->lnk_high_prio) + BUG_ON(1); + + return wl->ops->lnk_high_prio(wl, hlid, lnk); +} + +static inline bool +wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk) +{ + if (!wl->ops->lnk_low_prio) + BUG_ON(1); + + return wl->ops->lnk_low_prio(wl, hlid, lnk); +} + +static inline int +wlcore_smart_config_start(struct wl1271 *wl, u32 group_bitmap) +{ + if (!wl->ops->smart_config_start) + return -EINVAL; + + return wl->ops->smart_config_start(wl, group_bitmap); +} + +static inline int +wlcore_smart_config_stop(struct wl1271 *wl) +{ + if (!wl->ops->smart_config_stop) + return -EINVAL; + + return wl->ops->smart_config_stop(wl); +} + +static inline int +wlcore_smart_config_set_group_key(struct wl1271 *wl, u16 group_id, + u8 key_len, u8 *key) +{ + if (!wl->ops->smart_config_set_group_key) + return -EINVAL; + + return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key); +} + +static inline int +wlcore_hw_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start) +{ + if (!wl->ops->set_cac) + return -EINVAL; + + return wl->ops->set_cac(wl, wlvif, start); +} + +static inline int +wlcore_hw_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + if (!wl->ops->dfs_master_restart) + return -EINVAL; + + return wl->ops->dfs_master_restart(wl, wlvif); +} +#endif diff --git a/drivers/net/wireless/ti/wlcore/ini.h b/drivers/net/wireless/ti/wlcore/ini.h new file mode 100644 index 000000000..d24fe3bbc --- /dev/null +++ 
b/drivers/net/wireless/ti/wlcore/ini.h @@ -0,0 +1,232 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __INI_H__ +#define __INI_H__ + +#define GENERAL_SETTINGS_DRPW_LPD 0xc0 +#define SCRATCH_ENABLE_LPD BIT(25) + +#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16 + +struct wl1271_ini_general_params { + u8 ref_clock; + u8 settling_time; + u8 clk_valid_on_wakeup; + u8 dc2dc_mode; + u8 dual_mode_select; + u8 tx_bip_fem_auto_detect; + u8 tx_bip_fem_manufacturer; + u8 general_settings; + u8 sr_state; + u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM]; +} __packed; + +#define WL128X_INI_MAX_SETTINGS_PARAM 4 + +struct wl128x_ini_general_params { + u8 ref_clock; + u8 settling_time; + u8 clk_valid_on_wakeup; + u8 tcxo_ref_clock; + u8 tcxo_settling_time; + u8 tcxo_valid_on_wakeup; + u8 tcxo_ldo_voltage; + u8 xtal_itrim_val; + u8 platform_conf; + u8 dual_mode_select; + u8 tx_bip_fem_auto_detect; + u8 tx_bip_fem_manufacturer; + u8 general_settings[WL128X_INI_MAX_SETTINGS_PARAM]; + u8 sr_state; + u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM]; + u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM]; +} __packed; + +#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15 + +struct wl1271_ini_band_params_2 { + u8 rx_trace_insertion_loss; + u8 tx_trace_loss; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +#define WL1271_INI_CHANNEL_COUNT_2 14 + +struct wl128x_ini_band_params_2 { + u8 rx_trace_insertion_loss; + u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_2]; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +#define WL1271_INI_RATE_GROUP_COUNT 6 + +struct wl1271_ini_fem_params_2 { + __le16 tx_bip_ref_pd_voltage; + u8 tx_bip_ref_power; + u8 tx_bip_ref_offset; + u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT]; + u8 rx_fem_insertion_loss; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + +#define WL128X_INI_RATE_GROUP_COUNT 7 +/* low and high temperatures */ +#define WL128X_INI_PD_VS_TEMPERATURE_RANGES 2 + +struct wl128x_ini_fem_params_2 { + __le16 tx_bip_ref_pd_voltage; + u8 tx_bip_ref_power; + u8 tx_bip_ref_offset; + u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT]; + u8 
tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT + 1]; + u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_2]; + u8 tx_pd_vs_temperature[WL128X_INI_PD_VS_TEMPERATURE_RANGES]; + u8 rx_fem_insertion_loss; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + +#define WL1271_INI_CHANNEL_COUNT_5 35 +#define WL1271_INI_SUB_BAND_COUNT_5 7 + +struct wl1271_ini_band_params_5 { + u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +struct wl128x_ini_band_params_5 { + u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_5]; + u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; +} __packed; + +struct wl1271_ini_fem_params_5 { + __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5]; + u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT]; + u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + +struct wl128x_ini_fem_params_5 { + __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5]; + u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5]; + u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT]; + u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_5]; + u8 tx_pd_vs_temperature[WL1271_INI_SUB_BAND_COUNT_5 * + WL128X_INI_PD_VS_TEMPERATURE_RANGES]; + u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; + u8 degraded_low_to_normal_thr; + u8 normal_to_degraded_high_thr; +} __packed; + +/* NVS data structure */ +#define WL1271_INI_NVS_SECTION_SIZE 468 + +/* We have four FEM module types: 0-RFMD, 1-TQS, 2-SKW, 3-TQS_HP */ +#define WL1271_INI_FEM_MODULE_COUNT 4 + +/* + * In NVS we only store two FEM module entries - + * FEM modules 0,2,3 are stored in entry 0 + * FEM module 1 is stored in entry 1 + */ +#define WL12XX_NVS_FEM_MODULE_COUNT 2 + +#define WL12XX_FEM_TO_NVS_ENTRY(ini_fem_module) \ + ((ini_fem_module) == 1 ? 1 : 0) + +#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800 + +struct wl1271_nvs_file { + /* NVS section - must be first! 
*/ + u8 nvs[WL1271_INI_NVS_SECTION_SIZE]; + + /* INI section */ + struct wl1271_ini_general_params general_params; + u8 padding1; + struct wl1271_ini_band_params_2 stat_radio_params_2; + u8 padding2; + struct { + struct wl1271_ini_fem_params_2 params; + u8 padding; + } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT]; + struct wl1271_ini_band_params_5 stat_radio_params_5; + u8 padding3; + struct { + struct wl1271_ini_fem_params_5 params; + u8 padding; + } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT]; +} __packed; + +struct wl128x_nvs_file { + /* NVS section - must be first! */ + u8 nvs[WL1271_INI_NVS_SECTION_SIZE]; + + /* INI section */ + struct wl128x_ini_general_params general_params; + u8 fem_vendor_and_options; + struct wl128x_ini_band_params_2 stat_radio_params_2; + u8 padding2; + struct { + struct wl128x_ini_fem_params_2 params; + u8 padding; + } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT]; + struct wl128x_ini_band_params_5 stat_radio_params_5; + u8 padding3; + struct { + struct wl128x_ini_fem_params_5 params; + u8 padding; + } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT]; +} __packed; +#endif diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c new file mode 100644 index 000000000..5ca1fb161 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -0,0 +1,760 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include + +#include "debug.h" +#include "init.h" +#include "wl12xx_80211.h" +#include "acx.h" +#include "cmd.h" +#include "tx.h" +#include "io.h" +#include "hw_ops.h" + +int wl1271_init_templates_config(struct wl1271 *wl) +{ + int ret, i; + size_t max_size; + + /* send empty templates for fw memory reservation */ + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + wl->scan_templ_id_2_4, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + wl->scan_templ_id_5, + NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, + WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) { + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + wl->sched_scan_templ_id_2_4, + NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + wl->sched_scan_templ_id_5, + NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + } + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_NULL_DATA, NULL, + sizeof(struct wl12xx_null_data_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_PS_POLL, NULL, + sizeof(struct wl12xx_ps_poll_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_QOS_NULL_DATA, NULL, + sizeof + (struct ieee80211_qos_hdr), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_PROBE_RESPONSE, NULL, + WL1271_CMD_TEMPL_DFLT_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_BEACON, NULL, + WL1271_CMD_TEMPL_DFLT_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + max_size = sizeof(struct wl12xx_arp_rsp_template) + + WL1271_EXTRA_SPACE_MAX; + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_ARP_RSP, NULL, + max_size, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + /* + * Put very large empty placeholders for all templates. These + * reserve memory for later. 
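+ *
+ * For instance, the CMD_TEMPL_DEAUTH_AP slot reserved below with a NULL
+ * buffer is refilled later by wl1271_ap_init_deauth_template() with a
+ * real wl12xx_disconn_template, once the AP role and its rates are known.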
+ */ + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_AP_PROBE_RESPONSE, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_AP_BEACON, NULL, + WL1271_CMD_TEMPL_MAX_SIZE, + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_DEAUTH_AP, NULL, + sizeof + (struct wl12xx_disconn_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) { + ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID, + CMD_TEMPL_KLV, NULL, + sizeof(struct ieee80211_qos_hdr), + i, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + } + + return 0; +} + +static int wl1271_ap_init_deauth_template(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct wl12xx_disconn_template *tmpl; + int ret; + u32 rate; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) { + ret = -ENOMEM; + goto out; + } + + tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_DEAUTH); + + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_DEAUTH_AP, + tmpl, sizeof(*tmpl), 0, rate); + +out: + kfree(tmpl); + return ret; +} + +static int wl1271_ap_init_null_template(struct wl1271 *wl, + struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_hdr_3addr *nullfunc; + int ret; + u32 rate; + + nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL); + if (!nullfunc) { + ret = -ENOMEM; + goto out; + } + + nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS); + + /* nullfunc->addr1 is filled by FW */ + + memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); + memcpy(nullfunc->addr3, vif->addr, ETH_ALEN); + + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_NULL_DATA, nullfunc, + sizeof(*nullfunc), 0, rate); + +out: + kfree(nullfunc); + return ret; +} + +static int wl1271_ap_init_qos_null_template(struct wl1271 *wl, + struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_qos_hdr *qosnull; + int ret; + u32 rate; + + qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL); + if (!qosnull) { + ret = -ENOMEM; + goto out; + } + + qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_FROMDS); + + /* qosnull->addr1 is filled by FW */ + + memcpy(qosnull->addr2, vif->addr, ETH_ALEN); + memcpy(qosnull->addr3, vif->addr, ETH_ALEN); + + rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_QOS_NULL_DATA, qosnull, + sizeof(*qosnull), 0, rate); + +out: + kfree(qosnull); + return ret; +} + +static int wl12xx_init_rx_config(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_rx_msdu_life_time(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl12xx_init_phy_vif_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME); + if (ret < 0) + return ret; + + ret = wl1271_acx_service_period_timeout(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold); + if (ret < 0) + return ret; + + return 0; +} + +static int 
wl1271_init_sta_beacon_filter(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_acx_beacon_filter_table(wl, wlvif); + if (ret < 0) + return ret; + + /* disable beacon filtering until we get the first beacon */ + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_pta(struct wl1271 *wl) +{ + int ret; + + ret = wl12xx_acx_sg_cfg(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sg_enable(wl, wl->sg_enabled); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_energy_detection(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_cca_threshold(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_init_beacon_broadcast(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_acx_bcn_dtim_options(wl, wlvif); + if (ret < 0) + return ret; + + return 0; +} + +static int wl12xx_init_fwlog(struct wl1271 *wl) +{ + int ret; + + if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) + return 0; + + ret = wl12xx_cmd_config_fwlog(wl); + if (ret < 0) + return ret; + + return 0; +} + +/* generic sta initialization (non vif-specific) */ +static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + /* PS config */ + ret = wl12xx_acx_config_ps(wl, wlvif); + if (ret < 0) + return ret; + + /* FM WLAN coexistence */ + ret = wl1271_acx_fm_coex(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl, + struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + /* disable the keep-alive feature */ + ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + if (ret < 0) + return ret; + + return 0; +} + +/* generic ap initialization (non vif-specific) */ +static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_init_ap_rates(wl, wlvif); + if (ret < 0) + return ret; + + /* configure AP sleep, if enabled */ + ret = wlcore_hw_ap_sleep(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + ret = wl1271_ap_init_deauth_template(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl1271_ap_init_null_template(wl, vif); + if (ret < 0) + return ret; + + ret = wl1271_ap_init_qos_null_template(wl, vif); + if (ret < 0) + return ret; + + /* + * when operating as AP we want to receive external beacons for + * configuring ERP protection. 
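+ *
+ * That is why the wl1271_acx_beacon_filter_opt() call below passes
+ * 'false': beacon filtering stays off for the AP role so such beacons
+ * keep reaching the host.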
+ */ + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl, + struct ieee80211_vif *vif) +{ + return wl1271_ap_init_templates(wl, vif); +} + +int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int i, ret; + struct conf_tx_rate_class rc; + u32 supported_rates; + + wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", + wlvif->basic_rate_set); + + if (wlvif->basic_rate_set == 0) + return -EINVAL; + + rc.enabled_rates = wlvif->basic_rate_set; + rc.long_retry_limit = 10; + rc.short_retry_limit = 10; + rc.aflags = 0; + ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx); + if (ret < 0) + return ret; + + /* use the min basic rate for AP broadcast/multicast */ + rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + rc.short_retry_limit = 10; + rc.long_retry_limit = 10; + rc.aflags = 0; + ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx); + if (ret < 0) + return ret; + + /* + * If the basic rates contain OFDM rates, use OFDM only + * rates for unicast TX as well. Else use all supported rates. + */ + if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES)) + supported_rates = CONF_TX_OFDM_RATES; + else + supported_rates = CONF_TX_ENABLED_RATES; + + /* unconditionally enable HT rates */ + supported_rates |= CONF_TX_MCS_RATES; + + /* get extra MIMO or wide-chan rates where the HW supports it */ + supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif); + + /* configure unicast TX rate classes */ + for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { + rc.enabled_rates = supported_rates; + rc.short_retry_limit = 10; + rc.long_retry_limit = 10; + rc.aflags = 0; + ret = wl1271_acx_ap_rate_policy(wl, &rc, + wlvif->ap.ucast_rate_idx[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + /* Reset the BA RX indicators */ + wlvif->ba_allowed = true; + wl->ba_rx_session_count = 0; + + /* BA is supported in STA/AP modes */ + if (wlvif->bss_type != BSS_TYPE_AP_BSS && + wlvif->bss_type != BSS_TYPE_STA_BSS) { + wlvif->ba_support = false; + return 0; + } + + wlvif->ba_support = true; + + /* 802.11n initiator BA session setting */ + return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); +} + +/* vif-specifc initialization */ +static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0); + if (ret < 0) + return ret; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, false); + if (ret < 0) + return ret; + + /* Beacon filtering */ + ret = wl1271_init_sta_beacon_filter(wl, wlvif); + if (ret < 0) + return ret; + + /* Beacons and broadcast settings */ + ret = wl1271_init_beacon_broadcast(wl, wlvif); + if (ret < 0) + return ret; + + /* Configure rssi/snr averaging weights */ + ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif); + if (ret < 0) + return ret; + + return 0; +} + +/* vif-specific intialization */ +static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); + if (ret < 0) + return ret; + + /* initialize Tx power */ + ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif) +{ + struct 
wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct conf_tx_ac_category *conf_ac; + struct conf_tx_tid *conf_tid; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + int ret, i; + + /* consider all existing roles before configuring psm. */ + + if (wl->ap_count == 0 && is_ap) { /* first AP */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + return ret; + + /* unmask ap events */ + wl->event_mask |= wl->ap_event_mask; + ret = wl1271_event_unmask(wl); + if (ret < 0) + return ret; + /* first STA, no APs */ + } else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) { + u8 sta_auth = wl->conf.conn.sta_sleep_auth; + /* Configure for power according to debugfs */ + if (sta_auth != WL1271_PSM_ILLEGAL) + ret = wl1271_acx_sleep_auth(wl, sta_auth); + /* Configure for ELP power saving */ + else + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + + if (ret < 0) + return ret; + } + + /* Mode specific init */ + if (is_ap) { + ret = wl1271_ap_hw_init(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_init_ap_role(wl, wlvif); + if (ret < 0) + return ret; + } else { + ret = wl1271_sta_hw_init(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_init_sta_role(wl, wlvif); + if (ret < 0) + return ret; + } + + wl12xx_init_phy_vif_config(wl, wlvif); + + /* Default TID/AC configuration */ + BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac, + conf_ac->cw_min, conf_ac->cw_max, + conf_ac->aifsn, conf_ac->tx_op_limit); + if (ret < 0) + return ret; + + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, wlvif, + conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + return ret; + } + + /* Configure HW encryption */ + ret = wl1271_acx_feature_cfg(wl, wlvif); + if (ret < 0) + return ret; + + /* Mode specific init - post mem init */ + if (is_ap) + ret = wl1271_ap_hw_init_post_mem(wl, vif); + else + ret = wl1271_sta_hw_init_post_mem(wl, vif); + + if (ret < 0) + return ret; + + /* Configure initiator BA sessions policies */ + ret = wl1271_set_ba_policies(wl, wlvif); + if (ret < 0) + return ret; + + ret = wlcore_hw_init_vif(wl, wlvif); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_hw_init(struct wl1271 *wl) +{ + int ret; + + /* Chip-specific hw init */ + ret = wl->ops->hw_init(wl); + if (ret < 0) + return ret; + + /* Init templates */ + ret = wl1271_init_templates_config(wl); + if (ret < 0) + return ret; + + ret = wl12xx_acx_mem_cfg(wl); + if (ret < 0) + return ret; + + /* Configure the FW logger */ + ret = wl12xx_init_fwlog(wl); + if (ret < 0) + return ret; + + ret = wlcore_cmd_regdomain_config_locked(wl); + if (ret < 0) + return ret; + + /* Bluetooth WLAN coexistence */ + ret = wl1271_init_pta(wl); + if (ret < 0) + return ret; + + /* Default memory configuration */ + ret = wl1271_acx_init_mem_config(wl); + if (ret < 0) + return ret; + + /* RX config */ + ret = wl12xx_init_rx_config(wl); + if (ret < 0) + goto out_free_memmap; + + ret = wl1271_acx_dco_itrim_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Configure TX patch complete interrupt behavior */ + ret = wl1271_acx_tx_config_options(wl); + if (ret < 0) + goto out_free_memmap; + + /* RX complete interrupt pacing */ + ret = wl1271_acx_init_rx_interrupt(wl); + if (ret < 0) + goto out_free_memmap; + + /* Energy 
detection */ + ret = wl1271_init_energy_detection(wl); + if (ret < 0) + goto out_free_memmap; + + /* Default fragmentation threshold */ + ret = wl1271_acx_frag_threshold(wl, wl->hw->wiphy->frag_threshold); + if (ret < 0) + goto out_free_memmap; + + /* Enable data path */ + ret = wl1271_cmd_data_path(wl, 1); + if (ret < 0) + goto out_free_memmap; + + /* configure PM */ + ret = wl1271_acx_pm_config(wl); + if (ret < 0) + goto out_free_memmap; + + ret = wl12xx_acx_set_rate_mgmt_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* configure hangover */ + ret = wl12xx_acx_config_hangover(wl); + if (ret < 0) + goto out_free_memmap; + + return 0; + + out_free_memmap: + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + + return ret; +} diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h new file mode 100644 index 000000000..a45fbfdde --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/init.h @@ -0,0 +1,39 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __INIT_H__ +#define __INIT_H__ + +#include "wlcore.h" + +int wl1271_hw_init_power_auth(struct wl1271 *wl); +int wl1271_init_templates_config(struct wl1271 *wl); +int wl1271_init_pta(struct wl1271 *wl); +int wl1271_init_energy_detection(struct wl1271 *wl); +int wl1271_chip_specific_init(struct wl1271 *wl); +int wl1271_hw_init(struct wl1271 *wl); +int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif); +int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif); +int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif); + +#endif diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c new file mode 100644 index 000000000..68e74eefd --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/io.c @@ -0,0 +1,200 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "wl12xx_80211.h" +#include "io.h" +#include "tx.h" + +bool wl1271_set_block_size(struct wl1271 *wl) +{ + if (wl->if_ops->set_block_size) { + wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE); + return true; + } + + return false; +} + +void wlcore_disable_interrupts(struct wl1271 *wl) +{ + disable_irq(wl->irq); +} +EXPORT_SYMBOL_GPL(wlcore_disable_interrupts); + +void wlcore_disable_interrupts_nosync(struct wl1271 *wl) +{ + disable_irq_nosync(wl->irq); +} +EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync); + +void wlcore_enable_interrupts(struct wl1271 *wl) +{ + enable_irq(wl->irq); +} +EXPORT_SYMBOL_GPL(wlcore_enable_interrupts); + +void wlcore_synchronize_interrupts(struct wl1271 *wl) +{ + synchronize_irq(wl->irq); +} +EXPORT_SYMBOL_GPL(wlcore_synchronize_interrupts); + +int wlcore_translate_addr(struct wl1271 *wl, int addr) +{ + struct wlcore_partition_set *part = &wl->curr_part; + + /* + * To translate, first check to which window of addresses the + * particular address belongs. Then subtract the starting address + * of that window from the address. Then, add offset of the + * translated region. + * + * The translated regions occur next to each other in physical device + * memory, so just add the sizes of the preceding address regions to + * get the offset to the new region. + */ + if ((addr >= part->mem.start) && + (addr < part->mem.start + part->mem.size)) + return addr - part->mem.start; + else if ((addr >= part->reg.start) && + (addr < part->reg.start + part->reg.size)) + return addr - part->reg.start + part->mem.size; + else if ((addr >= part->mem2.start) && + (addr < part->mem2.start + part->mem2.size)) + return addr - part->mem2.start + part->mem.size + + part->reg.size; + else if ((addr >= part->mem3.start) && + (addr < part->mem3.start + part->mem3.size)) + return addr - part->mem3.start + part->mem.size + + part->reg.size + part->mem2.size; + + WARN(1, "HW address 0x%x out of range", addr); + return 0; +} +EXPORT_SYMBOL_GPL(wlcore_translate_addr); + +/* Set the partitions to access the chip addresses + * + * To simplify driver code, a fixed (virtual) memory map is defined for + * register and memory addresses. Because in the chipset, in different stages + * of operation, those addresses will move around, an address translation + * mechanism is required. + * + * There are four partitions (three memory and one register partition), + * which are mapped to two different areas of the hardware memory. + * + * Virtual address + * space + * + * | | + * ...+----+--> mem.start + * Physical address ... | | + * space ... | | [PART_0] + * ... | | + * 00000000 <--+----+... ...+----+--> mem.start + mem.size + * | | ... | | + * |MEM | ... | | + * | | ... | | + * mem.size <--+----+... | | {unused area) + * | | ... | | + * |REG | ... | | + * mem.size | | ... | | + * + <--+----+... ...+----+--> reg.start + * reg.size | | ... | | + * |MEM2| ... | | [PART_1] + * | | ... 
| | + * ...+----+--> reg.start + reg.size + * | | + * + */ +int wlcore_set_partition(struct wl1271 *wl, + const struct wlcore_partition_set *p) +{ + int ret; + + /* copy partition info */ + memcpy(&wl->curr_part, p, sizeof(*p)); + + wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X", + p->mem.start, p->mem.size); + wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X", + p->reg.start, p->reg.size); + wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X", + p->mem2.start, p->mem2.size); + wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X", + p->mem3.start, p->mem3.size); + + ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); + if (ret < 0) + goto out; + + ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); + if (ret < 0) + goto out; + + /* + * We don't need the size of the last partition, as it is + * automatically calculated based on the total memory size and + * the sizes of the previous partitions. + */ + ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); + +out: + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_set_partition); + +void wl1271_io_reset(struct wl1271 *wl) +{ + if (wl->if_ops->reset) + wl->if_ops->reset(wl->dev); +} + +void wl1271_io_init(struct wl1271 *wl) +{ + if (wl->if_ops->init) + wl->if_ops->init(wl->dev); +} diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h new file mode 100644 index 000000000..0305729d0 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/io.h @@ -0,0 +1,238 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __IO_H__ +#define __IO_H__ + +#include + +#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 + +#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0 +#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR) +#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4) +#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8) +#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) +#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) +#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) +#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) + +#define HW_ACCESS_REGISTER_SIZE 4 + +#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 + +struct wl1271; + +void wlcore_disable_interrupts(struct wl1271 *wl); +void wlcore_disable_interrupts_nosync(struct wl1271 *wl); +void wlcore_enable_interrupts(struct wl1271 *wl); +void wlcore_synchronize_interrupts(struct wl1271 *wl); + +void wl1271_io_reset(struct wl1271 *wl); +void wl1271_io_init(struct wl1271 *wl); +int wlcore_translate_addr(struct wl1271 *wl, int addr); + +/* Raw target IO, address is not translated */ +static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr, + void *buf, size_t len, + bool fixed) +{ + int ret; + + if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) || + WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) && + addr != HW_ACCESS_ELP_CTRL_REG))) + return -EIO; + + ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed); + if (ret && wl->state != WLCORE_STATE_OFF) + set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); + + return ret; +} + +static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr, + void *buf, size_t len, + bool fixed) +{ + int ret; + + if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) || + WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) && + addr != HW_ACCESS_ELP_CTRL_REG))) + return -EIO; + + ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed); + if (ret && wl->state != WLCORE_STATE_OFF) + set_bit(WL1271_FLAG_IO_FAILED, &wl->flags); + + return ret; +} + +static inline int __must_check wlcore_raw_read_data(struct wl1271 *wl, int reg, + void *buf, size_t len, + bool fixed) +{ + return wlcore_raw_read(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline int __must_check wlcore_raw_write_data(struct wl1271 *wl, int reg, + void *buf, size_t len, + bool fixed) +{ + return wlcore_raw_write(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr, + u32 *val) +{ + int ret; + + ret = wlcore_raw_read(wl, addr, wl->buffer_32, + sizeof(*wl->buffer_32), false); + if (ret < 0) + return ret; + + if (val) + *val = le32_to_cpu(*wl->buffer_32); + + return 0; +} + +static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr, + u32 val) +{ + *wl->buffer_32 = cpu_to_le32(val); + return wlcore_raw_write(wl, addr, wl->buffer_32, + sizeof(*wl->buffer_32), false); +} + +static inline int __must_check wlcore_read(struct wl1271 *wl, int addr, + void *buf, size_t len, bool fixed) +{ + int physical; + + physical = wlcore_translate_addr(wl, addr); + + return wlcore_raw_read(wl, physical, buf, len, fixed); +} + +static inline int __must_check wlcore_write(struct wl1271 *wl, int addr, + void *buf, size_t len, bool fixed) +{ + int physical; + + physical = wlcore_translate_addr(wl, 
addr); + + return wlcore_raw_write(wl, physical, buf, len, fixed); +} + +static inline int __must_check wlcore_write_data(struct wl1271 *wl, int reg, + void *buf, size_t len, + bool fixed) +{ + return wlcore_write(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline int __must_check wlcore_read_data(struct wl1271 *wl, int reg, + void *buf, size_t len, + bool fixed) +{ + return wlcore_read(wl, wl->rtable[reg], buf, len, fixed); +} + +static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr, + void *buf, size_t len, + bool fixed) +{ + int physical; + int addr; + + /* Convert from FW internal address which is chip arch dependent */ + addr = wl->ops->convert_hwaddr(wl, hwaddr); + + physical = wlcore_translate_addr(wl, addr); + + return wlcore_raw_read(wl, physical, buf, len, fixed); +} + +static inline int __must_check wlcore_read32(struct wl1271 *wl, int addr, + u32 *val) +{ + return wlcore_raw_read32(wl, wlcore_translate_addr(wl, addr), val); +} + +static inline int __must_check wlcore_write32(struct wl1271 *wl, int addr, + u32 val) +{ + return wlcore_raw_write32(wl, wlcore_translate_addr(wl, addr), val); +} + +static inline int __must_check wlcore_read_reg(struct wl1271 *wl, int reg, + u32 *val) +{ + return wlcore_raw_read32(wl, + wlcore_translate_addr(wl, wl->rtable[reg]), + val); +} + +static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg, + u32 val) +{ + return wlcore_raw_write32(wl, + wlcore_translate_addr(wl, wl->rtable[reg]), + val); +} + +static inline void wl1271_power_off(struct wl1271 *wl) +{ + int ret; + + if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags)) + return; + + ret = wl->if_ops->power(wl->dev, false); + if (!ret) + clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); +} + +static inline int wl1271_power_on(struct wl1271 *wl) +{ + int ret = wl->if_ops->power(wl->dev, true); + if (ret == 0) + set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + + return ret; +} + +int wlcore_set_partition(struct wl1271 *wl, + const struct wlcore_partition_set *p); + +bool wl1271_set_block_size(struct wl1271 *wl); + +/* Functions from wl1271_main.c */ + +int wl1271_tx_dummy_packet(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c new file mode 100644 index 000000000..05b823e39 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -0,0 +1,6556 @@ + +/* + * This file is part of wlcore + * + * Copyright (C) 2008-2010 Nokia Corporation + * Copyright (C) 2011-2013 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "wl12xx_80211.h" +#include "io.h" +#include "tx.h" +#include "ps.h" +#include "init.h" +#include "debugfs.h" +#include "testmode.h" +#include "vendor_cmd.h" +#include "scan.h" +#include "hw_ops.h" +#include "sysfs.h" + +#define WL1271_BOOT_RETRIES 3 + +static char *fwlog_param; +static int fwlog_mem_blocks = -1; +static int bug_on_recovery = -1; +static int no_recovery = -1; + +static void __wl1271_op_remove_interface(struct wl1271 *wl, + struct ieee80211_vif *vif, + bool reset_tx_queues); +static void wlcore_op_stop_locked(struct wl1271 *wl); +static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); + +static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS)) + return -EINVAL; + + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + return 0; + + if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) + return 0; + + ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid); + if (ret < 0) + return ret; + + wl1271_info("Association completed."); + return 0; +} + +static void wl1271_reg_notify(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wl1271 *wl = hw->priv; + + /* copy the current dfs region */ + if (request) + wl->dfs_region = request->dfs_region; + + wlcore_regdomain_config(wl); +} + +static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool enable) +{ + int ret = 0; + + /* we should hold wl->mutex */ + ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable); + if (ret < 0) + goto out; + + if (enable) + set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); + else + clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); +out: + return ret; +} + +/* + * this function is being called when the rx_streaming interval + * has beed changed or rx_streaming should be disabled + */ +int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret = 0; + int period = wl->conf.rx_streaming.interval; + + /* don't reconfigure if rx_streaming is disabled */ + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + goto out; + + /* reconfigure/disable according to new streaming_period */ + if (period && + test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && + (wl->conf.rx_streaming.always || + test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) + ret = wl1271_set_rx_streaming(wl, wlvif, true); + else { + ret = wl1271_set_rx_streaming(wl, wlvif, false); + /* don't cancel_work_sync since we might deadlock */ + del_timer_sync(&wlvif->rx_streaming_timer); + } +out: + return ret; +} + +static void wl1271_rx_streaming_enable_work(struct work_struct *work) +{ + int ret; + struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, + rx_streaming_enable_work); + struct wl1271 *wl = wlvif->wl; + + mutex_lock(&wl->mutex); + + if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || + !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || + (!wl->conf.rx_streaming.always && + !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) + goto out; + + if (!wl->conf.rx_streaming.interval) + goto out; + + ret = 
wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_set_rx_streaming(wl, wlvif, true); + if (ret < 0) + goto out_sleep; + + /* stop it after some time of inactivity */ + mod_timer(&wlvif->rx_streaming_timer, + jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wl1271_rx_streaming_disable_work(struct work_struct *work) +{ + int ret; + struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, + rx_streaming_disable_work); + struct wl1271 *wl = wlvif->wl; + + mutex_lock(&wl->mutex); + + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_set_rx_streaming(wl, wlvif, false); + if (ret) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wl1271_rx_streaming_timer(unsigned long data) +{ + struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data; + struct wl1271 *wl = wlvif->wl; + ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); +} + +/* wl->mutex must be taken */ +void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl) +{ + /* if the watchdog is not armed, don't do anything */ + if (wl->tx_allocated_blocks == 0) + return; + + cancel_delayed_work(&wl->tx_watchdog_work); + ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work, + msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout)); +} + +static void wlcore_rc_update_work(struct work_struct *work) +{ + int ret; + struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, + rc_update_work); + struct wl1271 *wl = wlvif->wl; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wlcore_hw_sta_rc_update(wl, wlvif); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wl12xx_tx_watchdog_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, tx_watchdog_work); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* Tx went out in the meantime - everything is ok */ + if (unlikely(wl->tx_allocated_blocks == 0)) + goto out; + + /* + * if a ROC is in progress, we might not have any Tx for a long + * time (e.g. pending Tx on the non-ROC channels) + */ + if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { + wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC", + wl->conf.tx.tx_watchdog_timeout); + wl12xx_rearm_tx_watchdog_locked(wl); + goto out; + } + + /* + * if a scan is in progress, we might not have any Tx for a long + * time + */ + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { + wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan", + wl->conf.tx.tx_watchdog_timeout); + wl12xx_rearm_tx_watchdog_locked(wl); + goto out; + } + + /* + * AP might cache a frame for a long time for a sleeping station, + * so rearm the timer if there's an AP interface with stations. If + * Tx is genuinely stuck we will most hopefully discover it when all + * stations are removed due to inactivity. + */ + if (wl->active_sta_count) { + wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. 
AP has " + " %d stations", + wl->conf.tx.tx_watchdog_timeout, + wl->active_sta_count); + wl12xx_rearm_tx_watchdog_locked(wl); + goto out; + } + + wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery", + wl->conf.tx.tx_watchdog_timeout); + wl12xx_queue_recovery_work(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static void wlcore_adjust_conf(struct wl1271 *wl) +{ + /* Adjust settings according to optional module parameters */ + + /* Firmware Logger params */ + if (fwlog_mem_blocks != -1) { + if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS && + fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) { + wl->conf.fwlog.mem_blocks = fwlog_mem_blocks; + } else { + wl1271_error( + "Illegal fwlog_mem_blocks=%d using default %d", + fwlog_mem_blocks, wl->conf.fwlog.mem_blocks); + } + } + + if (fwlog_param) { + if (!strcmp(fwlog_param, "continuous")) { + wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; + } else if (!strcmp(fwlog_param, "ondemand")) { + wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; + } else if (!strcmp(fwlog_param, "dbgpins")) { + wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; + wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; + } else if (!strcmp(fwlog_param, "disable")) { + wl->conf.fwlog.mem_blocks = 0; + wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; + } else { + wl1271_error("Unknown fwlog parameter %s", fwlog_param); + } + } + + if (bug_on_recovery != -1) + wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery; + + if (no_recovery != -1) + wl->conf.recovery.no_recovery = (u8) no_recovery; +} + +static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 hlid, u8 tx_pkts) +{ + bool fw_ps; + + fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); + + /* + * Wake up from high level PS if the STA is asleep with too little + * packets in FW or if the STA is awake. + */ + if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) + wl12xx_ps_link_end(wl, wlvif, hlid); + + /* + * Start high-level PS if the STA is asleep with enough blocks in FW. + * Make an exception if this is the only connected link. In this + * case FW-memory congestion is less of a problem. + * Note that a single connected STA means 2*ap_count + 1 active links, + * since we must account for the global and broadcast AP links + * for each AP. The "fw_ps" check assures us the other link is a STA + * connected to the AP. Otherwise the FW would not set the PSM bit. 
+ */ + else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && + tx_pkts >= WL1271_PS_STA_MAX_PACKETS) + wl12xx_ps_link_start(wl, wlvif, hlid, true); +} + +static void wl12xx_irq_update_links_status(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct wl_fw_status *status) +{ + unsigned long cur_fw_ps_map; + u8 hlid; + + cur_fw_ps_map = status->link_ps_bitmap; + if (wl->ap_fw_ps_map != cur_fw_ps_map) { + wl1271_debug(DEBUG_PSM, + "link ps prev 0x%lx cur 0x%lx changed 0x%lx", + wl->ap_fw_ps_map, cur_fw_ps_map, + wl->ap_fw_ps_map ^ cur_fw_ps_map); + + wl->ap_fw_ps_map = cur_fw_ps_map; + } + + for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links) + wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, + wl->links[hlid].allocated_pkts); +} + +static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) +{ + struct wl12xx_vif *wlvif; + struct timespec ts; + u32 old_tx_blk_count = wl->tx_blocks_available; + int avail, freed_blocks; + int i; + int ret; + struct wl1271_link *lnk; + + ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, + wl->raw_fw_status, + wl->fw_status_len, false); + if (ret < 0) + return ret; + + wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status); + + wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " + "drv_rx_counter = %d, tx_results_counter = %d)", + status->intr, + status->fw_rx_counter, + status->drv_rx_counter, + status->tx_results_counter); + + for (i = 0; i < NUM_TX_QUEUES; i++) { + /* prevent wrap-around in freed-packets counter */ + wl->tx_allocated_pkts[i] -= + (status->counters.tx_released_pkts[i] - + wl->tx_pkts_freed[i]) & 0xff; + + wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; + } + + + for_each_set_bit(i, wl->links_map, wl->num_links) { + u8 diff; + lnk = &wl->links[i]; + + /* prevent wrap-around in freed-packets counter */ + diff = (status->counters.tx_lnk_free_pkts[i] - + lnk->prev_freed_pkts) & 0xff; + + if (diff == 0) + continue; + + lnk->allocated_pkts -= diff; + lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i]; + + /* accumulate the prev_freed_pkts counter */ + lnk->total_freed_pkts += diff; + } + + /* prevent wrap-around in total blocks counter */ + if (likely(wl->tx_blocks_freed <= status->total_released_blks)) + freed_blocks = status->total_released_blks - + wl->tx_blocks_freed; + else + freed_blocks = 0x100000000LL - wl->tx_blocks_freed + + status->total_released_blks; + + wl->tx_blocks_freed = status->total_released_blks; + + wl->tx_allocated_blocks -= freed_blocks; + + /* + * If the FW freed some blocks: + * If we still have allocated blocks - re-arm the timer, Tx is + * not stuck. Otherwise, cancel the timer (no Tx currently). + */ + if (freed_blocks) { + if (wl->tx_allocated_blocks) + wl12xx_rearm_tx_watchdog_locked(wl); + else + cancel_delayed_work(&wl->tx_watchdog_work); + } + + avail = status->tx_total - wl->tx_allocated_blocks; + + /* + * The FW might change the total number of TX memblocks before + * we get a notification about blocks being released. Thus, the + * available blocks calculation might yield a temporary result + * which is lower than the actual available blocks. Keeping in + * mind that only blocks that were allocated can be moved from + * TX to RX, tx_blocks_available should never decrease here. 
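+ * Illustration (made-up numbers): if tx_blocks_available is currently
+ * 80 and the FW lowers tx_total before the matching release shows up
+ * in total_released_blks, "avail" may briefly evaluate to e.g. 70;
+ * the max() below keeps the previously known 80 until the counters
+ * catch up.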
+ */ + wl->tx_blocks_available = max((int)wl->tx_blocks_available, + avail); + + /* if more blocks are available now, tx work can be scheduled */ + if (wl->tx_blocks_available > old_tx_blk_count) + clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); + + /* for AP update num of allocated TX blocks per link and ps status */ + wl12xx_for_each_wlvif_ap(wl, wlvif) { + wl12xx_irq_update_links_status(wl, wlvif, status); + } + + /* update the host-chipset time offset */ + getnstimeofday(&ts); + wl->time_offset = (timespec_to_ns(&ts) >> 10) - + (s64)(status->fw_localtime); + + wl->fw_fast_lnk_map = status->link_fast_bitmap; + + return 0; +} + +static void wl1271_flush_deferred_work(struct wl1271 *wl) +{ + struct sk_buff *skb; + + /* Pass all received frames to the network stack */ + while ((skb = skb_dequeue(&wl->deferred_rx_queue))) + ieee80211_rx_ni(wl->hw, skb); + + /* Return sent skbs to the network stack */ + while ((skb = skb_dequeue(&wl->deferred_tx_queue))) + ieee80211_tx_status_ni(wl->hw, skb); +} + +static void wl1271_netstack_work(struct work_struct *work) +{ + struct wl1271 *wl = + container_of(work, struct wl1271, netstack_work); + + do { + wl1271_flush_deferred_work(wl); + } while (skb_queue_len(&wl->deferred_rx_queue)); +} + +#define WL1271_IRQ_MAX_LOOPS 256 + +static int wlcore_irq_locked(struct wl1271 *wl) +{ + int ret = 0; + u32 intr; + int loopcount = WL1271_IRQ_MAX_LOOPS; + bool done = false; + unsigned int defer_count; + unsigned long flags; + + /* + * In case edge triggered interrupt must be used, we cannot iterate + * more than once without introducing race conditions with the hardirq. + */ + if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) + loopcount = 1; + + wl1271_debug(DEBUG_IRQ, "IRQ work"); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + while (!done && loopcount--) { + /* + * In order to avoid a race with the hardirq, clear the flag + * before acknowledging the chip. Since the mutex is held, + * wl1271_ps_elp_wakeup cannot be called concurrently. + */ + clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + smp_mb__after_atomic(); + + ret = wlcore_fw_status(wl, wl->fw_status); + if (ret < 0) + goto out; + + wlcore_hw_tx_immediate_compl(wl); + + intr = wl->fw_status->intr; + intr &= WLCORE_ALL_INTR_MASK; + if (!intr) { + done = true; + continue; + } + + if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { + wl1271_error("HW watchdog interrupt received! starting recovery."); + wl->watchdog_recovery = true; + ret = -EIO; + + /* restarting the chip. ignore any other interrupt. */ + goto out; + } + + if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) { + wl1271_error("SW watchdog interrupt received! " + "starting recovery."); + wl->watchdog_recovery = true; + ret = -EIO; + + /* restarting the chip. ignore any other interrupt. */ + goto out; + } + + if (likely(intr & WL1271_ACX_INTR_DATA)) { + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); + + ret = wlcore_rx(wl, wl->fw_status); + if (ret < 0) + goto out; + + /* Check if any tx blocks were freed */ + spin_lock_irqsave(&wl->wl_lock, flags); + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && + wl1271_tx_total_queue_count(wl) > 0) { + spin_unlock_irqrestore(&wl->wl_lock, flags); + /* + * In order to avoid starvation of the TX path, + * call the work function directly. 
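+ * (wl->mutex is already held and the chip was woken up at the top of
+ * this function, so running the TX path inline here is cheaper than
+ * bouncing through the workqueue while RX keeps the IRQ thread busy.)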
+ */ + ret = wlcore_tx_work_locked(wl); + if (ret < 0) + goto out; + } else { + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + + /* check for tx results */ + ret = wlcore_hw_tx_delayed_compl(wl); + if (ret < 0) + goto out; + + /* Make sure the deferred queues don't get too long */ + defer_count = skb_queue_len(&wl->deferred_tx_queue) + + skb_queue_len(&wl->deferred_rx_queue); + if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) + wl1271_flush_deferred_work(wl); + } + + if (intr & WL1271_ACX_INTR_EVENT_A) { + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); + ret = wl1271_event_handle(wl, 0); + if (ret < 0) + goto out; + } + + if (intr & WL1271_ACX_INTR_EVENT_B) { + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); + ret = wl1271_event_handle(wl, 1); + if (ret < 0) + goto out; + } + + if (intr & WL1271_ACX_INTR_INIT_COMPLETE) + wl1271_debug(DEBUG_IRQ, + "WL1271_ACX_INTR_INIT_COMPLETE"); + + if (intr & WL1271_ACX_INTR_HW_AVAILABLE) + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); + } + + wl1271_ps_elp_sleep(wl); + +out: + return ret; +} + +static irqreturn_t wlcore_irq(int irq, void *cookie) +{ + int ret; + unsigned long flags; + struct wl1271 *wl = cookie; + + /* complete the ELP completion */ + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + if (wl->elp_compl) { + complete(wl->elp_compl); + wl->elp_compl = NULL; + } + + if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { + /* don't enqueue a work right now. mark it as pending */ + set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); + wl1271_debug(DEBUG_IRQ, "should not enqueue work"); + disable_irq_nosync(wl->irq); + pm_wakeup_event(wl->dev, 0); + spin_unlock_irqrestore(&wl->wl_lock, flags); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + /* TX might be handled here, avoid redundant work */ + set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); + cancel_work_sync(&wl->tx_work); + + mutex_lock(&wl->mutex); + + ret = wlcore_irq_locked(wl); + if (ret) + wl12xx_queue_recovery_work(wl); + + spin_lock_irqsave(&wl->wl_lock, flags); + /* In case TX was not handled here, queue TX work */ + clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && + wl1271_tx_total_queue_count(wl) > 0) + ieee80211_queue_work(wl->hw, &wl->tx_work); + spin_unlock_irqrestore(&wl->wl_lock, flags); + + mutex_unlock(&wl->mutex); + + return IRQ_HANDLED; +} + +struct vif_counter_data { + u8 counter; + + struct ieee80211_vif *cur_vif; + bool cur_vif_running; +}; + +static void wl12xx_vif_count_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct vif_counter_data *counter = data; + + counter->counter++; + if (counter->cur_vif == vif) + counter->cur_vif_running = true; +} + +/* caller must not hold wl->mutex, as it might deadlock */ +static void wl12xx_get_vif_count(struct ieee80211_hw *hw, + struct ieee80211_vif *cur_vif, + struct vif_counter_data *data) +{ + memset(data, 0, sizeof(*data)); + data->cur_vif = cur_vif; + + ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, + wl12xx_vif_count_iter, data); +} + +static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) +{ + const struct firmware *fw; + const char *fw_name; + enum wl12xx_fw_type fw_type; + int ret; + + if (plt) { + fw_type = WL12XX_FW_TYPE_PLT; + fw_name = wl->plt_fw_name; + } else { + /* + * we can't call wl12xx_get_vif_count() here because + * wl->mutex is taken, so use the cached last_vif_count value + */ + if (wl->last_vif_count > 1 && wl->mr_fw_name) { + 
fw_type = WL12XX_FW_TYPE_MULTI; + fw_name = wl->mr_fw_name; + } else { + fw_type = WL12XX_FW_TYPE_NORMAL; + fw_name = wl->sr_fw_name; + } + } + + if (wl->fw_type == fw_type) + return 0; + + wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); + + ret = reject_firmware(&fw, fw_name, wl->dev); + + if (ret < 0) { + wl1271_error("could not get firmware %s: %d", fw_name, ret); + return ret; + } + + if (fw->size % 4) { + wl1271_error("firmware size is not multiple of 32 bits: %zu", + fw->size); + ret = -EILSEQ; + goto out; + } + + vfree(wl->fw); + wl->fw_type = WL12XX_FW_TYPE_NONE; + wl->fw_len = fw->size; + wl->fw = vmalloc(wl->fw_len); + + if (!wl->fw) { + wl1271_error("could not allocate memory for the firmware"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->fw, fw->data, wl->fw_len); + ret = 0; + wl->fw_type = fw_type; +out: + release_firmware(fw); + + return ret; +} + +void wl12xx_queue_recovery_work(struct wl1271 *wl) +{ + /* Avoid a recursive recovery */ + if (wl->state == WLCORE_STATE_ON) { + WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, + &wl->flags)); + + wl->state = WLCORE_STATE_RESTARTING; + set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); + wl1271_ps_elp_wakeup(wl); + wlcore_disable_interrupts_nosync(wl); + ieee80211_queue_work(wl->hw, &wl->recovery_work); + } +} + +size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) +{ + size_t len; + + /* Make sure we have enough room */ + len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size); + + /* Fill the FW log file, consumed by the sysfs fwlog entry */ + memcpy(wl->fwlog + wl->fwlog_size, memblock, len); + wl->fwlog_size += len; + + return len; +} + +static void wl12xx_read_fwlog_panic(struct wl1271 *wl) +{ + struct wlcore_partition_set part, old_part; + u32 addr; + u32 offset; + u32 end_of_log; + u8 *block; + int ret; + + if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) || + (wl->conf.fwlog.mem_blocks == 0)) + return; + + wl1271_info("Reading FW panic log"); + + block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL); + if (!block) + return; + + /* + * Make sure the chip is awake and the logger isn't active. + * Do not send a stop fwlog command if the fw is hanged or if + * dbgpins are used (due to some fw bug). + */ + if (wl1271_ps_elp_wakeup(wl)) + goto out; + if (!wl->watchdog_recovery && + wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) + wl12xx_cmd_stop_fwlog(wl); + + /* Read the first memory block address */ + ret = wlcore_fw_status(wl, wl->fw_status); + if (ret < 0) + goto out; + + addr = wl->fw_status->log_start_addr; + if (!addr) + goto out; + + if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) { + offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor); + end_of_log = wl->fwlog_end; + } else { + offset = sizeof(addr); + end_of_log = addr; + } + + old_part = wl->curr_part; + memset(&part, 0, sizeof(part)); + + /* Traverse the memory blocks linked list */ + do { + part.mem.start = wlcore_hw_convert_hwaddr(wl, addr); + part.mem.size = PAGE_SIZE; + + ret = wlcore_set_partition(wl, &part); + if (ret < 0) { + wl1271_error("%s: set_partition start=0x%X size=%d", + __func__, part.mem.start, part.mem.size); + goto out; + } + + memset(block, 0, wl->fw_mem_block_size); + ret = wlcore_read_hwaddr(wl, addr, block, + wl->fw_mem_block_size, false); + + if (ret < 0) + goto out; + + /* + * Memory blocks are linked to one another. The first 4 bytes + * of each memory block hold the hardware address of the next + * one. 
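+ *
+ * (roughly:  blk0 -> blk1 -> blk2 -> ... ; the first word of each
+ *  block holds the address of the next one)
+ *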
The last memory block points to the first one in + * on demand mode and is equal to 0x2000000 in continuous mode. + */ + addr = le32_to_cpup((__le32 *)block); + + if (!wl12xx_copy_fwlog(wl, block + offset, + wl->fw_mem_block_size - offset)) + break; + } while (addr && (addr != end_of_log)); + + wake_up_interruptible(&wl->fwlog_waitq); + +out: + kfree(block); + wlcore_set_partition(wl, &old_part); +} + +static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid, struct ieee80211_sta *sta) +{ + struct wl1271_station *wl_sta; + u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING; + + wl_sta = (void *)sta->drv_priv; + wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts; + + /* + * increment the initial seq number on recovery to account for + * transmitted packets that we haven't yet got in the FW status + */ + if (wlvif->encryption_type == KEY_GEM) + sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM; + + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) + wl_sta->total_freed_pkts += sqn_recovery_padding; +} + +static void wlcore_save_freed_pkts_addr(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 hlid, const u8 *addr) +{ + struct ieee80211_sta *sta; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID || + is_zero_ether_addr(addr))) + return; + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, addr); + if (sta) + wlcore_save_freed_pkts(wl, wlvif, hlid, sta); + rcu_read_unlock(); +} + +static void wlcore_print_recovery(struct wl1271 *wl) +{ + u32 pc = 0; + u32 hint_sts = 0; + int ret; + + wl1271_info("Hardware recovery in progress. FW ver: %s", + wl->chip.fw_ver_str); + + /* change partitions momentarily so we can read the FW pc */ + ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); + if (ret < 0) + return; + + ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc); + if (ret < 0) + return; + + ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts); + if (ret < 0) + return; + + wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d", + pc, hint_sts, ++wl->recovery_count); + + wlcore_set_partition(wl, &wl->ptable[PART_WORK]); +} + + +static void wl1271_recovery_work(struct work_struct *work) +{ + struct wl1271 *wl = + container_of(work, struct wl1271, recovery_work); + struct wl12xx_vif *wlvif; + struct ieee80211_vif *vif; + + mutex_lock(&wl->mutex); + + if (wl->state == WLCORE_STATE_OFF || wl->plt) + goto out_unlock; + + if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { + if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST) + wl12xx_read_fwlog_panic(wl); + wlcore_print_recovery(wl); + } + + BUG_ON(wl->conf.recovery.bug_on_recovery && + !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + + if (wl->conf.recovery.no_recovery) { + wl1271_info("No recovery (chosen on module load). 
Fw will remain stuck."); + goto out_unlock; + } + + /* Prevent spurious TX during FW restart */ + wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); + + /* reboot the chipset */ + while (!list_empty(&wl->wlvif_list)) { + wlvif = list_first_entry(&wl->wlvif_list, + struct wl12xx_vif, list); + vif = wl12xx_wlvif_to_vif(wlvif); + + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid, + vif->bss_conf.bssid); + } + + __wl1271_op_remove_interface(wl, vif, false); + } + + wlcore_op_stop_locked(wl); + + ieee80211_restart_hw(wl->hw); + + /* + * Its safe to enable TX now - the queues are stopped after a request + * to restart the HW. + */ + wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); + +out_unlock: + wl->watchdog_recovery = false; + clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); + mutex_unlock(&wl->mutex); +} + +static int wlcore_fw_wakeup(struct wl1271 *wl) +{ + return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); +} + +static int wl1271_setup(struct wl1271 *wl) +{ + wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL); + if (!wl->raw_fw_status) + goto err; + + wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL); + if (!wl->fw_status) + goto err; + + wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); + if (!wl->tx_res_if) + goto err; + + return 0; +err: + kfree(wl->fw_status); + kfree(wl->raw_fw_status); + return -ENOMEM; +} + +static int wl12xx_set_power_on(struct wl1271 *wl) +{ + int ret; + + msleep(WL1271_PRE_POWER_ON_SLEEP); + ret = wl1271_power_on(wl); + if (ret < 0) + goto out; + msleep(WL1271_POWER_ON_SLEEP); + wl1271_io_reset(wl); + wl1271_io_init(wl); + + ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); + if (ret < 0) + goto fail; + + /* ELP module wake up */ + ret = wlcore_fw_wakeup(wl); + if (ret < 0) + goto fail; + +out: + return ret; + +fail: + wl1271_power_off(wl); + return ret; +} + +static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) +{ + int ret = 0; + + ret = wl12xx_set_power_on(wl); + if (ret < 0) + goto out; + + /* + * For wl127x based devices we could use the default block + * size (512 bytes), but due to a bug in the sdio driver, we + * need to set it explicitly after the chip is powered on. To + * simplify the code and since the performance impact is + * negligible, we use the same block size for all different + * chip types. + * + * Check if the bus supports blocksize alignment and, if it + * doesn't, make sure we don't have the quirk. 
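+ * (wl1271_set_block_size() fails when the bus glue provides no
+ * set-block-size operation, e.g. the SPI interface; in that case the
+ * TX blocksize-align quirk is cleared below so TX lengths are not
+ * padded to block boundaries.)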
+ */ + if (!wl1271_set_block_size(wl)) + wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; + + /* TODO: make sure the lower driver has set things up correctly */ + + ret = wl1271_setup(wl); + if (ret < 0) + goto out; + + ret = wl12xx_fetch_firmware(wl, plt); + if (ret < 0) + goto out; + +out: + return ret; +} + +int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode) +{ + int retries = WL1271_BOOT_RETRIES; + struct wiphy *wiphy = wl->hw->wiphy; + + static const char* const PLT_MODE[] = { + "PLT_OFF", + "PLT_ON", + "PLT_FEM_DETECT", + "PLT_CHIP_AWAKE" + }; + + int ret; + + mutex_lock(&wl->mutex); + + wl1271_notice("power up"); + + if (wl->state != WLCORE_STATE_OFF) { + wl1271_error("cannot go into PLT state because not " + "in off state: %d", wl->state); + ret = -EBUSY; + goto out; + } + + /* Indicate to lower levels that we are now in PLT mode */ + wl->plt = true; + wl->plt_mode = plt_mode; + + while (retries) { + retries--; + ret = wl12xx_chip_wakeup(wl, true); + if (ret < 0) + goto power_off; + + if (plt_mode != PLT_CHIP_AWAKE) { + ret = wl->ops->plt_init(wl); + if (ret < 0) + goto power_off; + } + + wl->state = WLCORE_STATE_ON; + wl1271_notice("firmware booted in PLT mode %s (%s)", + PLT_MODE[plt_mode], + wl->chip.fw_ver_str); + + /* update hw/fw version info in wiphy struct */ + wiphy->hw_version = wl->chip.id; + strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + sizeof(wiphy->fw_version)); + + goto out; + +power_off: + wl1271_power_off(wl); + } + + wl->plt = false; + wl->plt_mode = PLT_OFF; + + wl1271_error("firmware boot in PLT mode failed despite %d retries", + WL1271_BOOT_RETRIES); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +int wl1271_plt_stop(struct wl1271 *wl) +{ + int ret = 0; + + wl1271_notice("power down"); + + /* + * Interrupts must be disabled before setting the state to OFF. + * Otherwise, the interrupt handler might be called and exit without + * reading the interrupt status. + */ + wlcore_disable_interrupts(wl); + mutex_lock(&wl->mutex); + if (!wl->plt) { + mutex_unlock(&wl->mutex); + + /* + * This will not necessarily enable interrupts as interrupts + * may have been disabled when op_stop was called. It will, + * however, balance the above call to disable_interrupts(). 
+ */ + wlcore_enable_interrupts(wl); + + wl1271_error("cannot power down because not in PLT " + "state: %d", wl->state); + ret = -EBUSY; + goto out; + } + + mutex_unlock(&wl->mutex); + + wl1271_flush_deferred_work(wl); + cancel_work_sync(&wl->netstack_work); + cancel_work_sync(&wl->recovery_work); + cancel_delayed_work_sync(&wl->elp_work); + cancel_delayed_work_sync(&wl->tx_watchdog_work); + + mutex_lock(&wl->mutex); + wl1271_power_off(wl); + wl->flags = 0; + wl->sleep_auth = WL1271_PSM_ILLEGAL; + wl->state = WLCORE_STATE_OFF; + wl->plt = false; + wl->plt_mode = PLT_OFF; + wl->rx_counter = 0; + mutex_unlock(&wl->mutex); + +out: + return ret; +} + +static void wl1271_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct wl1271 *wl = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct wl12xx_vif *wlvif = NULL; + unsigned long flags; + int q, mapping; + u8 hlid; + + if (!vif) { + wl1271_debug(DEBUG_TX, "DROP skb with no vif"); + ieee80211_free_txskb(hw, skb); + return; + } + + wlvif = wl12xx_vif_to_data(vif); + mapping = skb_get_queue_mapping(skb); + q = wl1271_tx_get_queue(mapping); + + hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta); + + spin_lock_irqsave(&wl->wl_lock, flags); + + /* + * drop the packet if the link is invalid or the queue is stopped + * for any reason but watermark. Watermark is a "soft"-stop so we + * allow these packets through. + */ + if (hlid == WL12XX_INVALID_LINK_ID || + (!test_bit(hlid, wlvif->links_map)) || + (wlcore_is_queue_stopped_locked(wl, wlvif, q) && + !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, + WLCORE_QUEUE_STOP_REASON_WATERMARK))) { + wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); + ieee80211_free_txskb(hw, skb); + goto out; + } + + wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d", + hlid, q, skb->len); + skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); + + wl->tx_queue_count[q]++; + wlvif->tx_queue_count[q]++; + + /* + * The workqueue is slow to process the tx_queue and we need stop + * the queue here, otherwise the queue will get too long. + */ + if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && + !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, + WLCORE_QUEUE_STOP_REASON_WATERMARK)) { + wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); + wlcore_stop_queue_locked(wl, wlvif, q, + WLCORE_QUEUE_STOP_REASON_WATERMARK); + } + + /* + * The chip specific setup must run before the first TX packet - + * before that, the tx_work will not be initialized! 
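+ *
+ * (WL1271_FLAG_TX_PENDING is set by the IRQ thread while it services
+ * TX inline, so tx_work is not queued in that window; the IRQ thread
+ * re-queues it afterwards if frames are still pending.)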
+ */ + + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && + !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) + ieee80211_queue_work(wl->hw, &wl->tx_work); + +out: + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +int wl1271_tx_dummy_packet(struct wl1271 *wl) +{ + unsigned long flags; + int q; + + /* no need to queue a new dummy packet if one is already pending */ + if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) + return 0; + + q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet)); + + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); + wl->tx_queue_count[q]++; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + /* The FW is low on RX memory blocks, so send the dummy packet asap */ + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) + return wlcore_tx_work_locked(wl); + + /* + * If the FW TX is busy, TX work will be scheduled by the threaded + * interrupt handler function + */ + return 0; +} + +/* + * The size of the dummy packet should be at least 1400 bytes. However, in + * order to minimize the number of bus transactions, aligning it to 512 bytes + * boundaries could be beneficial, performance wise + */ +#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512)) + +static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) +{ + struct sk_buff *skb; + struct ieee80211_hdr_3addr *hdr; + unsigned int dummy_packet_size; + + dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE - + sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr); + + skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE); + if (!skb) { + wl1271_warning("Failed to allocate a dummy packet skb"); + return NULL; + } + + skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr)); + + hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); + memset(hdr, 0, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_TODS); + + memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size); + + /* Dummy packets require the TID to be management */ + skb->priority = WL1271_TID_MGMT; + + /* Initialize all fields that might be used */ + skb_set_queue_mapping(skb, 0); + memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info)); + + return skb; +} + + +#ifdef CONFIG_PM +static int +wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p) +{ + int num_fields = 0, in_field = 0, fields_size = 0; + int i, pattern_len = 0; + + if (!p->mask) { + wl1271_warning("No mask in WoWLAN pattern"); + return -EINVAL; + } + + /* + * The pattern is broken up into segments of bytes at different offsets + * that need to be checked by the FW filter. Each segment is called + * a field in the FW API. We verify that the total number of fields + * required for this pattern won't exceed FW limits (8) + * as well as the total fields buffer won't exceed the FW limit. + * Note that if there's a pattern which crosses Ethernet/IP header + * boundary a new field is required. 
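+ *
+ * Example (illustrative offsets): a mask selecting bytes 0-5 and
+ * bytes 30-33 yields two fields; a single masked run covering bytes
+ * 12-17 is likewise split into two fields, one on each side of the
+ * 14-byte Ethernet header boundary.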
+ */ + for (i = 0; i < p->pattern_len; i++) { + if (test_bit(i, (unsigned long *)p->mask)) { + if (!in_field) { + in_field = 1; + pattern_len = 1; + } else { + if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) { + num_fields++; + fields_size += pattern_len + + RX_FILTER_FIELD_OVERHEAD; + pattern_len = 1; + } else + pattern_len++; + } + } else { + if (in_field) { + in_field = 0; + fields_size += pattern_len + + RX_FILTER_FIELD_OVERHEAD; + num_fields++; + } + } + } + + if (in_field) { + fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD; + num_fields++; + } + + if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) { + wl1271_warning("RX Filter too complex. Too many segments"); + return -EINVAL; + } + + if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) { + wl1271_warning("RX filter pattern is too big"); + return -E2BIG; + } + + return 0; +} + +struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void) +{ + return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL); +} + +void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter) +{ + int i; + + if (filter == NULL) + return; + + for (i = 0; i < filter->num_fields; i++) + kfree(filter->fields[i].pattern); + + kfree(filter); +} + +int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, + u16 offset, u8 flags, + const u8 *pattern, u8 len) +{ + struct wl12xx_rx_filter_field *field; + + if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) { + wl1271_warning("Max fields per RX filter. can't alloc another"); + return -EINVAL; + } + + field = &filter->fields[filter->num_fields]; + + field->pattern = kzalloc(len, GFP_KERNEL); + if (!field->pattern) { + wl1271_warning("Failed to allocate RX filter pattern"); + return -ENOMEM; + } + + filter->num_fields++; + + field->offset = cpu_to_le16(offset); + field->flags = flags; + field->len = len; + memcpy(field->pattern, pattern, len); + + return 0; +} + +int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter) +{ + int i, fields_size = 0; + + for (i = 0; i < filter->num_fields; i++) + fields_size += filter->fields[i].len + + sizeof(struct wl12xx_rx_filter_field) - + sizeof(u8 *); + + return fields_size; +} + +void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter, + u8 *buf) +{ + int i; + struct wl12xx_rx_filter_field *field; + + for (i = 0; i < filter->num_fields; i++) { + field = (struct wl12xx_rx_filter_field *)buf; + + field->offset = filter->fields[i].offset; + field->flags = filter->fields[i].flags; + field->len = filter->fields[i].len; + + memcpy(&field->pattern, filter->fields[i].pattern, field->len); + buf += sizeof(struct wl12xx_rx_filter_field) - + sizeof(u8 *) + field->len; + } +} + +/* + * Allocates an RX filter returned through f + * which needs to be freed using rx_filter_free() + */ +static int +wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p, + struct wl12xx_rx_filter **f) +{ + int i, j, ret = 0; + struct wl12xx_rx_filter *filter; + u16 offset; + u8 flags, len; + + filter = wl1271_rx_filter_alloc(); + if (!filter) { + wl1271_warning("Failed to alloc rx filter"); + ret = -ENOMEM; + goto err; + } + + i = 0; + while (i < p->pattern_len) { + if (!test_bit(i, (unsigned long *)p->mask)) { + i++; + continue; + } + + for (j = i; j < p->pattern_len; j++) { + if (!test_bit(j, (unsigned long *)p->mask)) + break; + + if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE && + j >= WL1271_RX_FILTER_ETH_HEADER_SIZE) + break; + } + + if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) { + offset = i; + flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER; + } else { + offset = i - 
WL1271_RX_FILTER_ETH_HEADER_SIZE; + flags = WL1271_RX_FILTER_FLAG_IP_HEADER; + } + + len = j - i; + + ret = wl1271_rx_filter_alloc_field(filter, + offset, + flags, + &p->pattern[i], len); + if (ret) + goto err; + + i = j; + } + + filter->action = FILTER_SIGNAL; + + *f = filter; + return 0; + +err: + wl1271_rx_filter_free(filter); + *f = NULL; + + return ret; +} + +static int wl1271_configure_wowlan(struct wl1271 *wl, + struct cfg80211_wowlan *wow) +{ + int i, ret; + + if (!wow || wow->any || !wow->n_patterns) { + ret = wl1271_acx_default_rx_filter_enable(wl, 0, + FILTER_SIGNAL); + if (ret) + goto out; + + ret = wl1271_rx_filter_clear_all(wl); + if (ret) + goto out; + + return 0; + } + + if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS)) + return -EINVAL; + + /* Validate all incoming patterns before clearing current FW state */ + for (i = 0; i < wow->n_patterns; i++) { + ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]); + if (ret) { + wl1271_warning("Bad wowlan pattern %d", i); + return ret; + } + } + + ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); + if (ret) + goto out; + + ret = wl1271_rx_filter_clear_all(wl); + if (ret) + goto out; + + /* Translate WoWLAN patterns into filters */ + for (i = 0; i < wow->n_patterns; i++) { + struct cfg80211_pkt_pattern *p; + struct wl12xx_rx_filter *filter = NULL; + + p = &wow->patterns[i]; + + ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter); + if (ret) { + wl1271_warning("Failed to create an RX filter from " + "wowlan pattern %d", i); + goto out; + } + + ret = wl1271_rx_filter_enable(wl, i, 1, filter); + + wl1271_rx_filter_free(filter); + if (ret) + goto out; + } + + ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP); + +out: + return ret; +} + +static int wl1271_configure_suspend_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_wowlan *wow) +{ + int ret = 0; + + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + goto out; + + ret = wl1271_configure_wowlan(wl, wow); + if (ret < 0) + goto out; + + if ((wl->conf.conn.suspend_wake_up_event == + wl->conf.conn.wake_up_event) && + (wl->conf.conn.suspend_listen_interval == + wl->conf.conn.listen_interval)) + goto out; + + ret = wl1271_acx_wake_up_conditions(wl, wlvif, + wl->conf.conn.suspend_wake_up_event, + wl->conf.conn.suspend_listen_interval); + + if (ret < 0) + wl1271_error("suspend: set wake up conditions failed: %d", ret); +out: + return ret; + +} + +static int wl1271_configure_suspend_ap(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_wowlan *wow) +{ + int ret = 0; + + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) + goto out; + + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); + if (ret < 0) + goto out; + + ret = wl1271_configure_wowlan(wl, wow); + if (ret < 0) + goto out; + +out: + return ret; + +} + +static int wl1271_configure_suspend(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_wowlan *wow) +{ + if (wlvif->bss_type == BSS_TYPE_STA_BSS) + return wl1271_configure_suspend_sta(wl, wlvif, wow); + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + return wl1271_configure_suspend_ap(wl, wlvif, wow); + return 0; +} + +static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret = 0; + bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; + bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; + + if ((!is_ap) && (!is_sta)) + return; + + if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) || + (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, 
&wlvif->flags))) + return; + + wl1271_configure_wowlan(wl, NULL); + + if (is_sta) { + if ((wl->conf.conn.suspend_wake_up_event == + wl->conf.conn.wake_up_event) && + (wl->conf.conn.suspend_listen_interval == + wl->conf.conn.listen_interval)) + return; + + ret = wl1271_acx_wake_up_conditions(wl, wlvif, + wl->conf.conn.wake_up_event, + wl->conf.conn.listen_interval); + + if (ret < 0) + wl1271_error("resume: wake up conditions failed: %d", + ret); + + } else if (is_ap) { + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); + } +} + +static int wl1271_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wow) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); + WARN_ON(!wow); + + /* we want to perform the recovery before suspending */ + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { + wl1271_warning("postponing suspend to perform recovery"); + return -EBUSY; + } + + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) { + mutex_unlock(&wl->mutex); + return ret; + } + + wl->wow_enabled = true; + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_configure_suspend(wl, wlvif, wow); + if (ret < 0) { + mutex_unlock(&wl->mutex); + wl1271_warning("couldn't prepare device to suspend"); + return ret; + } + } + + /* disable fast link flow control notifications from FW */ + ret = wlcore_hw_interrupt_notify(wl, false); + if (ret < 0) + goto out_sleep; + + /* if filtering is enabled, configure the FW to drop all RX BA frames */ + ret = wlcore_hw_rx_ba_filter(wl, + !!wl->conf.conn.suspend_rx_ba_activity); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + mutex_unlock(&wl->mutex); + + if (ret < 0) { + wl1271_warning("couldn't prepare device to suspend"); + return ret; + } + + /* flush any remaining work */ + wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); + + /* + * disable and re-enable interrupts in order to flush + * the threaded_irq + */ + wlcore_disable_interrupts(wl); + + /* + * set suspended flag to avoid triggering a new threaded_irq + * work. no need for spinlock as interrupts are disabled. + */ + set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); + + wlcore_enable_interrupts(wl); + flush_work(&wl->tx_work); + flush_delayed_work(&wl->elp_work); + + /* + * Cancel the watchdog even if above tx_flush failed. We will detect + * it on resume anyway. + */ + cancel_delayed_work(&wl->tx_watchdog_work); + + return 0; +} + +static int wl1271_op_resume(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + unsigned long flags; + bool run_irq_work = false, pending_recovery; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", + wl->wow_enabled); + WARN_ON(!wl->wow_enabled); + + /* + * re-enable irq_work enqueuing, and call irq_work directly if + * there is a pending work. 
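+ * (WL1271_FLAG_PENDING_WORK is set by wlcore_irq() when an interrupt
+ * arrives while WL1271_FLAG_SUSPENDED is set; clearing SUSPENDED and
+ * testing PENDING_WORK under the spinlock below decides whether the
+ * deferred interrupt handling has to be run here.)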
+ */ + spin_lock_irqsave(&wl->wl_lock, flags); + clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); + if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) + run_irq_work = true; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + mutex_lock(&wl->mutex); + + /* test the recovery flag before calling any SDIO functions */ + pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, + &wl->flags); + + if (run_irq_work) { + wl1271_debug(DEBUG_MAC80211, + "run postponed irq_work directly"); + + /* don't talk to the HW if recovery is pending */ + if (!pending_recovery) { + ret = wlcore_irq_locked(wl); + if (ret) + wl12xx_queue_recovery_work(wl); + } + + wlcore_enable_interrupts(wl); + } + + if (pending_recovery) { + wl1271_warning("queuing forgotten recovery on resume"); + ieee80211_queue_work(wl->hw, &wl->recovery_work); + goto out_sleep; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + wl1271_configure_resume(wl, wlvif); + } + + ret = wlcore_hw_interrupt_notify(wl, true); + if (ret < 0) + goto out_sleep; + + /* if filtering is enabled, configure the FW to drop all RX BA frames */ + ret = wlcore_hw_rx_ba_filter(wl, false); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + wl->wow_enabled = false; + + /* + * Set a flag to re-init the watchdog on the first Tx after resume. + * That way we avoid possible conditions where Tx-complete interrupts + * fail to arrive and we perform a spurious recovery. + */ + set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags); + mutex_unlock(&wl->mutex); + + return 0; +} +#endif + +static int wl1271_op_start(struct ieee80211_hw *hw) +{ + wl1271_debug(DEBUG_MAC80211, "mac80211 start"); + + /* + * We have to delay the booting of the hardware because + * we need to know the local MAC address before downloading and + * initializing the firmware. The MAC address cannot be changed + * after boot, and without the proper MAC address, the firmware + * will not function properly. + * + * The MAC address is first known when the corresponding interface + * is added. That is where we will initialize the hardware. + */ + + return 0; +} + +static void wlcore_op_stop_locked(struct wl1271 *wl) +{ + int i; + + if (wl->state == WLCORE_STATE_OFF) { + if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, + &wl->flags)) + wlcore_enable_interrupts(wl); + + return; + } + + /* + * this must be before the cancel_work calls below, so that the work + * functions don't perform further work. + */ + wl->state = WLCORE_STATE_OFF; + + /* + * Use the nosync variant to disable interrupts, so the mutex could be + * held while doing so without deadlocking. + */ + wlcore_disable_interrupts_nosync(wl); + + mutex_unlock(&wl->mutex); + + wlcore_synchronize_interrupts(wl); + if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) + cancel_work_sync(&wl->recovery_work); + wl1271_flush_deferred_work(wl); + cancel_delayed_work_sync(&wl->scan_complete_work); + cancel_work_sync(&wl->netstack_work); + cancel_work_sync(&wl->tx_work); + cancel_delayed_work_sync(&wl->elp_work); + cancel_delayed_work_sync(&wl->tx_watchdog_work); + + /* let's notify MAC80211 about the remaining pending TX frames */ + mutex_lock(&wl->mutex); + wl12xx_tx_reset(wl); + + wl1271_power_off(wl); + /* + * In case a recovery was scheduled, interrupts were disabled to avoid + * an interrupt storm. 
Now that the power is down, it is safe to + * re-enable interrupts to balance the disable depth + */ + if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) + wlcore_enable_interrupts(wl); + + wl->band = IEEE80211_BAND_2GHZ; + + wl->rx_counter = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->channel_type = NL80211_CHAN_NO_HT; + wl->tx_blocks_available = 0; + wl->tx_allocated_blocks = 0; + wl->tx_results_count = 0; + wl->tx_packets_count = 0; + wl->time_offset = 0; + wl->ap_fw_ps_map = 0; + wl->ap_ps_map = 0; + wl->sleep_auth = WL1271_PSM_ILLEGAL; + memset(wl->roles_map, 0, sizeof(wl->roles_map)); + memset(wl->links_map, 0, sizeof(wl->links_map)); + memset(wl->roc_map, 0, sizeof(wl->roc_map)); + memset(wl->session_ids, 0, sizeof(wl->session_ids)); + memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled)); + wl->active_sta_count = 0; + wl->active_link_count = 0; + + /* The system link is always allocated */ + wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0; + wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0; + __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); + + /* + * this is performed after the cancel_work calls and the associated + * mutex_lock, so that wl1271_op_add_interface does not accidentally + * get executed before all these vars have been reset. + */ + wl->flags = 0; + + wl->tx_blocks_freed = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->tx_pkts_freed[i] = 0; + wl->tx_allocated_pkts[i] = 0; + } + + wl1271_debugfs_reset(wl); + + kfree(wl->raw_fw_status); + wl->raw_fw_status = NULL; + kfree(wl->fw_status); + wl->fw_status = NULL; + kfree(wl->tx_res_if); + wl->tx_res_if = NULL; + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + + /* + * FW channels must be re-calibrated after recovery, + * save current Reg-Domain channel configuration and clear it. 
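+ * (Copying reg_ch_conf_last into reg_ch_conf_pending makes the next
+ * regdomain configuration re-program the same channels after the FW
+ * is rebooted, while zeroing reg_ch_conf_last keeps them from being
+ * treated as already applied.)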
+ */ + memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last, + sizeof(wl->reg_ch_conf_pending)); + memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last)); +} + +static void wlcore_op_stop(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + + wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); + + mutex_lock(&wl->mutex); + + wlcore_op_stop_locked(wl); + + mutex_unlock(&wl->mutex); +} + +static void wlcore_channel_switch_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work); + wl = wlvif->wl; + + wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* check the channel switch is still ongoing */ + if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) + goto out; + + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_chswitch_done(vif, false); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_cmd_stop_channel_switch(wl, wlvif); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wlcore_connection_loss_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + + dwork = container_of(work, struct delayed_work, work); + wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work); + wl = wlvif->wl; + + wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* Call mac80211 connection loss */ + if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + goto out; + + vif = wl12xx_wlvif_to_vif(wlvif); + ieee80211_connection_loss(vif); +out: + mutex_unlock(&wl->mutex); +} + +static void wlcore_pending_auth_complete_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct wl12xx_vif *wlvif; + unsigned long time_spare; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wlvif = container_of(dwork, struct wl12xx_vif, + pending_auth_complete_work); + wl = wlvif->wl; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* + * Make sure a second really passed since the last auth reply. Maybe + * a second auth reply arrived while we were stuck on the mutex. + * Check for a little less than the timeout to protect from scheduler + * irregularities. 
+ */ + time_spare = jiffies + + msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50); + if (!time_after(time_spare, wlvif->pending_auth_reply_time)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* cancel the ROC if active */ + wlcore_update_inconn_sta(wl, wlvif, NULL, false); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) +{ + u8 policy = find_first_zero_bit(wl->rate_policies_map, + WL12XX_MAX_RATE_POLICIES); + if (policy >= WL12XX_MAX_RATE_POLICIES) + return -EBUSY; + + __set_bit(policy, wl->rate_policies_map); + *idx = policy; + return 0; +} + +static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx) +{ + if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES)) + return; + + __clear_bit(*idx, wl->rate_policies_map); + *idx = WL12XX_MAX_RATE_POLICIES; +} + +static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx) +{ + u8 policy = find_first_zero_bit(wl->klv_templates_map, + WLCORE_MAX_KLV_TEMPLATES); + if (policy >= WLCORE_MAX_KLV_TEMPLATES) + return -EBUSY; + + __set_bit(policy, wl->klv_templates_map); + *idx = policy; + return 0; +} + +static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx) +{ + if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES)) + return; + + __clear_bit(*idx, wl->klv_templates_map); + *idx = WLCORE_MAX_KLV_TEMPLATES; +} + +static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + switch (wlvif->bss_type) { + case BSS_TYPE_AP_BSS: + if (wlvif->p2p) + return WL1271_ROLE_P2P_GO; + else + return WL1271_ROLE_AP; + + case BSS_TYPE_STA_BSS: + if (wlvif->p2p) + return WL1271_ROLE_P2P_CL; + else + return WL1271_ROLE_STA; + + case BSS_TYPE_IBSS: + return WL1271_ROLE_IBSS; + + default: + wl1271_error("invalid bss_type: %d", wlvif->bss_type); + } + return WL12XX_INVALID_ROLE_TYPE; +} + +static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int i; + + /* clear everything but the persistent data */ + memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)); + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_CLIENT: + wlvif->p2p = 1; + /* fall-through */ + case NL80211_IFTYPE_STATION: + wlvif->bss_type = BSS_TYPE_STA_BSS; + break; + case NL80211_IFTYPE_ADHOC: + wlvif->bss_type = BSS_TYPE_IBSS; + break; + case NL80211_IFTYPE_P2P_GO: + wlvif->p2p = 1; + /* fall-through */ + case NL80211_IFTYPE_AP: + wlvif->bss_type = BSS_TYPE_AP_BSS; + break; + default: + wlvif->bss_type = MAX_BSS_TYPE; + return -EOPNOTSUPP; + } + + wlvif->role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + /* init sta/ibss data */ + wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; + wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); + wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); + wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); + wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id); + wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; + wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; + wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; + } else { + /* init ap data */ + wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; + wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; + wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); + wl12xx_allocate_rate_policy(wl, 
&wlvif->ap.bcast_rate_idx); + for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) + wl12xx_allocate_rate_policy(wl, + &wlvif->ap.ucast_rate_idx[i]); + wlvif->basic_rate_set = CONF_TX_ENABLED_RATES; + /* + * TODO: check if basic_rate shouldn't be + * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + * instead (the same thing for STA above). + */ + wlvif->basic_rate = CONF_TX_ENABLED_RATES; + /* TODO: this seems to be used only for STA, check it */ + wlvif->rate_set = CONF_TX_ENABLED_RATES; + } + + wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; + wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; + wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; + + /* + * mac80211 configures some values globally, while we treat them + * per-interface. thus, on init, we have to copy them from wl + */ + wlvif->band = wl->band; + wlvif->channel = wl->channel; + wlvif->power_level = wl->power_level; + wlvif->channel_type = wl->channel_type; + + INIT_WORK(&wlvif->rx_streaming_enable_work, + wl1271_rx_streaming_enable_work); + INIT_WORK(&wlvif->rx_streaming_disable_work, + wl1271_rx_streaming_disable_work); + INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work); + INIT_DELAYED_WORK(&wlvif->channel_switch_work, + wlcore_channel_switch_work); + INIT_DELAYED_WORK(&wlvif->connection_loss_work, + wlcore_connection_loss_work); + INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work, + wlcore_pending_auth_complete_work); + INIT_LIST_HEAD(&wlvif->list); + + setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, + (unsigned long) wlvif); + return 0; +} + +static int wl12xx_init_fw(struct wl1271 *wl) +{ + int retries = WL1271_BOOT_RETRIES; + bool booted = false; + struct wiphy *wiphy = wl->hw->wiphy; + int ret; + + while (retries) { + retries--; + ret = wl12xx_chip_wakeup(wl, false); + if (ret < 0) + goto power_off; + + ret = wl->ops->boot(wl); + if (ret < 0) + goto power_off; + + ret = wl1271_hw_init(wl); + if (ret < 0) + goto irq_disable; + + booted = true; + break; + +irq_disable: + mutex_unlock(&wl->mutex); + /* Unlocking the mutex in the middle of handling is + inherently unsafe. In this case we deem it safe to do, + because we need to let any possibly pending IRQ out of + the system (and while we are WLCORE_STATE_OFF the IRQ + work function will not do anything.) Also, any other + possible concurrent operations will fail due to the + current state, hence the wl1271 struct should be safe. */ + wlcore_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_work_sync(&wl->netstack_work); + mutex_lock(&wl->mutex); +power_off: + wl1271_power_off(wl); + } + + if (!booted) { + wl1271_error("firmware boot failed despite %d retries", + WL1271_BOOT_RETRIES); + goto out; + } + + wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); + + /* update hw/fw version info in wiphy struct */ + wiphy->hw_version = wl->chip.id; + strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + sizeof(wiphy->fw_version)); + + /* + * Now we know if 11a is supported (info from the NVS), so disable + * 11a channels if not supported + */ + if (!wl->enable_11a) + wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0; + + wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", + wl->enable_11a ? "" : "not "); + + wl->state = WLCORE_STATE_ON; +out: + return ret; +} + +static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) +{ + return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; +} + +/* + * Check whether a fw switch (i.e. moving from one loaded + * fw to another) is needed. 
This function is also responsible + * for updating wl->last_vif_count, so it must be called before + * loading a non-plt fw (so the correct fw (single-role/multi-role) + * will be used). + */ +static bool wl12xx_need_fw_change(struct wl1271 *wl, + struct vif_counter_data vif_counter_data, + bool add) +{ + enum wl12xx_fw_type current_fw = wl->fw_type; + u8 vif_count = vif_counter_data.counter; + + if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags)) + return false; + + /* increase the vif count if this is a new vif */ + if (add && !vif_counter_data.cur_vif_running) + vif_count++; + + wl->last_vif_count = vif_count; + + /* no need for fw change if the device is OFF */ + if (wl->state == WLCORE_STATE_OFF) + return false; + + /* no need for fw change if a single fw is used */ + if (!wl->mr_fw_name) + return false; + + if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) + return true; + if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI) + return true; + + return false; +} + +/* + * Enter "forced psm". Make sure the sta is in psm against the ap, + * to make the fw switch a bit more disconnection-persistent. + */ +static void wl12xx_force_active_psm(struct wl1271 *wl) +{ + struct wl12xx_vif *wlvif; + + wl12xx_for_each_wlvif_sta(wl, wlvif) { + wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE); + } +} + +struct wlcore_hw_queue_iter_data { + unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)]; + /* current vif */ + struct ieee80211_vif *vif; + /* is the current vif among those iterated */ + bool cur_running; +}; + +static void wlcore_hw_queue_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct wlcore_hw_queue_iter_data *iter_data = data; + + if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE)) + return; + + if (iter_data->cur_running || vif == iter_data->vif) { + iter_data->cur_running = true; + return; + } + + __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map); +} + +static int wlcore_allocate_hw_queue_base(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct wlcore_hw_queue_iter_data iter_data = {}; + int i, q_base; + + iter_data.vif = vif; + + /* mark all bits taken by active interfaces */ + ieee80211_iterate_active_interfaces_atomic(wl->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + wlcore_hw_queue_iter, &iter_data); + + /* the current vif is already running in mac80211 (resume/recovery) */ + if (iter_data.cur_running) { + wlvif->hw_queue_base = vif->hw_queue[0]; + wl1271_debug(DEBUG_MAC80211, + "using pre-allocated hw queue base %d", + wlvif->hw_queue_base); + + /* interface type might have changed type */ + goto adjust_cab_queue; + } + + q_base = find_first_zero_bit(iter_data.hw_queue_map, + WLCORE_NUM_MAC_ADDRESSES); + if (q_base >= WLCORE_NUM_MAC_ADDRESSES) + return -EBUSY; + + wlvif->hw_queue_base = q_base * NUM_TX_QUEUES; + wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d", + wlvif->hw_queue_base); + + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0; + /* register hw queues in mac80211 */ + vif->hw_queue[i] = wlvif->hw_queue_base + i; + } + +adjust_cab_queue: + /* the last places are reserved for cab queues per interface */ + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES + + wlvif->hw_queue_base / NUM_TX_QUEUES; + else + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + + return 0; +} + +static int wl1271_op_add_interface(struct ieee80211_hw 
*hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct vif_counter_data vif_count; + int ret = 0; + u8 role_type; + + if (wl->plt) { + wl1271_error("Adding Interface not allowed while in PLT mode"); + return -EBUSY; + } + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_UAPSD | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", + ieee80211_vif_type_p2p(vif), vif->addr); + + wl12xx_get_vif_count(hw, vif, &vif_count); + + mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_unlock; + + /* + * in some very corner case HW recovery scenarios its possible to + * get here before __wl1271_op_remove_interface is complete, so + * opt out if that is the case. + */ + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || + test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { + ret = -EBUSY; + goto out; + } + + + ret = wl12xx_init_vif_data(wl, vif); + if (ret < 0) + goto out; + + wlvif->wl = wl; + role_type = wl12xx_get_role_type(wl, wlvif); + if (role_type == WL12XX_INVALID_ROLE_TYPE) { + ret = -EINVAL; + goto out; + } + + ret = wlcore_allocate_hw_queue_base(wl, wlvif); + if (ret < 0) + goto out; + + if (wl12xx_need_fw_change(wl, vif_count, true)) { + wl12xx_force_active_psm(wl); + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + mutex_unlock(&wl->mutex); + wl1271_recovery_work(&wl->recovery_work); + return 0; + } + + /* + * TODO: after the nvs issue will be solved, move this block + * to start(), and make sure here the driver is ON. + */ + if (wl->state == WLCORE_STATE_OFF) { + /* + * we still need this in order to configure the fw + * while uploading the nvs + */ + memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN); + + ret = wl12xx_init_fw(wl); + if (ret < 0) + goto out; + } + + ret = wl12xx_cmd_role_enable(wl, vif->addr, + role_type, &wlvif->role_id); + if (ret < 0) + goto out; + + ret = wl1271_init_vif_specific(wl, vif); + if (ret < 0) + goto out; + + list_add(&wlvif->list, &wl->wlvif_list); + set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); + + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + wl->ap_count++; + else + wl->sta_count++; +out: + wl1271_ps_elp_sleep(wl); +out_unlock: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void __wl1271_op_remove_interface(struct wl1271 *wl, + struct ieee80211_vif *vif, + bool reset_tx_queues) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int i, ret; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + + wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); + + if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + return; + + /* because of hardware recovery, we may get here twice */ + if (wl->state == WLCORE_STATE_OFF) + return; + + wl1271_info("down"); + + if (wl->scan.state != WL1271_SCAN_STATE_IDLE && + wl->scan_wlvif == wlvif) { + /* + * Rearm the tx watchdog just before idling scan. 
This + * prevents just-finished scans from triggering the watchdog + */ + wl12xx_rearm_tx_watchdog_locked(wl); + + wl->scan.state = WL1271_SCAN_STATE_IDLE; + memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + wl->scan_wlvif = NULL; + wl->scan.req = NULL; + ieee80211_scan_completed(wl->hw, true); + } + + if (wl->sched_vif == wlvif) + wl->sched_vif = NULL; + + if (wl->roc_vif == vif) { + wl->roc_vif = NULL; + ieee80211_remain_on_channel_expired(wl->hw); + } + + if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { + /* disable active roles */ + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto deinit; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + if (wl12xx_dev_role_started(wlvif)) + wl12xx_stop_dev(wl, wlvif); + } + + ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); + if (ret < 0) + goto deinit; + + wl1271_ps_elp_sleep(wl); + } +deinit: + wl12xx_tx_reset_wlvif(wl, wlvif); + + /* clear all hlids (except system_hlid) */ + wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS || + wlvif->bss_type == BSS_TYPE_IBSS) { + wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; + wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); + wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id); + } else { + wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; + wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; + wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); + wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); + for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) + wl12xx_free_rate_policy(wl, + &wlvif->ap.ucast_rate_idx[i]); + wl1271_free_ap_keys(wl, wlvif); + } + + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = NULL; + if (wl->last_wlvif == wlvif) + wl->last_wlvif = NULL; + list_del(&wlvif->list); + memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); + wlvif->role_id = WL12XX_INVALID_ROLE_ID; + wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; + + if (is_ap) + wl->ap_count--; + else + wl->sta_count--; + + /* + * Last AP, have more stations. Configure sleep auth according to STA. + * Don't do this on unintended recovery.
+ */ + if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) && + !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) + goto unlock; + + if (wl->ap_count == 0 && is_ap) { + /* mask ap events */ + wl->event_mask &= ~wl->ap_event_mask; + wl1271_event_unmask(wl); + } + + if (wl->ap_count == 0 && is_ap && wl->sta_count) { + u8 sta_auth = wl->conf.conn.sta_sleep_auth; + /* Configure for power according to debugfs */ + if (sta_auth != WL1271_PSM_ILLEGAL) + wl1271_acx_sleep_auth(wl, sta_auth); + /* Configure for ELP power saving */ + else + wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + } + +unlock: + mutex_unlock(&wl->mutex); + + del_timer_sync(&wlvif->rx_streaming_timer); + cancel_work_sync(&wlvif->rx_streaming_enable_work); + cancel_work_sync(&wlvif->rx_streaming_disable_work); + cancel_work_sync(&wlvif->rc_update_work); + cancel_delayed_work_sync(&wlvif->connection_loss_work); + cancel_delayed_work_sync(&wlvif->channel_switch_work); + cancel_delayed_work_sync(&wlvif->pending_auth_complete_work); + + mutex_lock(&wl->mutex); +} + +static void wl1271_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl12xx_vif *iter; + struct vif_counter_data vif_count; + + wl12xx_get_vif_count(hw, vif, &vif_count); + mutex_lock(&wl->mutex); + + if (wl->state == WLCORE_STATE_OFF || + !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + goto out; + + /* + * wl->vif can be null here if someone shuts down the interface + * just when hardware recovery has been started. + */ + wl12xx_for_each_wlvif(wl, iter) { + if (iter != wlvif) + continue; + + __wl1271_op_remove_interface(wl, vif, true); + break; + } + WARN_ON(iter != wlvif); + if (wl12xx_need_fw_change(wl, vif_count, false)) { + wl12xx_force_active_psm(wl); + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + wl12xx_queue_recovery_work(wl); + } +out: + mutex_unlock(&wl->mutex); +} + +static int wl12xx_op_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, bool p2p) +{ + struct wl1271 *wl = hw->priv; + int ret; + + set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); + wl1271_op_remove_interface(hw, vif); + + vif->type = new_type; + vif->p2p = p2p; + ret = wl1271_op_add_interface(hw, vif); + + clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); + return ret; +} + +static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); + + /* + * One of the side effects of the JOIN command is that it clears + * WPA/WPA2 keys from the chipset. Performing a JOIN while associated + * to a WPA/WPA2 access point will therefore kill the data-path. + * Currently the only valid scenario for JOIN during association + * is on roaming, in which case we will also be given new keys. + * Keep the below message for now, unless it starts bothering + * users who really like to roam a lot :) + */ + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + wl1271_info("JOIN while associated."); + + /* clear encryption type */ + wlvif->encryption_type = KEY_NONE; + + if (is_ibss) + ret = wl12xx_cmd_role_start_ibss(wl, wlvif); + else { + if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) { + /* + * TODO: this is an ugly workaround for wl12xx fw + * bug - we are not able to tx/rx after the first + * start_sta, so make dummy start+stop calls, + * and then call start_sta again. + * this should be fixed in the fw.
+ */ + wl12xx_cmd_role_start_sta(wl, wlvif); + wl12xx_cmd_role_stop_sta(wl, wlvif); + } + + ret = wl12xx_cmd_role_start_sta(wl, wlvif); + } + + return ret; +} + +static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb, + int offset) +{ + u8 ssid_len; + const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, + skb->len - offset); + + if (!ptr) { + wl1271_error("No SSID in IEs!"); + return -ENOENT; + } + + ssid_len = ptr[1]; + if (ssid_len > IEEE80211_MAX_SSID_LEN) { + wl1271_error("SSID is too long!"); + return -EINVAL; + } + + wlvif->ssid_len = ssid_len; + memcpy(wlvif->ssid, ptr+2, ssid_len); + return 0; +} + +static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + struct sk_buff *skb; + int ieoffset; + + /* we currently only support setting the ssid from the ap probe req */ + if (wlvif->bss_type != BSS_TYPE_STA_BSS) + return -EINVAL; + + skb = ieee80211_ap_probereq_get(wl->hw, vif); + if (!skb) + return -EINVAL; + + ieoffset = offsetof(struct ieee80211_mgmt, + u.probe_req.variable); + wl1271_ssid_set(wlvif, skb, ieoffset); + dev_kfree_skb(skb); + + return 0; +} + +static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_bss_conf *bss_conf, + u32 sta_rate_set) +{ + int ieoffset; + int ret; + + wlvif->aid = bss_conf->aid; + wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef); + wlvif->beacon_int = bss_conf->beacon_int; + wlvif->wmm_enabled = bss_conf->qos; + + set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); + + /* + * with wl1271, we don't need to update the + * beacon_int and dtim_period, because the firmware + * updates it by itself when the first beacon is + * received after a join. + */ + ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); + if (ret < 0) + return ret; + + /* + * Get a template for hardware connection maintenance + */ + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, + wlvif, + NULL); + ieoffset = offsetof(struct ieee80211_mgmt, + u.probe_req.variable); + wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset); + + /* enable the connection monitoring feature */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, true); + if (ret < 0) + return ret; + + /* + * The join command disable the keep-alive mode, shut down its process, + * and also clear the template config, so we need to reset it all after + * the join. The acx_aid starts the keep-alive process, and the order + * of the commands below is relevant. + */ + ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); + if (ret < 0) + return ret; + + ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); + if (ret < 0) + return ret; + + ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl1271_acx_keep_alive_config(wl, wlvif, + wlvif->sta.klv_template_id, + ACX_KEEP_ALIVE_TPL_VALID); + if (ret < 0) + return ret; + + /* + * The default fw psm configuration is AUTO, while mac80211 default + * setting is off (ACTIVE), so sync the fw with the correct value. 
+ */ + ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE); + if (ret < 0) + return ret; + + if (sta_rate_set) { + wlvif->rate_set = + wl1271_tx_enabled_rates_get(wl, + sta_rate_set, + wlvif->band); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + } + + return ret; +} + +static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS; + + /* make sure we are connected (sta) joined */ + if (sta && + !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + return false; + + /* make sure we are joined (ibss) */ + if (!sta && + test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) + return false; + + if (sta) { + /* use defaults when not associated */ + wlvif->aid = 0; + + /* free probe-request template */ + dev_kfree_skb(wlvif->probereq); + wlvif->probereq = NULL; + + /* disable connection monitor features */ + ret = wl1271_acx_conn_monit_params(wl, wlvif, false); + if (ret < 0) + return ret; + + /* Disable the keep-alive feature */ + ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); + if (ret < 0) + return ret; + + /* disable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); + if (ret < 0) + return ret; + } + + if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + wl12xx_cmd_stop_channel_switch(wl, wlvif); + ieee80211_chswitch_done(vif, false); + cancel_delayed_work(&wlvif->channel_switch_work); + } + + /* invalidate keep-alive template */ + wl1271_acx_keep_alive_config(wl, wlvif, + wlvif->sta.klv_template_id, + ACX_KEEP_ALIVE_TPL_INVALID); + + return 0; +} + +static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; + wlvif->rate_set = wlvif->basic_rate_set; +} + +static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool idle) +{ + bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); + + if (idle == cur_idle) + return; + + if (idle) { + clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); + } else { + /* The current firmware only supports sched_scan in idle */ + if (wl->sched_vif == wlvif) + wl->ops->sched_scan_stop(wl, wlvif); + + set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); + } +} + +static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_conf *conf, u32 changed) +{ + int ret; + + if (conf->power_level != wlvif->power_level) { + ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); + if (ret < 0) + return ret; + + wlvif->power_level = conf->power_level; + } + + return 0; +} + +static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s" + " changed 0x%x", + conf->flags & IEEE80211_CONF_PS ? "on" : "off", + conf->power_level, + conf->flags & IEEE80211_CONF_IDLE ? 
"idle" : "in use", + changed); + + mutex_lock(&wl->mutex); + + if (changed & IEEE80211_CONF_CHANGE_POWER) + wl->power_level = conf->power_level; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* configure each interface */ + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl12xx_config_vif(wl, wlvif, conf, changed); + if (ret < 0) + goto out_sleep; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +struct wl1271_filter_params { + bool enabled; + int mc_list_length; + u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; +}; + +static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + struct wl1271_filter_params *fp; + struct netdev_hw_addr *ha; + + fp = kzalloc(sizeof(*fp), GFP_ATOMIC); + if (!fp) { + wl1271_error("Out of memory setting filters."); + return 0; + } + + /* update multicast filtering parameters */ + fp->mc_list_length = 0; + if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { + fp->enabled = false; + } else { + fp->enabled = true; + netdev_hw_addr_list_for_each(ha, mc_list) { + memcpy(fp->mc_list[fp->mc_list_length], + ha->addr, ETH_ALEN); + fp->mc_list_length++; + } + } + + return (u64)(unsigned long)fp; +} + +#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | \ + FIF_FCSFAIL | \ + FIF_BCN_PRBRESP_PROMISC | \ + FIF_CONTROL | \ + FIF_OTHER_BSS) + +static void wl1271_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *total, u64 multicast) +{ + struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" + " total %x", changed, *total); + + mutex_lock(&wl->mutex); + + *total &= WL1271_SUPPORTED_FILTERS; + changed &= WL1271_SUPPORTED_FILTERS; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) + ret = wl1271_acx_group_address_tbl(wl, wlvif, + false, + NULL, 0); + else if (fp) + ret = wl1271_acx_group_address_tbl(wl, wlvif, + fp->enabled, + fp->mc_list, + fp->mc_list_length); + if (ret < 0) + goto out_sleep; + } + } + + /* + * the fw doesn't provide an api to configure the filters. instead, + * the filters configuration is based on the active roles / ROC + * state. + */ + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + kfree(fp); +} + +static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 id, u8 key_type, u8 key_size, + const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) +{ + struct wl1271_ap_key *ap_key; + int i; + + wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); + + if (key_size > MAX_KEY_SIZE) + return -EINVAL; + + /* + * Find next free entry in ap_keys. Also check we are not replacing + * an existing key. 
+ */ + for (i = 0; i < MAX_NUM_KEYS; i++) { + if (wlvif->ap.recorded_keys[i] == NULL) + break; + + if (wlvif->ap.recorded_keys[i]->id == id) { + wl1271_warning("trying to record key replacement"); + return -EINVAL; + } + } + + if (i == MAX_NUM_KEYS) + return -EBUSY; + + ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); + if (!ap_key) + return -ENOMEM; + + ap_key->id = id; + ap_key->key_type = key_type; + ap_key->key_size = key_size; + memcpy(ap_key->key, key, key_size); + ap_key->hlid = hlid; + ap_key->tx_seq_32 = tx_seq_32; + ap_key->tx_seq_16 = tx_seq_16; + + wlvif->ap.recorded_keys[i] = ap_key; + return 0; +} + +static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int i; + + for (i = 0; i < MAX_NUM_KEYS; i++) { + kfree(wlvif->ap.recorded_keys[i]); + wlvif->ap.recorded_keys[i] = NULL; + } +} + +static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int i, ret = 0; + struct wl1271_ap_key *key; + bool wep_key_added = false; + + for (i = 0; i < MAX_NUM_KEYS; i++) { + u8 hlid; + if (wlvif->ap.recorded_keys[i] == NULL) + break; + + key = wlvif->ap.recorded_keys[i]; + hlid = key->hlid; + if (hlid == WL12XX_INVALID_LINK_ID) + hlid = wlvif->ap.bcast_hlid; + + ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, + key->id, key->key_type, + key->key_size, key->key, + hlid, key->tx_seq_32, + key->tx_seq_16); + if (ret < 0) + goto out; + + if (key->key_type == KEY_WEP) + wep_key_added = true; + } + + if (wep_key_added) { + ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, + wlvif->ap.bcast_hlid); + if (ret < 0) + goto out; + } + +out: + wl1271_free_ap_keys(wl, wlvif); + return ret; +} + +static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u32 tx_seq_32, + u16 tx_seq_16, struct ieee80211_sta *sta) +{ + int ret; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + + if (is_ap) { + struct wl1271_station *wl_sta; + u8 hlid; + + if (sta) { + wl_sta = (struct wl1271_station *)sta->drv_priv; + hlid = wl_sta->hlid; + } else { + hlid = wlvif->ap.bcast_hlid; + } + + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + /* + * We do not support removing keys after AP shutdown. + * Pretend we do to make mac80211 happy. + */ + if (action != KEY_ADD_OR_REPLACE) + return 0; + + ret = wl1271_record_ap_key(wl, wlvif, id, + key_type, key_size, + key, hlid, tx_seq_32, + tx_seq_16); + } else { + ret = wl1271_cmd_set_ap_key(wl, wlvif, action, + id, key_type, key_size, + key, hlid, tx_seq_32, + tx_seq_16); + } + + if (ret < 0) + return ret; + } else { + const u8 *addr; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + addr = sta ? sta->addr : bcast_addr; + + if (is_zero_ether_addr(addr)) { + /* We dont support TX only encryption */ + return -EOPNOTSUPP; + } + + /* The wl1271 does not allow to remove unicast keys - they + will be cleared automatically on next CMD_JOIN. Ignore the + request silently, as we dont want the mac80211 to emit + an error message. 
*/ + if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) + return 0; + + /* don't remove key if hlid was already deleted */ + if (action == KEY_REMOVE && + wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) + return 0; + + ret = wl1271_cmd_set_sta_key(wl, wlvif, action, + id, key_type, key_size, + key, addr, tx_seq_32, + tx_seq_16); + if (ret < 0) + return ret; + + } + + return 0; +} + +static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf) +{ + struct wl1271 *wl = hw->priv; + int ret; + bool might_change_spare = + key_conf->cipher == WL1271_CIPHER_SUITE_GEM || + key_conf->cipher == WLAN_CIPHER_SUITE_TKIP; + + if (might_change_spare) { + /* + * stop the queues and flush to ensure the next packets are + * in sync with FW spare block accounting + */ + wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); + wl1271_tx_flush(wl); + } + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + goto out_wake_queues; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_wake_queues; + + ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); + + wl1271_ps_elp_sleep(wl); + +out_wake_queues: + if (might_change_spare) + wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); + + mutex_unlock(&wl->mutex); + + return ret; +} + +int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + u32 tx_seq_32 = 0; + u16 tx_seq_16 = 0; + u8 key_type; + u8 hlid; + + wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); + + wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta); + wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", + key_conf->cipher, key_conf->keyidx, + key_conf->keylen, key_conf->flags); + wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); + + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + if (sta) { + struct wl1271_station *wl_sta = (void *)sta->drv_priv; + hlid = wl_sta->hlid; + } else { + hlid = wlvif->ap.bcast_hlid; + } + else + hlid = wlvif->sta.hlid; + + if (hlid != WL12XX_INVALID_LINK_ID) { + u64 tx_seq = wl->links[hlid].total_freed_pkts; + tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq); + tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq); + } + + switch (key_conf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + key_type = KEY_WEP; + + key_conf->hw_key_idx = key_conf->keyidx; + break; + case WLAN_CIPHER_SUITE_TKIP: + key_type = KEY_TKIP; + key_conf->hw_key_idx = key_conf->keyidx; + break; + case WLAN_CIPHER_SUITE_CCMP: + key_type = KEY_AES; + key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + break; + case WL1271_CIPHER_SUITE_GEM: + key_type = KEY_GEM; + break; + default: + wl1271_error("Unknown key algo 0x%x", key_conf->cipher); + + return -EOPNOTSUPP; + } + + switch (cmd) { + case SET_KEY: + ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + tx_seq_32, tx_seq_16, sta); + if (ret < 0) { + wl1271_error("Could not add or replace key"); + return ret; + } + + /* + * reconfiguring arp response if the unicast (or common) + * encryption key type was changed + */ + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + (sta || key_type == KEY_WEP) && + wlvif->encryption_type != key_type) { + wlvif->encryption_type = key_type; + ret = 
wl1271_cmd_build_arp_rsp(wl, wlvif); + if (ret < 0) { + wl1271_warning("build arp rsp failed: %d", ret); + return ret; + } + } + break; + + case DISABLE_KEY: + ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + 0, 0, sta); + if (ret < 0) { + wl1271_error("Could not remove key"); + return ret; + } + break; + + default: + wl1271_error("Unsupported key cmd 0x%x", cmd); + return -EOPNOTSUPP; + } + + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_set_key); + +static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int key_idx) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d", + key_idx); + + /* we don't handle unsetting of default key */ + if (key_idx == -1) + return; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + goto out_unlock; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_unlock; + + wlvif->default_key = key_idx; + + /* the default WEP key needs to be configured at least once */ + if (wlvif->encryption_type == KEY_WEP) { + ret = wl12xx_cmd_set_default_wep_key(wl, + key_idx, + wlvif->sta.hlid); + if (ret < 0) + goto out_sleep; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out_unlock: + mutex_unlock(&wl->mutex); +} + +void wlcore_regdomain_config(struct wl1271 *wl) +{ + int ret; + + if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) + return; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_cmd_regdomain_config_locked(wl); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto out; + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static int wl1271_op_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct cfg80211_scan_request *req = &hw_req->req; + struct wl1271 *wl = hw->priv; + int ret; + u8 *ssid = NULL; + size_t len = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); + + if (req->n_ssids) { + ssid = req->ssids[0].ssid; + len = req->ssids[0].ssid_len; + } + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + /* + * We cannot return -EBUSY here because cfg80211 will expect + * a call to ieee80211_scan_completed if we do - in this case + * there won't be any call. 
+ */ + ret = -EAGAIN; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* fail if there is any role in ROC */ + if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { + /* don't allow scanning right now */ + ret = -EBUSY; + goto out_sleep; + } + + ret = wlcore_scan(hw->priv, vif, ssid, len, req); +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (wl->scan.state == WL1271_SCAN_STATE_IDLE) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (wl->scan.state != WL1271_SCAN_STATE_DONE) { + ret = wl->ops->scan_stop(wl, wlvif); + if (ret < 0) + goto out_sleep; + } + + /* + * Rearm the tx watchdog just before idling scan. This + * prevents just-finished scans from triggering the watchdog + */ + wl12xx_rearm_tx_watchdog_locked(wl); + + wl->scan.state = WL1271_SCAN_STATE_IDLE; + memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + wl->scan_wlvif = NULL; + wl->scan.req = NULL; + ieee80211_scan_completed(wl->hw, true); + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + cancel_delayed_work_sync(&wl->scan_complete_work); +} + +static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); + if (ret < 0) + goto out_sleep; + + wl->sched_vif = wlvif; + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl->ops->sched_scan_stop(wl, wlvif); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return 0; +} + +static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wl1271 *wl = hw->priv; + int ret = 0; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_acx_frag_threshold(wl, value); + if (ret < 0) + wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif; + int ret = 0; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + 
goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + ret = wl1271_acx_rts_threshold(wl, wlvif, value); + if (ret < 0) + wl1271_warning("set rts threshold failed: %d", ret); + } + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset) +{ + int len; + const u8 *next, *end = skb->data + skb->len; + u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset, + skb->len - ieoffset); + if (!ie) + return; + len = ie[1] + 2; + next = ie + len; + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); +} + +static void wl12xx_remove_vendor_ie(struct sk_buff *skb, + unsigned int oui, u8 oui_type, + int ieoffset) +{ + int len; + const u8 *next, *end = skb->data + skb->len; + u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, + skb->data + ieoffset, + skb->len - ieoffset); + if (!ie) + return; + len = ie[1] + 2; + next = ie + len; + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); +} + +static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, + struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct sk_buff *skb; + int ret; + + skb = ieee80211_proberesp_get(wl->hw, vif); + if (!skb) + return -EOPNOTSUPP; + + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_AP_PROBE_RESPONSE, + skb->data, + skb->len, 0, + rates); + dev_kfree_skb(skb); + + if (ret < 0) + goto out; + + wl1271_debug(DEBUG_AP, "probe response updated"); + set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags); + +out: + return ret; +} + +static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, + struct ieee80211_vif *vif, + u8 *probe_rsp_data, + size_t probe_rsp_len, + u32 rates) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; + int ssid_ie_offset, ie_offset, templ_len; + const u8 *ptr; + + /* no need to change probe response if the SSID is set correctly */ + if (wlvif->ssid_len > 0) + return wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_AP_PROBE_RESPONSE, + probe_rsp_data, + probe_rsp_len, 0, + rates); + + if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) { + wl1271_error("probe_rsp template too big"); + return -EINVAL; + } + + /* start searching from IE offset */ + ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + + ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset, + probe_rsp_len - ie_offset); + if (!ptr) { + wl1271_error("No SSID in beacon!"); + return -EINVAL; + } + + ssid_ie_offset = ptr - probe_rsp_data; + ptr += (ptr[1] + 2); + + memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset); + + /* insert SSID from bss_conf */ + probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID; + probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len; + memcpy(probe_rsp_templ + ssid_ie_offset + 2, + bss_conf->ssid, bss_conf->ssid_len); + templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len; + + memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len, + ptr, probe_rsp_len - (ptr - probe_rsp_data)); + templ_len += probe_rsp_len - (ptr - probe_rsp_data); + + return wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_AP_PROBE_RESPONSE, + probe_rsp_templ, + templ_len, 0, + rates); +} + +static int wl1271_bss_erp_info_changed(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + 
u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret = 0; + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (bss_conf->use_short_slot) + ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT); + else + ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG); + if (ret < 0) { + wl1271_warning("Set slot time failed %d", ret); + goto out; + } + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + if (bss_conf->use_short_preamble) + wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT); + else + wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG); + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + if (bss_conf->use_cts_prot) + ret = wl1271_acx_cts_protect(wl, wlvif, + CTSPROTECT_ENABLE); + else + ret = wl1271_acx_cts_protect(wl, wlvif, + CTSPROTECT_DISABLE); + if (ret < 0) { + wl1271_warning("Set ctsprotect failed %d", ret); + goto out; + } + } + +out: + return ret; +} + +static int wlcore_set_beacon_template(struct wl1271 *wl, + struct ieee80211_vif *vif, + bool is_ap) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_hdr *hdr; + u32 min_rate; + int ret; + int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); + struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); + u16 tmpl_id; + + if (!beacon) { + ret = -EINVAL; + goto out; + } + + wl1271_debug(DEBUG_MASTER, "beacon updated"); + + ret = wl1271_ssid_set(wlvif, beacon, ieoffset); + if (ret < 0) { + dev_kfree_skb(beacon); + goto out; + } + min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON : + CMD_TEMPL_BEACON; + ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id, + beacon->data, + beacon->len, 0, + min_rate); + if (ret < 0) { + dev_kfree_skb(beacon); + goto out; + } + + wlvif->wmm_enabled = + cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WMM, + beacon->data + ieoffset, + beacon->len - ieoffset); + + /* + * In case we already have a probe-resp beacon set explicitly + * by usermode, don't use the beacon data. + */ + if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags)) + goto end_bcn; + + /* remove TIM ie from probe response */ + wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); + + /* + * remove p2p ie from probe response. + * the fw responds to probe requests that don't include + * the p2p ie. probe requests with p2p ie will be passed, + * and will be answered by the supplicant (the spec + * forbids including the p2p ie when responding to probe + * requests that didn't include it).
+ */ + wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA, + WLAN_OUI_TYPE_WFA_P2P, ieoffset); + + hdr = (struct ieee80211_hdr *) beacon->data; + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + if (is_ap) + ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif, + beacon->data, + beacon->len, + min_rate); + else + ret = wl1271_cmd_template_set(wl, wlvif->role_id, + CMD_TEMPL_PROBE_RESPONSE, + beacon->data, + beacon->len, 0, + min_rate); +end_bcn: + dev_kfree_skb(beacon); + if (ret < 0) + goto out; + +out: + return ret; +} + +static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + int ret = 0; + + if (changed & BSS_CHANGED_BEACON_INT) { + wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", + bss_conf->beacon_int); + + wlvif->beacon_int = bss_conf->beacon_int; + } + + if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { + u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + + wl1271_ap_set_probe_resp_tmpl(wl, rate, vif); + } + + if (changed & BSS_CHANGED_BEACON) { + ret = wlcore_set_beacon_template(wl, vif, is_ap); + if (ret < 0) + goto out; + + if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED, + &wlvif->flags)) { + ret = wlcore_hw_dfs_master_restart(wl, wlvif); + if (ret < 0) + goto out; + } + } +out: + if (ret != 0) + wl1271_error("beacon info change failed: %d", ret); + return ret; +} + +/* AP mode changes */ +static void wl1271_bss_info_changed_ap(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret = 0; + + if (changed & BSS_CHANGED_BASIC_RATES) { + u32 rates = bss_conf->basic_rates; + + wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, + wlvif->band); + wlvif->basic_rate = wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + + ret = wl1271_init_ap_rates(wl, wlvif); + if (ret < 0) { + wl1271_error("AP rate policy change failed %d", ret); + goto out; + } + + ret = wl1271_ap_init_templates(wl, vif); + if (ret < 0) + goto out; + + ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif); + if (ret < 0) + goto out; + + ret = wlcore_set_beacon_template(wl, vif, true); + if (ret < 0) + goto out; + } + + ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); + if (ret < 0) + goto out; + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + if (bss_conf->enable_beacon) { + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + ret = wl12xx_cmd_role_start_ap(wl, wlvif); + if (ret < 0) + goto out; + + ret = wl1271_ap_init_hwenc(wl, wlvif); + if (ret < 0) + goto out; + + set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); + wl1271_debug(DEBUG_AP, "started AP"); + } + } else { + if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + /* + * AP might be in ROC in case we have just + * sent auth reply. handle it. 
+ */ + if (test_bit(wlvif->role_id, wl->roc_map)) + wl12xx_croc(wl, wlvif->role_id); + + ret = wl12xx_cmd_role_stop_ap(wl, wlvif); + if (ret < 0) + goto out; + + clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); + clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, + &wlvif->flags); + wl1271_debug(DEBUG_AP, "stopped AP"); + } + } + } + + ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); + if (ret < 0) + goto out; + + /* Handle HT information change */ + if ((changed & BSS_CHANGED_HT) && + (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { + ret = wl1271_acx_set_ht_information(wl, wlvif, + bss_conf->ht_operation_mode); + if (ret < 0) { + wl1271_warning("Set ht information failed %d", ret); + goto out; + } + } + +out: + return; +} + +static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct ieee80211_bss_conf *bss_conf, + u32 sta_rate_set) +{ + u32 rates; + int ret; + + wl1271_debug(DEBUG_MAC80211, + "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x", + bss_conf->bssid, bss_conf->aid, + bss_conf->beacon_int, + bss_conf->basic_rates, sta_rate_set); + + wlvif->beacon_int = bss_conf->beacon_int; + rates = bss_conf->basic_rates; + wlvif->basic_rate_set = + wl1271_tx_enabled_rates_get(wl, rates, + wlvif->band); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + + if (sta_rate_set) + wlvif->rate_set = + wl1271_tx_enabled_rates_get(wl, + sta_rate_set, + wlvif->band); + + /* we only support sched_scan while not connected */ + if (wl->sched_vif == wlvif) + wl->ops->sched_scan_stop(wl, wlvif); + + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl12xx_cmd_build_null_data(wl, wlvif); + if (ret < 0) + return ret; + + ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif)); + if (ret < 0) + return ret; + + wlcore_set_ssid(wl, wlvif); + + set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + + return 0; +} + +static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int ret; + + /* revert back to minimum rates for the current band */ + wl1271_set_band_rate(wl, wlvif); + wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + return ret; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) { + ret = wl12xx_cmd_role_stop_sta(wl, wlvif); + if (ret < 0) + return ret; + } + + clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); + return 0; +} +/* STA/IBSS mode changes */ +static void wl1271_bss_info_changed_sta(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool do_join = false; + bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); + bool ibss_joined = false; + u32 sta_rate_set = 0; + int ret; + struct ieee80211_sta *sta; + bool sta_exists = false; + struct ieee80211_sta_ht_cap sta_ht_cap; + + if (is_ibss) { + ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, + changed); + if (ret < 0) + goto out; + } + + if (changed & BSS_CHANGED_IBSS) { + if (bss_conf->ibss_joined) { + set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); + ibss_joined = true; + } else { + wlcore_unset_assoc(wl, wlvif); + wl12xx_cmd_role_stop_sta(wl, wlvif); + } + } + + if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined) + do_join = true; + + /* Need to update the SSID (for filtering etc) */ + if ((changed & BSS_CHANGED_BEACON) && ibss_joined) + do_join = true; + + if 
((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) { + wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", + bss_conf->enable_beacon ? "enabled" : "disabled"); + + do_join = true; + } + + if (changed & BSS_CHANGED_IDLE && !is_ibss) + wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle); + + if (changed & BSS_CHANGED_CQM) { + bool enable = false; + if (bss_conf->cqm_rssi_thold) + enable = true; + ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable, + bss_conf->cqm_rssi_thold, + bss_conf->cqm_rssi_hyst); + if (ret < 0) + goto out; + wlvif->rssi_thold = bss_conf->cqm_rssi_thold; + } + + if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT | + BSS_CHANGED_ASSOC)) { + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + u8 *rx_mask = sta->ht_cap.mcs.rx_mask; + + /* save the supp_rates of the ap */ + sta_rate_set = sta->supp_rates[wlvif->band]; + if (sta->ht_cap.ht_supported) + sta_rate_set |= + (rx_mask[0] << HW_HT_RATES_OFFSET) | + (rx_mask[1] << HW_MIMO_RATES_OFFSET); + sta_ht_cap = sta->ht_cap; + sta_exists = true; + } + + rcu_read_unlock(); + } + + if (changed & BSS_CHANGED_BSSID) { + if (!is_zero_ether_addr(bss_conf->bssid)) { + ret = wlcore_set_bssid(wl, wlvif, bss_conf, + sta_rate_set); + if (ret < 0) + goto out; + + /* Need to update the BSSID (for filtering etc) */ + do_join = true; + } else { + ret = wlcore_clear_bssid(wl, wlvif); + if (ret < 0) + goto out; + } + } + + if (changed & BSS_CHANGED_IBSS) { + wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d", + bss_conf->ibss_joined); + + if (bss_conf->ibss_joined) { + u32 rates = bss_conf->basic_rates; + wlvif->basic_rate_set = + wl1271_tx_enabled_rates_get(wl, rates, + wlvif->band); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, + wlvif->basic_rate_set); + + /* by default, use 11b + OFDM rates */ + wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES; + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + if (ret < 0) + goto out; + } + } + + if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) { + /* enable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); + if (ret < 0) + goto out; + } + + ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); + if (ret < 0) + goto out; + + if (do_join) { + ret = wlcore_join(wl, wlvif); + if (ret < 0) { + wl1271_warning("cmd join failed %d", ret); + goto out; + } + } + + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + ret = wlcore_set_assoc(wl, wlvif, bss_conf, + sta_rate_set); + if (ret < 0) + goto out; + + if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) + wl12xx_set_authorized(wl, wlvif); + } else { + wlcore_unset_assoc(wl, wlvif); + } + } + + if (changed & BSS_CHANGED_PS) { + if ((bss_conf->ps) && + test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && + !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { + int ps_mode; + char *ps_mode_str; + + if (wl->conf.conn.forced_ps) { + ps_mode = STATION_POWER_SAVE_MODE; + ps_mode_str = "forced"; + } else { + ps_mode = STATION_AUTO_PS_MODE; + ps_mode_str = "auto"; + } + + wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); + + ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); + if (ret < 0) + wl1271_warning("enter %s ps failed %d", + ps_mode_str, ret); + } else if (!bss_conf->ps && + test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { + wl1271_debug(DEBUG_PSM, "auto ps disabled"); + + ret = wl1271_ps_set_mode(wl, wlvif, + STATION_ACTIVE_MODE); + if (ret < 0) + wl1271_warning("exit auto ps failed %d", ret); + } + } + + /* Handle new association with HT. Do this after join. 
*/ + if (sta_exists) { + bool enabled = + bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; + + ret = wlcore_hw_set_peer_cap(wl, + &sta_ht_cap, + enabled, + wlvif->rate_set, + wlvif->sta.hlid); + if (ret < 0) { + wl1271_warning("Set ht cap failed %d", ret); + goto out; + + } + + if (enabled) { + ret = wl1271_acx_set_ht_information(wl, wlvif, + bss_conf->ht_operation_mode); + if (ret < 0) { + wl1271_warning("Set ht information failed %d", + ret); + goto out; + } + } + } + + /* Handle arp filtering. Done after join. */ + if ((changed & BSS_CHANGED_ARP_FILTER) || + (!is_ibss && (changed & BSS_CHANGED_QOS))) { + __be32 addr = bss_conf->arp_addr_list[0]; + wlvif->sta.qos = bss_conf->qos; + WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); + + if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) { + wlvif->ip_addr = addr; + /* + * The template should have been configured only upon + * association. however, it seems that the correct ip + * isn't being set (when sending), so we have to + * reconfigure the template upon every ip change. + */ + ret = wl1271_cmd_build_arp_rsp(wl, wlvif); + if (ret < 0) { + wl1271_warning("build arp rsp failed: %d", ret); + goto out; + } + + ret = wl1271_acx_arp_ip_filter(wl, wlvif, + (ACX_ARP_FILTER_ARP_FILTERING | + ACX_ARP_FILTER_AUTO_ARP), + addr); + } else { + wlvif->ip_addr = 0; + ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr); + } + + if (ret < 0) + goto out; + } + +out: + return; +} + +static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x", + wlvif->role_id, (int)changed); + + /* + * make sure to cancel pending disconnections if our association + * state changed + */ + if (!is_ap && (changed & BSS_CHANGED_ASSOC)) + cancel_delayed_work_sync(&wlvif->connection_loss_work); + + if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) && + !bss_conf->enable_beacon) + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if ((changed & BSS_CHANGED_TXPOWER) && + bss_conf->txpower != wlvif->power_level) { + + ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower); + if (ret < 0) + goto out; + + wlvif->power_level = bss_conf->txpower; + } + + if (is_ap) + wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed); + else + wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wlcore_op_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)", + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); + return 0; +} + +static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)", + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); +} + +static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct wl1271 *wl = hw->priv; + 
struct wl12xx_vif *wlvif; + int ret; + int channel = ieee80211_frequency_to_channel( + ctx->def.chan->center_freq); + + wl1271_debug(DEBUG_MAC80211, + "mac80211 change chanctx %d (type %d) changed 0x%x", + channel, cfg80211_get_chandef_type(&ctx->def), changed); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + rcu_read_lock(); + if (rcu_access_pointer(vif->chanctx_conf) != ctx) { + rcu_read_unlock(); + continue; + } + rcu_read_unlock(); + + /* start radar if needed */ + if (changed & IEEE80211_CHANCTX_CHANGE_RADAR && + wlvif->bss_type == BSS_TYPE_AP_BSS && + ctx->radar_enabled && !wlvif->radar_enabled && + ctx->def.chan->dfs_state == NL80211_DFS_USABLE) { + wl1271_debug(DEBUG_MAC80211, "Start radar detection"); + wlcore_hw_set_cac(wl, wlvif, true); + wlvif->radar_enabled = true; + } + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int channel = ieee80211_frequency_to_channel( + ctx->def.chan->center_freq); + int ret = -EINVAL; + + wl1271_debug(DEBUG_MAC80211, + "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)", + wlvif->role_id, channel, + cfg80211_get_chandef_type(&ctx->def), + ctx->radar_enabled, ctx->def.chan->dfs_state); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wlvif->band = ctx->def.chan->band; + wlvif->channel = channel; + wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def); + + /* update default rates according to the band */ + wl1271_set_band_rate(wl, wlvif); + + if (ctx->radar_enabled && + ctx->def.chan->dfs_state == NL80211_DFS_USABLE) { + wl1271_debug(DEBUG_MAC80211, "Start radar detection"); + wlcore_hw_set_cac(wl, wlvif, true); + wlvif->radar_enabled = true; + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return 0; +} + +static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, + "mac80211 unassign chanctx (role %d) %d (type %d)", + wlvif->role_id, + ieee80211_frequency_to_channel(ctx->def.chan->center_freq), + cfg80211_get_chandef_type(&ctx->def)); + + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (wlvif->radar_enabled) { + wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); + wlcore_hw_set_cac(wl, wlvif, false); + wlvif->radar_enabled = false; + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static int __wlcore_switch_vif_chan(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_chanctx_conf *new_ctx) +{ + int channel = ieee80211_frequency_to_channel( + new_ctx->def.chan->center_freq); + + wl1271_debug(DEBUG_MAC80211, + "switch vif (role %d) %d -> %d chan_type: %d", + wlvif->role_id, 
wlvif->channel, channel, + cfg80211_get_chandef_type(&new_ctx->def)); + + if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS)) + return 0; + + WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags)); + + if (wlvif->radar_enabled) { + wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); + wlcore_hw_set_cac(wl, wlvif, false); + wlvif->radar_enabled = false; + } + + wlvif->band = new_ctx->def.chan->band; + wlvif->channel = channel; + wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def); + + /* start radar if needed */ + if (new_ctx->radar_enabled) { + wl1271_debug(DEBUG_MAC80211, "Start radar detection"); + wlcore_hw_set_cac(wl, wlvif, true); + wlvif->radar_enabled = true; + } + + return 0; +} + +static int +wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + struct wl1271 *wl = hw->priv; + int i, ret; + + wl1271_debug(DEBUG_MAC80211, + "mac80211 switch chanctx n_vifs %d mode %d", + n_vifs, mode); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + for (i = 0; i < n_vifs; i++) { + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif); + + ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx); + if (ret) + goto out_sleep; + } +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return 0; +} + +static int wl1271_op_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + u8 ps_scheme; + int ret = 0; + + mutex_lock(&wl->mutex); + + wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); + + if (params->uapsd) + ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; + else + ps_scheme = CONF_PS_SCHEME_LEGACY; + + if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* + * the txop is confed in units of 32us by the mac80211, + * we need us + */ + ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue), + params->cw_min, params->cw_max, + params->aifs, params->txop << 5); + if (ret < 0) + goto out_sleep; + + ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue), + CONF_CHANNEL_TYPE_EDCF, + wl1271_tx_get_queue(queue), + ps_scheme, CONF_ACK_POLICY_LEGACY, + 0, 0); + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + u64 mactime = ULLONG_MAX; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return mactime; +} + +static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ieee80211_conf *conf = &hw->conf; + + if (idx != 0) + return -ENOENT; + + survey->channel = conf->chandef.chan; + survey->filled = 0; + return 0; +} + +static int wl1271_allocate_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta) +{ + struct wl1271_station *wl_sta; + int ret; + + 
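+ /* each connected station consumes one firmware link (HLID), so refuse new stations once the per-AP limit below is reached */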
+ if (wl->active_sta_count >= wl->max_ap_stations) { + wl1271_warning("could not allocate HLID - too many stations"); + return -EBUSY; + } + + wl_sta = (struct wl1271_station *)sta->drv_priv; + ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid); + if (ret < 0) { + wl1271_warning("could not allocate HLID - too many links"); + return -EBUSY; + } + + /* use the previous security seq, if this is a recovery/resume */ + wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts; + + set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map); + memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); + wl->active_sta_count++; + return 0; +} + +void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) +{ + if (!test_bit(hlid, wlvif->ap.sta_hlid_map)) + return; + + clear_bit(hlid, wlvif->ap.sta_hlid_map); + __clear_bit(hlid, &wl->ap_ps_map); + __clear_bit(hlid, &wl->ap_fw_ps_map); + + /* + * save the last used PN in the private part of ieee80211_sta, + * in case of recovery/suspend + */ + wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr); + + wl12xx_free_link(wl, wlvif, &hlid); + wl->active_sta_count--; + + /* + * rearm the tx watchdog when the last STA is freed - give the FW a + * chance to return STA-buffered packets before complaining. + */ + if (wl->active_sta_count == 0) + wl12xx_rearm_tx_watchdog_locked(wl); +} + +static int wl12xx_sta_add(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta) +{ + struct wl1271_station *wl_sta; + int ret = 0; + u8 hlid; + + wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); + + ret = wl1271_allocate_sta(wl, wlvif, sta); + if (ret < 0) + return ret; + + wl_sta = (struct wl1271_station *)sta->drv_priv; + hlid = wl_sta->hlid; + + ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); + if (ret < 0) + wl1271_free_sta(wl, wlvif, hlid); + + return ret; +} + +static int wl12xx_sta_remove(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta) +{ + struct wl1271_station *wl_sta; + int ret = 0, id; + + wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); + + wl_sta = (struct wl1271_station *)sta->drv_priv; + id = wl_sta->hlid; + if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) + return -EINVAL; + + ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid); + if (ret < 0) + return ret; + + wl1271_free_sta(wl, wlvif, wl_sta->hlid); + return ret; +} + +static void wlcore_roc_if_possible(struct wl1271 *wl, + struct wl12xx_vif *wlvif) +{ + if (find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) + return; + + if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) + return; + + wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel); +} + +/* + * when wl_sta is NULL, we treat this call as if coming from a + * pending auth reply. + * wl->mutex must be taken and the FW must be awake when the call + * takes place.
+ */ +void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct wl1271_station *wl_sta, bool in_conn) +{ + if (in_conn) { + if (WARN_ON(wl_sta && wl_sta->in_connection)) + return; + + if (!wlvif->ap_pending_auth_reply && + !wlvif->inconn_count) + wlcore_roc_if_possible(wl, wlvif); + + if (wl_sta) { + wl_sta->in_connection = true; + wlvif->inconn_count++; + } else { + wlvif->ap_pending_auth_reply = true; + } + } else { + if (wl_sta && !wl_sta->in_connection) + return; + + if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply)) + return; + + if (WARN_ON(wl_sta && !wlvif->inconn_count)) + return; + + if (wl_sta) { + wl_sta->in_connection = false; + wlvif->inconn_count--; + } else { + wlvif->ap_pending_auth_reply = false; + } + + if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply && + test_bit(wlvif->role_id, wl->roc_map)) + wl12xx_croc(wl, wlvif->role_id); + } +} + +static int wl12xx_update_sta_state(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct wl1271_station *wl_sta; + bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; + bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; + int ret; + + wl_sta = (struct wl1271_station *)sta->drv_priv; + + /* Add station (AP mode) */ + if (is_ap && + old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + ret = wl12xx_sta_add(wl, wlvif, sta); + if (ret) + return ret; + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, true); + } + + /* Remove station (AP mode) */ + if (is_ap && + old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) { + /* must not fail */ + wl12xx_sta_remove(wl, wlvif, sta); + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); + } + + /* Authorize station (AP mode) */ + if (is_ap && + new_state == IEEE80211_STA_AUTHORIZED) { + ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid); + if (ret < 0) + return ret; + + ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, + wl_sta->hlid); + if (ret) + return ret; + + wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); + } + + /* Authorize station */ + if (is_sta && + new_state == IEEE80211_STA_AUTHORIZED) { + set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); + ret = wl12xx_set_authorized(wl, wlvif); + if (ret) + return ret; + } + + if (is_sta && + old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { + clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); + clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags); + } + + /* save seq number on disassoc (suspend) */ + if (is_sta && + old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta); + wlvif->total_freed_pkts = 0; + } + + /* restore seq number on assoc (resume) */ + if (is_sta && + old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + wlvif->total_freed_pkts = wl_sta->total_freed_pkts; + } + + /* clear ROCs on failure or authorization */ + if (is_sta && + (new_state == IEEE80211_STA_AUTHORIZED || + new_state == IEEE80211_STA_NOTEXIST)) { + if (test_bit(wlvif->role_id, wl->roc_map)) + wl12xx_croc(wl, wlvif->role_id); + } + + if (is_sta && + old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + if (find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) { + WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID); + wl12xx_roc(wl, wlvif, wlvif->role_id, + wlvif->band, wlvif->channel); + } + } + return 0; 
+} + +static int wl12xx_op_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d", + sta->aid, old_state, new_state); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + if (new_state < old_state) + return 0; + return ret; +} + +static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + u8 hlid, *ba_bitmap; + + wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action, + tid); + + /* sanity check - the fields in FW are only 8bits wide */ + if (WARN_ON(tid > 0xFF)) + return -ENOTSUPP; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EAGAIN; + goto out; + } + + if (wlvif->bss_type == BSS_TYPE_STA_BSS) { + hlid = wlvif->sta.hlid; + } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) { + struct wl1271_station *wl_sta; + + wl_sta = (struct wl1271_station *)sta->drv_priv; + hlid = wl_sta->hlid; + } else { + ret = -EINVAL; + goto out; + } + + ba_bitmap = &wl->links[hlid].ba_bitmap; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d", + tid, action); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (!wlvif->ba_support || !wlvif->ba_allowed) { + ret = -ENOTSUPP; + break; + } + + if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) { + ret = -EBUSY; + wl1271_error("exceeded max RX BA sessions"); + break; + } + + if (*ba_bitmap & BIT(tid)) { + ret = -EINVAL; + wl1271_error("cannot enable RX BA session on active " + "tid: %d", tid); + break; + } + + ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true, + hlid); + if (!ret) { + *ba_bitmap |= BIT(tid); + wl->ba_rx_session_count++; + } + break; + + case IEEE80211_AMPDU_RX_STOP: + if (!(*ba_bitmap & BIT(tid))) { + /* + * this happens on reconfig - so only output a debug + * message for now, and don't fail the function. + */ + wl1271_debug(DEBUG_MAC80211, + "no active RX BA session on tid: %d", + tid); + ret = 0; + break; + } + + ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false, + hlid); + if (!ret) { + *ba_bitmap &= ~BIT(tid); + wl->ba_rx_session_count--; + } + break; + + /* + * The BA initiator (TX) sessions are managed by the FW independently, + * so all TX AMPDU actions deliberately fall through to the same + * handling below.
+ */ + case IEEE80211_AMPDU_TX_START: + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = -EINVAL; + break; + + default: + wl1271_error("Incorrect ampdu action id=%x\n", action); + ret = -EINVAL; + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl1271 *wl = hw->priv; + int i, ret = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", + mask->control[NL80211_BAND_2GHZ].legacy, + mask->control[NL80211_BAND_5GHZ].legacy); + + mutex_lock(&wl->mutex); + + for (i = 0; i < WLCORE_NUM_BANDS; i++) + wlvif->bitrate_masks[i] = + wl1271_tx_enabled_rates_get(wl, + mask->control[i].legacy, + i); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (wlvif->bss_type == BSS_TYPE_STA_BSS && + !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl1271_set_band_rate(wl, wlvif); + wlvif->basic_rate = + wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); + ret = wl1271_acx_sta_rate_policies(wl, wlvif); + + wl1271_ps_elp_sleep(wl); + } +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); + + wl1271_tx_flush(wl); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WLCORE_STATE_OFF)) { + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + ieee80211_chswitch_done(vif, false); + goto out; + } else if (unlikely(wl->state != WLCORE_STATE_ON)) { + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* TODO: change mac80211 to pass vif as param */ + + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + unsigned long delay_usec; + + ret = wl->ops->channel_switch(wl, wlvif, ch_switch); + if (ret) + goto out_sleep; + + set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); + + /* indicate failure 5 seconds after channel switch time */ + delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) * + ch_switch->count; + ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work, + usecs_to_jiffies(delay_usec) + + msecs_to_jiffies(5000)); + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static const void *wlcore_get_beacon_ie(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 eid) +{ + int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); + struct sk_buff *beacon = + ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif)); + + if (!beacon) + return NULL; + + return cfg80211_find_ie(eid, + beacon->data + ieoffset, + beacon->len - ieoffset); +} + +static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 *csa_count) +{ + const u8 *ie; + const struct ieee80211_channel_sw_ie *ie_csa; + + ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH); + if (!ie) + return -EINVAL; + + ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2]; + *csa_count = ie_csa->count; + + return 0; +} + +static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif 
*vif, + struct cfg80211_chan_def *chandef) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct ieee80211_channel_switch ch_switch = { + .block_tx = true, + .chandef = *chandef, + }; + int ret; + + wl1271_debug(DEBUG_MAC80211, + "mac80211 channel switch beacon (role %d)", + wlvif->role_id); + + ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count); + if (ret < 0) { + wl1271_error("error getting beacon (for CSA counter)"); + return; + } + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl->ops->channel_switch(wl, wlvif, &ch_switch); + if (ret) + goto out_sleep; + + set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct wl1271 *wl = hw->priv; + + wl1271_tx_flush(wl); +} + +static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct wl1271 *wl = hw->priv; + int channel, ret = 0; + + channel = ieee80211_frequency_to_channel(chan->center_freq); + + wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)", + channel, wlvif->role_id); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* return EBUSY if we can't ROC right now */ + if (WARN_ON(wl->roc_vif || + find_first_bit(wl->roc_map, + WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); + if (ret < 0) + goto out_sleep; + + wl->roc_vif = vif; + ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, + msecs_to_jiffies(duration)); +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static int __wlcore_roc_completed(struct wl1271 *wl) +{ + struct wl12xx_vif *wlvif; + int ret; + + /* already completed */ + if (unlikely(!wl->roc_vif)) + return 0; + + wlvif = wl12xx_vif_to_data(wl->roc_vif); + + if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) + return -EBUSY; + + ret = wl12xx_stop_dev(wl, wlvif); + if (ret < 0) + return ret; + + wl->roc_vif = NULL; + + return 0; +} + +static int wlcore_roc_completed(struct wl1271 *wl) +{ + int ret; + + wl1271_debug(DEBUG_MAC80211, "roc complete"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EBUSY; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = __wlcore_roc_completed(wl); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wlcore_roc_complete_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, roc_complete_work); + + ret = wlcore_roc_completed(wl); + if (!ret) + ieee80211_remain_on_channel_expired(wl->hw); +} + +static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + + wl1271_debug(DEBUG_MAC80211, "mac80211 croc"); + + /* TODO: per-vif */ + wl1271_tx_flush(wl); + + /* + * we can't just flush_work here, because it might deadlock + * 
(as we might get called from the same workqueue) + */ + cancel_delayed_work_sync(&wl->roc_complete_work); + wlcore_roc_completed(wl); + + return 0; +} + +static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + + wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update"); + + if (!(changed & IEEE80211_RC_BW_CHANGED)) + return; + + /* this callback is atomic, so schedule a new work */ + wlvif->rc_update_bw = sta->bandwidth; + ieee80211_queue_work(hw, &wlvif->rc_update_work); +} + +static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct wl1271 *wl = hw->priv; + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + s8 rssi_dbm; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_sleep; + + ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm); + if (ret < 0) + goto out_sleep; + + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->signal = rssi_dbm; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + bool ret = false; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* packets are considered pending if in the TX queue or the FW */ + ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_rate wl1271_rates[] = { + { .bitrate = 10, + .hw_value = CONF_HW_BIT_RATE_1MBPS, + .hw_value_short = CONF_HW_BIT_RATE_1MBPS, }, + { .bitrate = 20, + .hw_value = CONF_HW_BIT_RATE_2MBPS, + .hw_value_short = CONF_HW_BIT_RATE_2MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = CONF_HW_BIT_RATE_5_5MBPS, + .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = CONF_HW_BIT_RATE_11MBPS, + .hw_value_short = CONF_HW_BIT_RATE_11MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = CONF_HW_BIT_RATE_6MBPS, + .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, + { .bitrate = 90, + .hw_value = CONF_HW_BIT_RATE_9MBPS, + .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, + { .bitrate = 120, + .hw_value = CONF_HW_BIT_RATE_12MBPS, + .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, + { .bitrate = 180, + .hw_value = CONF_HW_BIT_RATE_18MBPS, + .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, + { .bitrate = 240, + .hw_value = CONF_HW_BIT_RATE_24MBPS, + .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, + { .bitrate = 360, + .hw_value = CONF_HW_BIT_RATE_36MBPS, + .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, + { .bitrate = 480, + .hw_value = CONF_HW_BIT_RATE_48MBPS, + .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, + { .bitrate = 540, + .hw_value = CONF_HW_BIT_RATE_54MBPS, + .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, +}; + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_channel wl1271_channels[] = { + { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 3, .center_freq = 2422, .max_power = 
WLCORE_MAX_TXPWR }, + { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR }, +}; + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_supported_band wl1271_band_2ghz = { + .channels = wl1271_channels, + .n_channels = ARRAY_SIZE(wl1271_channels), + .bitrates = wl1271_rates, + .n_bitrates = ARRAY_SIZE(wl1271_rates), +}; + +/* 5 GHz data rates for WL1273 */ +static struct ieee80211_rate wl1271_rates_5ghz[] = { + { .bitrate = 60, + .hw_value = CONF_HW_BIT_RATE_6MBPS, + .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, + { .bitrate = 90, + .hw_value = CONF_HW_BIT_RATE_9MBPS, + .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, + { .bitrate = 120, + .hw_value = CONF_HW_BIT_RATE_12MBPS, + .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, + { .bitrate = 180, + .hw_value = CONF_HW_BIT_RATE_18MBPS, + .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, + { .bitrate = 240, + .hw_value = CONF_HW_BIT_RATE_24MBPS, + .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, + { .bitrate = 360, + .hw_value = CONF_HW_BIT_RATE_36MBPS, + .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, + { .bitrate = 480, + .hw_value = CONF_HW_BIT_RATE_48MBPS, + .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, + { .bitrate = 540, + .hw_value = CONF_HW_BIT_RATE_54MBPS, + .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, +}; + +/* 5 GHz band channels for WL1273 */ +static struct ieee80211_channel wl1271_channels_5ghz[] = { + { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR }, + { 
.hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR }, + { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR }, +}; + +static struct ieee80211_supported_band wl1271_band_5ghz = { + .channels = wl1271_channels_5ghz, + .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), + .bitrates = wl1271_rates_5ghz, + .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), +}; + +static const struct ieee80211_ops wl1271_ops = { + .start = wl1271_op_start, + .stop = wlcore_op_stop, + .add_interface = wl1271_op_add_interface, + .remove_interface = wl1271_op_remove_interface, + .change_interface = wl12xx_op_change_interface, +#ifdef CONFIG_PM + .suspend = wl1271_op_suspend, + .resume = wl1271_op_resume, +#endif + .config = wl1271_op_config, + .prepare_multicast = wl1271_op_prepare_multicast, + .configure_filter = wl1271_op_configure_filter, + .tx = wl1271_op_tx, + .set_key = wlcore_op_set_key, + .hw_scan = wl1271_op_hw_scan, + .cancel_hw_scan = wl1271_op_cancel_hw_scan, + .sched_scan_start = wl1271_op_sched_scan_start, + .sched_scan_stop = wl1271_op_sched_scan_stop, + .bss_info_changed = wl1271_op_bss_info_changed, + .set_frag_threshold = wl1271_op_set_frag_threshold, + .set_rts_threshold = wl1271_op_set_rts_threshold, + .conf_tx = wl1271_op_conf_tx, + .get_tsf = wl1271_op_get_tsf, + .get_survey = wl1271_op_get_survey, + .sta_state = wl12xx_op_sta_state, + .ampdu_action = wl1271_op_ampdu_action, + .tx_frames_pending = wl1271_tx_frames_pending, + .set_bitrate_mask = wl12xx_set_bitrate_mask, + .set_default_unicast_key = wl1271_op_set_default_key_idx, + .channel_switch = wl12xx_op_channel_switch, + .channel_switch_beacon = wlcore_op_channel_switch_beacon, + .flush = wlcore_op_flush, + .remain_on_channel = wlcore_op_remain_on_channel, + .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel, + .add_chanctx = wlcore_op_add_chanctx, + .remove_chanctx = wlcore_op_remove_chanctx, + .change_chanctx = wlcore_op_change_chanctx, + .assign_vif_chanctx = wlcore_op_assign_vif_chanctx, + .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx, + .switch_vif_chanctx = wlcore_op_switch_vif_chanctx, + .sta_rc_update = wlcore_op_sta_rc_update, + .sta_statistics = wlcore_op_sta_statistics, + CFG80211_TESTMODE_CMD(wl1271_tm_cmd) +}; + + +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band) +{ + u8 idx; + + BUG_ON(band >= 2); + + if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) { + wl1271_error("Illegal RX rate from HW: %d", rate); + return 0; + } + + idx = wl->band_rate_to_idx[band][rate]; + if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { + wl1271_error("Unsupported RX rate from HW: %d", rate); + return 0; + } + + return idx; +} + +static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) +{ + int i; + + 
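+ /* derive wl->num_mac_addr consecutive addresses from the base oui/nic pair, incrementing the nic part for each interface */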
wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x", + oui, nic); + + if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff) + wl1271_warning("NIC part of the MAC address wraps around!"); + + for (i = 0; i < wl->num_mac_addr; i++) { + wl->addresses[i].addr[0] = (u8)(oui >> 16); + wl->addresses[i].addr[1] = (u8)(oui >> 8); + wl->addresses[i].addr[2] = (u8) oui; + wl->addresses[i].addr[3] = (u8)(nic >> 16); + wl->addresses[i].addr[4] = (u8)(nic >> 8); + wl->addresses[i].addr[5] = (u8) nic; + nic++; + } + + /* we may be one address short at the most */ + WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES); + + /* + * turn on the LAA bit in the first address and use it as + * the last address. + */ + if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) { + int idx = WLCORE_NUM_MAC_ADDRESSES - 1; + memcpy(&wl->addresses[idx], &wl->addresses[0], + sizeof(wl->addresses[0])); + /* LAA bit */ + wl->addresses[idx].addr[0] |= BIT(1); + } + + wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES; + wl->hw->wiphy->addresses = wl->addresses; +} + +static int wl12xx_get_hw_info(struct wl1271 *wl) +{ + int ret; + + ret = wl12xx_set_power_on(wl); + if (ret < 0) + return ret; + + ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id); + if (ret < 0) + goto out; + + wl->fuse_oui_addr = 0; + wl->fuse_nic_addr = 0; + + ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver); + if (ret < 0) + goto out; + + if (wl->ops->get_mac) + ret = wl->ops->get_mac(wl); + +out: + wl1271_power_off(wl); + return ret; +} + +static int wl1271_register_hw(struct wl1271 *wl) +{ + int ret; + u32 oui_addr = 0, nic_addr = 0; + + if (wl->mac80211_registered) + return 0; + + if (wl->nvs_len >= 12) { + /* NOTE: The wl->nvs->nvs element must be first, in + * order to simplify the casting, we assume it is at + * the beginning of the wl->nvs structure. 
+ */ + u8 *nvs_ptr = (u8 *)wl->nvs; + + oui_addr = + (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6]; + nic_addr = + (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3]; + } + + /* if the MAC address is zeroed in the NVS derive from fuse */ + if (oui_addr == 0 && nic_addr == 0) { + oui_addr = wl->fuse_oui_addr; + /* fuse has the BD_ADDR, the WLAN addresses are the next two */ + nic_addr = wl->fuse_nic_addr + 1; + } + + wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr); + + ret = ieee80211_register_hw(wl->hw); + if (ret < 0) { + wl1271_error("unable to register mac80211 hw: %d", ret); + goto out; + } + + wl->mac80211_registered = true; + + wl1271_debugfs_init(wl); + + wl1271_notice("loaded"); + +out: + return ret; +} + +static void wl1271_unregister_hw(struct wl1271 *wl) +{ + if (wl->plt) + wl1271_plt_stop(wl); + + ieee80211_unregister_hw(wl->hw); + wl->mac80211_registered = false; + +} + +static int wl1271_init_ieee80211(struct wl1271 *wl) +{ + int i; + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WL1271_CIPHER_SUITE_GEM, + }; + + /* The tx descriptor buffer */ + wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr); + + if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) + wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP; + + /* unit us */ + /* FIXME: find a proper value */ + wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval; + + wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_HAS_RATE_CONTROL | + IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_AP_LINK_PS | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | + IEEE80211_HW_QUEUE_CONTROL | + IEEE80211_HW_CHANCTX_STA_CSA; + + wl->hw->wiphy->cipher_suites = cipher_suites; + wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); + wl->hw->wiphy->max_scan_ssids = 1; + wl->hw->wiphy->max_sched_scan_ssids = 16; + wl->hw->wiphy->max_match_sets = 16; + /* + * Maximum length of elements in scanning probe request templates + * should be the maximum length possible for a template, without + * the IEEE80211 header of the template + */ + wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - + sizeof(struct ieee80211_header); + + wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - + sizeof(struct ieee80211_header); + + wl->hw->wiphy->max_remain_on_channel_duration = 30000; + + wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_SUPPORTS_SCHED_SCAN | + WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + /* make sure all our channels fit in the scanned_ch bitmask */ + BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + + ARRAY_SIZE(wl1271_channels_5ghz) > + WL1271_MAX_CHANNELS); + /* + * clear channel flags from the previous usage + * and restore max_power & max_antenna_gain values. 
+ */ + for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) { + wl1271_band_2ghz.channels[i].flags = 0; + wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR; + wl1271_band_2ghz.channels[i].max_antenna_gain = 0; + } + + for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) { + wl1271_band_5ghz.channels[i].flags = 0; + wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR; + wl1271_band_5ghz.channels[i].max_antenna_gain = 0; + } + + /* + * We keep local copies of the band structs because we need to + * modify them on a per-device basis. + */ + memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, + sizeof(wl1271_band_2ghz)); + memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, + &wl->ht_cap[IEEE80211_BAND_2GHZ], + sizeof(*wl->ht_cap)); + memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, + sizeof(wl1271_band_5ghz)); + memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, + &wl->ht_cap[IEEE80211_BAND_5GHZ], + sizeof(*wl->ht_cap)); + + wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &wl->bands[IEEE80211_BAND_2GHZ]; + wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &wl->bands[IEEE80211_BAND_5GHZ]; + + /* + * allow 4 queues per mac address we support + + * 1 cab queue per mac + one global offchannel Tx queue + */ + wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1; + + /* the last queue is the offchannel queue */ + wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1; + wl->hw->max_rates = 1; + + wl->hw->wiphy->reg_notifier = wl1271_reg_notify; + + /* the FW answers probe-requests in AP-mode */ + wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + wl->hw->wiphy->probe_resp_offload = + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + + /* allowed interface combinations */ + wl->hw->wiphy->iface_combinations = wl->iface_combinations; + wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations; + + /* register vendor commands */ + wlcore_set_vendor_commands(wl->hw->wiphy); + + SET_IEEE80211_DEV(wl->hw, wl->dev); + + wl->hw->sta_data_size = sizeof(struct wl1271_station); + wl->hw->vif_data_size = sizeof(struct wl12xx_vif); + + wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size; + + return 0; +} + +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, + u32 mbox_size) +{ + struct ieee80211_hw *hw; + struct wl1271 *wl; + int i, j, ret; + unsigned int order; + + hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); + if (!hw) { + wl1271_error("could not alloc ieee80211_hw"); + ret = -ENOMEM; + goto err_hw_alloc; + } + + wl = hw->priv; + memset(wl, 0, sizeof(*wl)); + + wl->priv = kzalloc(priv_size, GFP_KERNEL); + if (!wl->priv) { + wl1271_error("could not alloc wl priv"); + ret = -ENOMEM; + goto err_priv_alloc; + } + + INIT_LIST_HEAD(&wl->wlvif_list); + + wl->hw = hw; + + /* + * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS. + * we don't allocate any additional resource here, so that's fine. 
+ */ + for (i = 0; i < NUM_TX_QUEUES; i++) + for (j = 0; j < WLCORE_MAX_LINKS; j++) + skb_queue_head_init(&wl->links[j].tx_queue[i]); + + skb_queue_head_init(&wl->deferred_rx_queue); + skb_queue_head_init(&wl->deferred_tx_queue); + + INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); + INIT_WORK(&wl->netstack_work, wl1271_netstack_work); + INIT_WORK(&wl->tx_work, wl1271_tx_work); + INIT_WORK(&wl->recovery_work, wl1271_recovery_work); + INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); + INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work); + INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work); + + wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); + if (!wl->freezable_wq) { + ret = -ENOMEM; + goto err_hw; + } + + wl->channel = 0; + wl->rx_counter = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->band = IEEE80211_BAND_2GHZ; + wl->channel_type = NL80211_CHAN_NO_HT; + wl->flags = 0; + wl->sg_enabled = true; + wl->sleep_auth = WL1271_PSM_ILLEGAL; + wl->recovery_count = 0; + wl->hw_pg_ver = -1; + wl->ap_ps_map = 0; + wl->ap_fw_ps_map = 0; + wl->quirks = 0; + wl->system_hlid = WL12XX_SYSTEM_HLID; + wl->active_sta_count = 0; + wl->active_link_count = 0; + wl->fwlog_size = 0; + init_waitqueue_head(&wl->fwlog_waitq); + + /* The system link is always allocated */ + __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); + + memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); + for (i = 0; i < wl->num_tx_desc; i++) + wl->tx_frames[i] = NULL; + + spin_lock_init(&wl->wl_lock); + + wl->state = WLCORE_STATE_OFF; + wl->fw_type = WL12XX_FW_TYPE_NONE; + mutex_init(&wl->mutex); + mutex_init(&wl->flush_mutex); + init_completion(&wl->nvs_loading_complete); + + order = get_order(aggr_buf_size); + wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); + if (!wl->aggr_buf) { + ret = -ENOMEM; + goto err_wq; + } + wl->aggr_buf_size = aggr_buf_size; + + wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); + if (!wl->dummy_packet) { + ret = -ENOMEM; + goto err_aggr; + } + + /* Allocate one page for the FW log */ + wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL); + if (!wl->fwlog) { + ret = -ENOMEM; + goto err_dummy_packet; + } + + wl->mbox_size = mbox_size; + wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA); + if (!wl->mbox) { + ret = -ENOMEM; + goto err_fwlog; + } + + wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL); + if (!wl->buffer_32) { + ret = -ENOMEM; + goto err_mbox; + } + + return hw; + +err_mbox: + kfree(wl->mbox); + +err_fwlog: + free_page((unsigned long)wl->fwlog); + +err_dummy_packet: + dev_kfree_skb(wl->dummy_packet); + +err_aggr: + free_pages((unsigned long)wl->aggr_buf, order); + +err_wq: + destroy_workqueue(wl->freezable_wq); + +err_hw: + wl1271_debugfs_exit(wl); + kfree(wl->priv); + +err_priv_alloc: + ieee80211_free_hw(hw); + +err_hw_alloc: + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(wlcore_alloc_hw); + +int wlcore_free_hw(struct wl1271 *wl) +{ + /* Unblock any fwlog readers */ + mutex_lock(&wl->mutex); + wl->fwlog_size = -1; + wake_up_interruptible_all(&wl->fwlog_waitq); + mutex_unlock(&wl->mutex); + + wlcore_sysfs_free(wl); + + kfree(wl->buffer_32); + kfree(wl->mbox); + free_page((unsigned long)wl->fwlog); + dev_kfree_skb(wl->dummy_packet); + free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size)); + + wl1271_debugfs_exit(wl); + + vfree(wl->fw); + wl->fw = NULL; + wl->fw_type = WL12XX_FW_TYPE_NONE; + kfree(wl->nvs); + wl->nvs = NULL; + + kfree(wl->raw_fw_status); + kfree(wl->fw_status); + 
kfree(wl->tx_res_if); + destroy_workqueue(wl->freezable_wq); + + kfree(wl->priv); + ieee80211_free_hw(wl->hw); + + return 0; +} +EXPORT_SYMBOL_GPL(wlcore_free_hw); + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support wlcore_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL1271_MAX_RX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, +}; +#endif + +static irqreturn_t wlcore_hardirq(int irq, void *cookie) +{ + return IRQ_WAKE_THREAD; +} + +static void wlcore_nvs_cb(const struct firmware *fw, void *context) +{ + struct wl1271 *wl = context; + struct platform_device *pdev = wl->pdev; + struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); + struct resource *res; + + int ret; + irq_handler_t hardirq_fn = NULL; + + if (fw) { + wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!wl->nvs) { + wl1271_error("Could not allocate nvs data"); + goto out; + } + wl->nvs_len = fw->size; + } else { + wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s", + WL12XX_NVS_NAME); + wl->nvs = NULL; + wl->nvs_len = 0; + } + + ret = wl->ops->setup(wl); + if (ret < 0) + goto out_free_nvs; + + BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); + + /* adjust some runtime configuration parameters */ + wlcore_adjust_conf(wl); + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + wl1271_error("Could not get IRQ resource"); + goto out_free_nvs; + } + + wl->irq = res->start; + wl->irq_flags = res->flags & IRQF_TRIGGER_MASK; + wl->if_ops = pdev_data->if_ops; + + if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) + hardirq_fn = wlcore_hardirq; + else + wl->irq_flags |= IRQF_ONESHOT; + + ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq, + wl->irq_flags, pdev->name, wl); + if (ret < 0) { + wl1271_error("request_irq() failed: %d", ret); + goto out_free_nvs; + } + +#ifdef CONFIG_PM + ret = enable_irq_wake(wl->irq); + if (!ret) { + wl->irq_wake_enabled = true; + device_init_wakeup(wl->dev, 1); + if (pdev_data->pwr_in_suspend) + wl->hw->wiphy->wowlan = &wlcore_wowlan_support; + } +#endif + disable_irq(wl->irq); + + ret = wl12xx_get_hw_info(wl); + if (ret < 0) { + wl1271_error("couldn't get hw info"); + goto out_irq; + } + + ret = wl->ops->identify_chip(wl); + if (ret < 0) + goto out_irq; + + ret = wl1271_init_ieee80211(wl); + if (ret) + goto out_irq; + + ret = wl1271_register_hw(wl); + if (ret) + goto out_irq; + + ret = wlcore_sysfs_init(wl); + if (ret) + goto out_unreg; + + wl->initialized = true; + goto out; + +out_unreg: + wl1271_unregister_hw(wl); + +out_irq: + free_irq(wl->irq, wl); + +out_free_nvs: + kfree(wl->nvs); + +out: + release_firmware(fw); + complete_all(&wl->nvs_loading_complete); +} + +int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) +{ + int ret; + + if (!wl->ops || !wl->ptable) + return -EINVAL; + + wl->dev = &pdev->dev; + wl->pdev = pdev; + platform_set_drvdata(pdev, wl); + + ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL, + wl, wlcore_nvs_cb); + if (ret < 0) { + wl1271_error("request_firmware_nowait failed: %d", ret); + complete_all(&wl->nvs_loading_complete); + } + + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_probe); + +int wlcore_remove(struct platform_device *pdev) +{ + struct wl1271 *wl = platform_get_drvdata(pdev); + + wait_for_completion(&wl->nvs_loading_complete); + if (!wl->initialized) + return 0; + + if (wl->irq_wake_enabled) { + device_init_wakeup(wl->dev, 0); + disable_irq_wake(wl->irq); + } + 
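+ /* tear down in reverse order of wlcore_nvs_cb(): unregister mac80211, release the IRQ, then free the hw state */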
wl1271_unregister_hw(wl); + free_irq(wl->irq, wl); + wlcore_free_hw(wl); + + return 0; +} +EXPORT_SYMBOL_GPL(wlcore_remove); + +u32 wl12xx_debug_level = DEBUG_NONE; +EXPORT_SYMBOL_GPL(wl12xx_debug_level); +module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); + +module_param_named(fwlog, fwlog_param, charp, 0); +MODULE_PARM_DESC(fwlog, + "FW logger options: continuous, ondemand, dbgpins or disable"); + +module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks"); + +module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); + +module_param(no_recovery, int, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck."); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Luciano Coelho "); +MODULE_AUTHOR("Juuso Oikarinen "); +/*(DEBLOBBED)*/ diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c new file mode 100644 index 000000000..4cd316e61 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/ps.c @@ -0,0 +1,332 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "ps.h" +#include "io.h" +#include "tx.h" +#include "debug.h" + +#define WL1271_WAKEUP_TIMEOUT 500 + +#define ELP_ENTRY_DELAY 30 +#define ELP_ENTRY_DELAY_FORCE_PS 5 + +void wl1271_elp_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct wl12xx_vif *wlvif; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, elp_work); + + wl1271_debug(DEBUG_PSM, "elp work"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + /* our work might have been already cancelled */ + if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) + goto out; + + if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + goto out; + + wl12xx_for_each_wlvif(wl, wlvif) { + if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + goto out; + } + + wl1271_debug(DEBUG_PSM, "chip to elp"); + ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto out; + } + + set_bit(WL1271_FLAG_IN_ELP, &wl->flags); + +out: + mutex_unlock(&wl->mutex); +} + +/* Routines to toggle sleep mode while in ELP */ +void wl1271_ps_elp_sleep(struct wl1271 *wl) +{ + struct wl12xx_vif *wlvif; + u32 timeout; + + /* We do not enter elp sleep in PLT mode */ + if (wl->plt) + return; + + if (wl->sleep_auth != WL1271_PSM_ELP) + return; + + /* we shouldn't get consecutive sleep requests */ + if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) + return; + + 
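+ /* only arm the delayed ELP entry once every in-use interface is already in power save; otherwise stay awake */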
wl12xx_for_each_wlvif(wl, wlvif) { + if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + return; + } + + timeout = wl->conf.conn.forced_ps ? + ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY; + ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, + msecs_to_jiffies(timeout)); +} +EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep); + +int wl1271_ps_elp_wakeup(struct wl1271 *wl) +{ + DECLARE_COMPLETION_ONSTACK(compl); + unsigned long flags; + int ret; + unsigned long start_time = jiffies; + bool pending = false; + + /* + * we might try to wake up even if we didn't go to sleep + * before (e.g. on boot) + */ + if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)) + return 0; + + /* don't cancel_sync as it might contend for a mutex and deadlock */ + cancel_delayed_work(&wl->elp_work); + + if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + return 0; + + wl1271_debug(DEBUG_PSM, "waking up chip from elp"); + + /* + * The spinlock is required here to synchronize both the work and + * the completion variable in one entity. + */ + spin_lock_irqsave(&wl->wl_lock, flags); + if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) + pending = true; + else + wl->elp_compl = &compl; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto err; + } + + if (!pending) { + ret = wait_for_completion_timeout( + &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); + if (ret == 0) { + wl1271_error("ELP wakeup timeout!"); + wl12xx_queue_recovery_work(wl); + ret = -ETIMEDOUT; + goto err; + } + } + + clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); + + wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", + jiffies_to_msecs(jiffies - start_time)); + goto out; + +err: + spin_lock_irqsave(&wl->wl_lock, flags); + wl->elp_compl = NULL; + spin_unlock_irqrestore(&wl->wl_lock, flags); + return ret; + +out: + return 0; +} +EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup); + +int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum wl1271_cmd_ps_mode mode) +{ + int ret; + u16 timeout = wl->conf.conn.dynamic_ps_timeout; + + switch (mode) { + case STATION_AUTO_PS_MODE: + case STATION_POWER_SAVE_MODE: + wl1271_debug(DEBUG_PSM, "entering psm (mode=%d,timeout=%u)", + mode, timeout); + + ret = wl1271_acx_wake_up_conditions(wl, wlvif, + wl->conf.conn.wake_up_event, + wl->conf.conn.listen_interval); + if (ret < 0) { + wl1271_error("couldn't set wake up conditions"); + return ret; + } + + ret = wl1271_cmd_ps_mode(wl, wlvif, mode, timeout); + if (ret < 0) + return ret; + + set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags); + + /* + * enable beacon early termination. + * Not relevant for 5GHz and for high rates. 
+ */ + if ((wlvif->band == IEEE80211_BAND_2GHZ) && + (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) { + ret = wl1271_acx_bet_enable(wl, wlvif, true); + if (ret < 0) + return ret; + } + break; + case STATION_ACTIVE_MODE: + wl1271_debug(DEBUG_PSM, "leaving psm"); + + /* disable beacon early termination */ + if ((wlvif->band == IEEE80211_BAND_2GHZ) && + (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) { + ret = wl1271_acx_bet_enable(wl, wlvif, false); + if (ret < 0) + return ret; + } + + ret = wl1271_cmd_ps_mode(wl, wlvif, mode, 0); + if (ret < 0) + return ret; + + clear_bit(WLVIF_FLAG_IN_PS, &wlvif->flags); + break; + default: + wl1271_warning("trying to set ps to unsupported mode %d", mode); + ret = -EINVAL; + } + + return ret; +} + +static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid) +{ + int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + unsigned long flags; + int filtered[NUM_TX_QUEUES]; + struct wl1271_link *lnk = &wl->links[hlid]; + + /* filter all frames currently in the low level queues for this hlid */ + for (i = 0; i < NUM_TX_QUEUES; i++) { + filtered[i] = 0; + while ((skb = skb_dequeue(&lnk->tx_queue[i]))) { + filtered[i]++; + + if (WARN_ON(wl12xx_is_dummy_packet(wl, skb))) + continue; + + info = IEEE80211_SKB_CB(skb); + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + info->status.rates[0].idx = -1; + ieee80211_tx_status_ni(wl->hw, skb); + } + } + + spin_lock_irqsave(&wl->wl_lock, flags); + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->tx_queue_count[i] -= filtered[i]; + if (lnk->wlvif) + lnk->wlvif->tx_queue_count[i] -= filtered[i]; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl1271_handle_tx_low_watermark(wl); +} + +void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid, bool clean_queues) +{ + struct ieee80211_sta *sta; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS)) + return; + + if (!test_bit(hlid, wlvif->ap.sta_hlid_map) || + test_bit(hlid, &wl->ap_ps_map)) + return; + + wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d " + "clean_queues %d", hlid, wl->links[hlid].allocated_pkts, + clean_queues); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, wl->links[hlid].addr); + if (!sta) { + wl1271_error("could not find sta %pM for starting ps", + wl->links[hlid].addr); + rcu_read_unlock(); + return; + } + + ieee80211_sta_ps_transition_ni(sta, true); + rcu_read_unlock(); + + /* do we want to filter all frames from this link's queues? 
*/ + if (clean_queues) + wl1271_ps_filter_frames(wl, hlid); + + __set_bit(hlid, &wl->ap_ps_map); +} + +void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) +{ + struct ieee80211_sta *sta; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + + if (!test_bit(hlid, &wl->ap_ps_map)) + return; + + wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid); + + __clear_bit(hlid, &wl->ap_ps_map); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, wl->links[hlid].addr); + if (!sta) { + wl1271_error("could not find sta %pM for ending ps", + wl->links[hlid].addr); + goto end; + } + + ieee80211_sta_ps_transition_ni(sta, false); +end: + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h new file mode 100644 index 000000000..de4f9da8e --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/ps.h @@ -0,0 +1,41 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __PS_H__ +#define __PS_H__ + +#include "wlcore.h" +#include "acx.h" + +int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, + enum wl1271_cmd_ps_mode mode); +void wl1271_ps_elp_sleep(struct wl1271 *wl); +int wl1271_ps_elp_wakeup(struct wl1271 *wl); +void wl1271_elp_work(struct work_struct *work); +void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 hlid, bool clean_queues); +void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); + +#define WL1271_PS_COMPLETE_TIMEOUT 500 + +#endif /* __WL1271_PS_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c new file mode 100644 index 000000000..e12597428 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -0,0 +1,342 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "acx.h" +#include "rx.h" +#include "tx.h" +#include "io.h" +#include "hw_ops.h" + +/* + * TODO: this is here just for now, it must be removed when the data + * operations are in place. 
+ */ +#include "../wl12xx/reg.h" + +static u32 wlcore_rx_get_buf_size(struct wl1271 *wl, + u32 rx_pkt_desc) +{ + if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) + return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >> + ALIGNED_RX_BUF_SIZE_SHIFT; + + return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; +} + +static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len) +{ + if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) + return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE); + + return pkt_len; +} + +static void wl1271_rx_status(struct wl1271 *wl, + struct wl1271_rx_descriptor *desc, + struct ieee80211_rx_status *status, + u8 beacon) +{ + memset(status, 0, sizeof(struct ieee80211_rx_status)); + + if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) + status->band = IEEE80211_BAND_2GHZ; + else + status->band = IEEE80211_BAND_5GHZ; + + status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band); + + /* 11n support */ + if (desc->rate <= wl->hw_min_ht_rate) + status->flag |= RX_FLAG_HT; + + status->signal = desc->rssi; + + /* + * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we + * need to divide by two for now, but TI has been discussing about + * changing it. This needs to be rechecked. + */ + wl->noise = desc->rssi - (desc->snr >> 1); + + status->freq = ieee80211_channel_to_frequency(desc->channel, + status->band); + + if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { + u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK; + + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED | + RX_FLAG_DECRYPTED; + + if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) { + status->flag |= RX_FLAG_MMIC_ERROR; + wl1271_warning("Michael MIC error. Desc: 0x%x", + desc_err_code); + } + } + + if (beacon) + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, + status->band); +} + +static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, + enum wl_rx_buf_align rx_align, u8 *hlid) +{ + struct wl1271_rx_descriptor *desc; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + u8 *buf; + u8 beacon = 0; + u8 is_data = 0; + u8 reserved = 0, offset_to_data = 0; + u16 seq_num; + u32 pkt_data_len; + + /* + * In PLT mode we seem to get frames and mac80211 warns about them, + * workaround this by not retrieving them at all. + */ + if (unlikely(wl->plt)) + return -EINVAL; + + pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length); + if (!pkt_data_len) { + wl1271_error("Invalid packet arrived from HW. 
length %d", + length); + return -EINVAL; + } + + if (rx_align == WLCORE_RX_BUF_UNALIGNED) + reserved = RX_BUF_ALIGN; + else if (rx_align == WLCORE_RX_BUF_PADDED) + offset_to_data = RX_BUF_ALIGN; + + /* the data read starts with the descriptor */ + desc = (struct wl1271_rx_descriptor *) data; + + if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) { + size_t len = length - sizeof(*desc); + wl12xx_copy_fwlog(wl, data + sizeof(*desc), len); + wake_up_interruptible(&wl->fwlog_waitq); + return 0; + } + + /* discard corrupted packets */ + if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) { + hdr = (void *)(data + sizeof(*desc) + offset_to_data); + wl1271_warning("corrupted packet in RX: status: 0x%x len: %d", + desc->status & WL1271_RX_DESC_STATUS_MASK, + pkt_data_len); + wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc), + min(pkt_data_len, + ieee80211_hdrlen(hdr->frame_control))); + return -EINVAL; + } + + /* skb length not including rx descriptor */ + skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL); + if (!skb) { + wl1271_error("Couldn't allocate RX frame"); + return -ENOMEM; + } + + /* reserve the unaligned payload(if any) */ + skb_reserve(skb, reserved); + + buf = skb_put(skb, pkt_data_len); + + /* + * Copy packets from aggregation buffer to the skbs without rx + * descriptor and with packet payload aligned care. In case of unaligned + * packets copy the packets in offset of 2 bytes guarantee IP header + * payload aligned to 4 bytes. + */ + memcpy(buf, data + sizeof(*desc), pkt_data_len); + if (rx_align == WLCORE_RX_BUF_PADDED) + skb_pull(skb, RX_BUF_ALIGN); + + *hlid = desc->hlid; + + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_beacon(hdr->frame_control)) + beacon = 1; + if (ieee80211_is_data_present(hdr->frame_control)) + is_data = 1; + + wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); + wlcore_hw_set_rx_csum(wl, desc, skb); + + seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb, + skb->len - desc->pad_len, + beacon ? 
"beacon" : "", + seq_num, *hlid); + + skb_queue_tail(&wl->deferred_rx_queue, skb); + queue_work(wl->freezable_wq, &wl->netstack_work); + + return is_data; +} + +int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status) +{ + unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0}; + u32 buf_size; + u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc; + u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc; + u32 rx_counter; + u32 pkt_len, align_pkt_len; + u32 pkt_offset, des; + u8 hlid; + enum wl_rx_buf_align rx_align; + int ret = 0; + + while (drv_rx_counter != fw_rx_counter) { + buf_size = 0; + rx_counter = drv_rx_counter; + while (rx_counter != fw_rx_counter) { + des = le32_to_cpu(status->rx_pkt_descs[rx_counter]); + pkt_len = wlcore_rx_get_buf_size(wl, des); + align_pkt_len = wlcore_rx_get_align_buf_size(wl, + pkt_len); + if (buf_size + align_pkt_len > wl->aggr_buf_size) + break; + buf_size += align_pkt_len; + rx_counter++; + rx_counter %= wl->num_rx_desc; + } + + if (buf_size == 0) { + wl1271_warning("received empty data"); + break; + } + + /* Read all available packets at once */ + des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); + ret = wlcore_hw_prepare_read(wl, des, buf_size); + if (ret < 0) + goto out; + + ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, + buf_size, true); + if (ret < 0) + goto out; + + /* Split data into separate packets */ + pkt_offset = 0; + while (pkt_offset < buf_size) { + des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]); + pkt_len = wlcore_rx_get_buf_size(wl, des); + rx_align = wlcore_hw_get_rx_buf_align(wl, des); + + /* + * the handle data call can only fail in memory-outage + * conditions, in that case the received frame will just + * be dropped. + */ + if (wl1271_rx_handle_data(wl, + wl->aggr_buf + pkt_offset, + pkt_len, rx_align, + &hlid) == 1) { + if (hlid < wl->num_links) + __set_bit(hlid, active_hlids); + else + WARN(1, + "hlid (%d) exceeded MAX_LINKS\n", + hlid); + } + + wl->rx_counter++; + drv_rx_counter++; + drv_rx_counter %= wl->num_rx_desc; + pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len); + } + } + + /* + * Write the driver's packet counter to the FW. This is only required + * for older hardware revisions + */ + if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) { + ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER, + wl->rx_counter); + if (ret < 0) + goto out; + } + + wl12xx_rearm_rx_streaming(wl, active_hlids); + +out: + return ret; +} + +#ifdef CONFIG_PM +int wl1271_rx_filter_enable(struct wl1271 *wl, + int index, bool enable, + struct wl12xx_rx_filter *filter) +{ + int ret; + + if (!!test_bit(index, wl->rx_filter_enabled) == enable) { + wl1271_warning("Request to enable an already " + "enabled rx filter %d", index); + return 0; + } + + ret = wl1271_acx_set_rx_filter(wl, index, enable, filter); + + if (ret) { + wl1271_error("Failed to %s rx data filter %d (err=%d)", + enable ? 
"enable" : "disable", index, ret); + return ret; + } + + if (enable) + __set_bit(index, wl->rx_filter_enabled); + else + __clear_bit(index, wl->rx_filter_enabled); + + return 0; +} + +int wl1271_rx_filter_clear_all(struct wl1271 *wl) +{ + int i, ret = 0; + + for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) { + if (!test_bit(i, wl->rx_filter_enabled)) + continue; + ret = wl1271_rx_filter_enable(wl, i, 0, NULL); + if (ret) + goto out; + } + +out: + return ret; +} +#endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h new file mode 100644 index 000000000..a3b1618db --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/rx.h @@ -0,0 +1,152 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __RX_H__ +#define __RX_H__ + +#include + +#define WL1271_RX_MAX_RSSI -30 +#define WL1271_RX_MIN_RSSI -95 + +#define SHORT_PREAMBLE_BIT BIT(0) +#define OFDM_RATE_BIT BIT(6) +#define PBCC_RATE_BIT BIT(7) + +#define PLCP_HEADER_LENGTH 8 +#define RX_DESC_PACKETID_SHIFT 11 +#define RX_MAX_PACKET_ID 3 + +#define RX_DESC_VALID_FCS 0x0001 +#define RX_DESC_MATCH_RXADDR1 0x0002 +#define RX_DESC_MCAST 0x0004 +#define RX_DESC_STAINTIM 0x0008 +#define RX_DESC_VIRTUAL_BM 0x0010 +#define RX_DESC_BCAST 0x0020 +#define RX_DESC_MATCH_SSID 0x0040 +#define RX_DESC_MATCH_BSSID 0x0080 +#define RX_DESC_ENCRYPTION_MASK 0x0300 +#define RX_DESC_MEASURMENT 0x0400 +#define RX_DESC_SEQNUM_MASK 0x1800 +#define RX_DESC_MIC_FAIL 0x2000 +#define RX_DESC_DECRYPT_FAIL 0x4000 + +/* + * RX Descriptor flags: + * + * Bits 0-1 - band + * Bit 2 - STBC + * Bit 3 - A-MPDU + * Bit 4 - HT + * Bits 5-7 - encryption + */ +#define WL1271_RX_DESC_BAND_MASK 0x03 +#define WL1271_RX_DESC_ENCRYPT_MASK 0xE0 + +#define WL1271_RX_DESC_BAND_BG 0x00 +#define WL1271_RX_DESC_BAND_J 0x01 +#define WL1271_RX_DESC_BAND_A 0x02 + +#define WL1271_RX_DESC_STBC BIT(2) +#define WL1271_RX_DESC_A_MPDU BIT(3) +#define WL1271_RX_DESC_HT BIT(4) + +#define WL1271_RX_DESC_ENCRYPT_WEP 0x20 +#define WL1271_RX_DESC_ENCRYPT_TKIP 0x40 +#define WL1271_RX_DESC_ENCRYPT_AES 0x60 +#define WL1271_RX_DESC_ENCRYPT_GEM 0x80 + +/* + * RX Descriptor status + * + * Bits 0-2 - error code + * Bits 3-5 - process_id tag (AP mode FW) + * Bits 6-7 - reserved + */ +#define WL1271_RX_DESC_STATUS_MASK 0x07 + +#define WL1271_RX_DESC_SUCCESS 0x00 +#define WL1271_RX_DESC_DECRYPT_FAIL 0x01 +#define WL1271_RX_DESC_MIC_FAIL 0x02 + +#define RX_MEM_BLOCK_MASK 0xFF +#define RX_BUF_SIZE_MASK 0xFFF00 +#define RX_BUF_SIZE_SHIFT_DIV 6 +#define ALIGNED_RX_BUF_SIZE_MASK 0xFFFF00 +#define ALIGNED_RX_BUF_SIZE_SHIFT 8 + +/* If set, the start of IP payload is not 4 bytes aligned */ +#define RX_BUF_UNALIGNED_PAYLOAD BIT(20) + +/* If set, the buffer was padded by the FW to be 
4 bytes aligned */ +#define RX_BUF_PADDED_PAYLOAD BIT(30) + +/* + * Account for the padding inserted by the FW in case of RX_ALIGNMENT + * or for fixing alignment in case the packet wasn't aligned. + */ +#define RX_BUF_ALIGN 2 + +/* Describes the alignment state of a Rx buffer */ +enum wl_rx_buf_align { + WLCORE_RX_BUF_ALIGNED, + WLCORE_RX_BUF_UNALIGNED, + WLCORE_RX_BUF_PADDED, +}; + +enum { + WL12XX_RX_CLASS_UNKNOWN, + WL12XX_RX_CLASS_MANAGEMENT, + WL12XX_RX_CLASS_DATA, + WL12XX_RX_CLASS_QOS_DATA, + WL12XX_RX_CLASS_BCN_PRBRSP, + WL12XX_RX_CLASS_EAPOL, + WL12XX_RX_CLASS_BA_EVENT, + WL12XX_RX_CLASS_AMSDU, + WL12XX_RX_CLASS_LOGGER, +}; + +struct wl1271_rx_descriptor { + __le16 length; + u8 status; + u8 flags; + u8 rate; + u8 channel; + s8 rssi; + u8 snr; + __le32 timestamp; + u8 packet_class; + u8 hlid; + u8 pad_len; + u8 reserved; +} __packed; + +int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status); +u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); +int wl1271_rx_filter_enable(struct wl1271 *wl, + int index, bool enable, + struct wl12xx_rx_filter *filter); +int wl1271_rx_filter_clear_all(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c new file mode 100644 index 000000000..1e3d51cd6 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -0,0 +1,488 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include + +#include "wlcore.h" +#include "debug.h" +#include "cmd.h" +#include "scan.h" +#include "acx.h" +#include "ps.h" +#include "tx.h" + +void wl1271_scan_complete_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + struct wl12xx_vif *wlvif; + int ret; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, scan_complete_work); + + wl1271_debug(DEBUG_SCAN, "Scanning complete"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + if (wl->scan.state == WL1271_SCAN_STATE_IDLE) + goto out; + + wlvif = wl->scan_wlvif; + + /* + * Rearm the tx watchdog just before idling scan. 
This + * prevents just-finished scans from triggering the watchdog + */ + wl12xx_rearm_tx_watchdog_locked(wl); + + wl->scan.state = WL1271_SCAN_STATE_IDLE; + memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + wl->scan.req = NULL; + wl->scan_wlvif = NULL; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { + /* restore hardware connection monitoring template */ + wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq); + } + + wl1271_ps_elp_sleep(wl); + + if (wl->scan.failed) { + wl1271_info("Scan completed due to error."); + wl12xx_queue_recovery_work(wl); + } + + wlcore_cmd_regdomain_config_locked(wl); + + ieee80211_scan_completed(wl->hw, false); + +out: + mutex_unlock(&wl->mutex); + +} + +static void wlcore_started_vifs_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + bool active = false; + int *count = (int *)data; + + /* + * count active interfaces according to interface type. + * checking only bss_conf.idle is bad for some cases, e.g. + * we don't want to count sta in p2p_find as active interface. + */ + switch (wlvif->bss_type) { + case BSS_TYPE_STA_BSS: + if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) + active = true; + break; + + case BSS_TYPE_AP_BSS: + if (wlvif->wl->active_sta_count > 0) + active = true; + break; + + default: + break; + } + + if (active) + (*count)++; +} + +static int wlcore_count_started_vifs(struct wl1271 *wl) +{ + int count = 0; + + ieee80211_iterate_active_interfaces_atomic(wl->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + wlcore_started_vifs_iter, &count); + return count; +} + +static int +wlcore_scan_get_channels(struct wl1271 *wl, + struct ieee80211_channel *req_channels[], + u32 n_channels, + u32 n_ssids, + struct conn_scan_ch_params *channels, + u32 band, bool radar, bool passive, + int start, int max_channels, + u8 *n_pactive_ch, + int scan_type) +{ + int i, j; + u32 flags; + bool force_passive = !n_ssids; + u32 min_dwell_time_active, max_dwell_time_active; + u32 dwell_time_passive, dwell_time_dfs; + + /* configure dwell times according to scan type */ + if (scan_type == SCAN_TYPE_SEARCH) { + struct conf_scan_settings *c = &wl->conf.scan; + bool active_vif_exists = !!wlcore_count_started_vifs(wl); + + min_dwell_time_active = active_vif_exists ? + c->min_dwell_time_active : + c->min_dwell_time_active_long; + max_dwell_time_active = active_vif_exists ? 
+ c->max_dwell_time_active : + c->max_dwell_time_active_long; + dwell_time_passive = c->dwell_time_passive; + dwell_time_dfs = c->dwell_time_dfs; + } else { + struct conf_sched_scan_settings *c = &wl->conf.sched_scan; + u32 delta_per_probe; + + if (band == IEEE80211_BAND_5GHZ) + delta_per_probe = c->dwell_time_delta_per_probe_5; + else + delta_per_probe = c->dwell_time_delta_per_probe; + + min_dwell_time_active = c->base_dwell_time + + n_ssids * c->num_probe_reqs * delta_per_probe; + + max_dwell_time_active = min_dwell_time_active + + c->max_dwell_time_delta; + dwell_time_passive = c->dwell_time_passive; + dwell_time_dfs = c->dwell_time_dfs; + } + min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000); + max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000); + dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000); + dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000); + + for (i = 0, j = start; + i < n_channels && j < max_channels; + i++) { + flags = req_channels[i]->flags; + + if (force_passive) + flags |= IEEE80211_CHAN_NO_IR; + + if ((req_channels[i]->band == band) && + !(flags & IEEE80211_CHAN_DISABLED) && + (!!(flags & IEEE80211_CHAN_RADAR) == radar) && + /* if radar is set, we ignore the passive flag */ + (radar || + !!(flags & IEEE80211_CHAN_NO_IR) == passive)) { + if (flags & IEEE80211_CHAN_RADAR) { + channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; + + channels[j].passive_duration = + cpu_to_le16(dwell_time_dfs); + } else { + channels[j].passive_duration = + cpu_to_le16(dwell_time_passive); + } + + channels[j].min_duration = + cpu_to_le16(min_dwell_time_active); + channels[j].max_duration = + cpu_to_le16(max_dwell_time_active); + + channels[j].tx_power_att = req_channels[i]->max_power; + channels[j].channel = req_channels[i]->hw_value; + + if (n_pactive_ch && + (band == IEEE80211_BAND_2GHZ) && + (channels[j].channel >= 12) && + (channels[j].channel <= 14) && + (flags & IEEE80211_CHAN_NO_IR) && + !force_passive) { + /* pactive channels treated as DFS */ + channels[j].flags = SCAN_CHANNEL_FLAGS_DFS; + + /* + * n_pactive_ch is counted down from the end of + * the passive channel list + */ + (*n_pactive_ch)++; + wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d", + *n_pactive_ch); + } + + wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s", + req_channels[i]->center_freq, + req_channels[i]->hw_value, + req_channels[i]->flags, + req_channels[i]->max_power, + min_dwell_time_active, + max_dwell_time_active, + flags & IEEE80211_CHAN_RADAR ? + ", DFS" : "", + flags & IEEE80211_CHAN_NO_IR ? 
+ ", NO-IR" : ""); + j++; + } + } + + return j - start; +} + +bool +wlcore_set_scan_chan_params(struct wl1271 *wl, + struct wlcore_scan_channels *cfg, + struct ieee80211_channel *channels[], + u32 n_channels, + u32 n_ssids, + int scan_type) +{ + u8 n_pactive_ch = 0; + + cfg->passive[0] = + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_2, + IEEE80211_BAND_2GHZ, + false, true, 0, + MAX_CHANNELS_2GHZ, + &n_pactive_ch, + scan_type); + cfg->active[0] = + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_2, + IEEE80211_BAND_2GHZ, + false, false, + cfg->passive[0], + MAX_CHANNELS_2GHZ, + &n_pactive_ch, + scan_type); + cfg->passive[1] = + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + false, true, 0, + wl->max_channels_5, + &n_pactive_ch, + scan_type); + cfg->dfs = + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + true, true, + cfg->passive[1], + wl->max_channels_5, + &n_pactive_ch, + scan_type); + cfg->active[1] = + wlcore_scan_get_channels(wl, + channels, + n_channels, + n_ssids, + cfg->channels_5, + IEEE80211_BAND_5GHZ, + false, false, + cfg->passive[1] + cfg->dfs, + wl->max_channels_5, + &n_pactive_ch, + scan_type); + + /* 802.11j channels are not supported yet */ + cfg->passive[2] = 0; + cfg->active[2] = 0; + + cfg->passive_active = n_pactive_ch; + + wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", + cfg->active[0], cfg->passive[0]); + wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", + cfg->active[1], cfg->passive[1]); + wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs); + + return cfg->passive[0] || cfg->active[0] || + cfg->passive[1] || cfg->active[1] || cfg->dfs || + cfg->passive[2] || cfg->active[2]; +} +EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params); + +int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + struct cfg80211_scan_request *req) +{ + struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + + /* + * cfg80211 should guarantee that we don't get more channels + * than what we have registered. 
+ */ + BUG_ON(req->n_channels > WL1271_MAX_CHANNELS); + + if (wl->scan.state != WL1271_SCAN_STATE_IDLE) + return -EBUSY; + + wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE; + + if (ssid_len && ssid) { + wl->scan.ssid_len = ssid_len; + memcpy(wl->scan.ssid, ssid, ssid_len); + } else { + wl->scan.ssid_len = 0; + } + + wl->scan_wlvif = wlvif; + wl->scan.req = req; + memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); + + /* we assume failure so that timeout scenarios are handled correctly */ + wl->scan.failed = true; + ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, + msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); + + wl->ops->scan_start(wl, wlvif, req); + + return 0; +} +/* Returns the scan type to be used or a negative value on error */ +int +wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req) +{ + struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL; + struct cfg80211_match_set *sets = req->match_sets; + struct cfg80211_ssid *ssids = req->ssids; + int ret = 0, type, i, j, n_match_ssids = 0; + + wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list"); + + /* count the match sets that contain SSIDs */ + for (i = 0; i < req->n_match_sets; i++) + if (sets[i].ssid.ssid_len > 0) + n_match_ssids++; + + /* No filter, no ssids or only bcast ssid */ + if (!n_match_ssids && + (!req->n_ssids || + (req->n_ssids == 1 && req->ssids[0].ssid_len == 0))) { + type = SCAN_SSID_FILTER_ANY; + goto out; + } + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->role_id = wlvif->role_id; + if (!n_match_ssids) { + /* No filter, with ssids */ + type = SCAN_SSID_FILTER_DISABLED; + + for (i = 0; i < req->n_ssids; i++) { + cmd->ssids[cmd->n_ssids].type = (ssids[i].ssid_len) ? + SCAN_SSID_TYPE_HIDDEN : SCAN_SSID_TYPE_PUBLIC; + cmd->ssids[cmd->n_ssids].len = ssids[i].ssid_len; + memcpy(cmd->ssids[cmd->n_ssids].ssid, ssids[i].ssid, + ssids[i].ssid_len); + cmd->n_ssids++; + } + } else { + type = SCAN_SSID_FILTER_LIST; + + /* Add all SSIDs from the filters */ + for (i = 0; i < req->n_match_sets; i++) { + /* ignore sets without SSIDs */ + if (!sets[i].ssid.ssid_len) + continue; + + cmd->ssids[cmd->n_ssids].type = SCAN_SSID_TYPE_PUBLIC; + cmd->ssids[cmd->n_ssids].len = sets[i].ssid.ssid_len; + memcpy(cmd->ssids[cmd->n_ssids].ssid, + sets[i].ssid.ssid, sets[i].ssid.ssid_len); + cmd->n_ssids++; + } + if ((req->n_ssids > 1) || + (req->n_ssids == 1 && req->ssids[0].ssid_len > 0)) { + /* + * Mark all the SSIDs passed in the SSID list as HIDDEN, + * so they're used in probe requests. 
+ */ + for (i = 0; i < req->n_ssids; i++) { + if (!req->ssids[i].ssid_len) + continue; + + for (j = 0; j < cmd->n_ssids; j++) + if ((req->ssids[i].ssid_len == + cmd->ssids[j].len) && + !memcmp(req->ssids[i].ssid, + cmd->ssids[j].ssid, + req->ssids[i].ssid_len)) { + cmd->ssids[j].type = + SCAN_SSID_TYPE_HIDDEN; + break; + } + /* Fail if SSID isn't present in the filters */ + if (j == cmd->n_ssids) { + ret = -EINVAL; + goto out_free; + } + } + } + } + + ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd, + sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("cmd sched scan ssid list failed"); + goto out_free; + } + +out_free: + kfree(cmd); +out: + if (ret < 0) + return ret; + return type; +} +EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list); + +void wlcore_scan_sched_scan_results(struct wl1271 *wl) +{ + wl1271_debug(DEBUG_SCAN, "got periodic scan results"); + + ieee80211_sched_scan_results(wl->hw); +} +EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results); diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h new file mode 100644 index 000000000..4dadd0c62 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/scan.h @@ -0,0 +1,172 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __SCAN_H__ +#define __SCAN_H__ + +#include "wlcore.h" + +int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + struct cfg80211_scan_request *req); +int wl1271_scan_build_probe_req(struct wl1271 *wl, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len, u8 band); +void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl1271_scan_complete_work(struct work_struct *work); +int wl1271_scan_sched_scan_config(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); +int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wlcore_scan_sched_scan_results(struct wl1271 *wl); + +#define WL1271_SCAN_MAX_CHANNELS 24 +#define WL1271_SCAN_DEFAULT_TAG 1 +#define WL1271_SCAN_CURRENT_TX_PWR 0 +#define WL1271_SCAN_OPT_ACTIVE 0 +#define WL1271_SCAN_OPT_PASSIVE 1 +#define WL1271_SCAN_OPT_SPLIT_SCAN 2 +#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 +/* scan even if we fail to enter psm */ +#define WL1271_SCAN_OPT_FORCE 8 +#define WL1271_SCAN_BAND_2_4_GHZ 0 +#define WL1271_SCAN_BAND_5_GHZ 1 + +#define WL1271_SCAN_TIMEOUT 30000 /* msec */ + +enum { + WL1271_SCAN_STATE_IDLE, + WL1271_SCAN_STATE_2GHZ_ACTIVE, + WL1271_SCAN_STATE_2GHZ_PASSIVE, + WL1271_SCAN_STATE_5GHZ_ACTIVE, + WL1271_SCAN_STATE_5GHZ_PASSIVE, + WL1271_SCAN_STATE_DONE +}; + +struct wl1271_cmd_trigger_scan_to { + struct wl1271_cmd_header header; + + __le32 timeout; +} __packed; + +#define MAX_CHANNELS_2GHZ 14 +#define MAX_CHANNELS_4GHZ 
4 + +/* + * This max value here is used only for the struct definition of + * wlcore_scan_channels. This struct is used by both 12xx + * and 18xx (which have different max 5ghz channels value). + * In order to make sure this is large enough, just use the + * max possible 5ghz channels. + */ +#define MAX_CHANNELS_5GHZ 42 + +#define SCAN_MAX_CYCLE_INTERVALS 16 +#define SCAN_MAX_BANDS 3 + +enum { + SCAN_SSID_FILTER_ANY = 0, + SCAN_SSID_FILTER_SPECIFIC = 1, + SCAN_SSID_FILTER_LIST = 2, + SCAN_SSID_FILTER_DISABLED = 3 +}; + +enum { + SCAN_BSS_TYPE_INDEPENDENT, + SCAN_BSS_TYPE_INFRASTRUCTURE, + SCAN_BSS_TYPE_ANY, +}; + +#define SCAN_CHANNEL_FLAGS_DFS BIT(0) /* channel is passive until an + activity is detected on it */ +#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1) + +struct conn_scan_ch_params { + __le16 min_duration; + __le16 max_duration; + __le16 passive_duration; + + u8 channel; + u8 tx_power_att; + + /* bit 0: DFS channel; bit 1: DFS enabled */ + u8 flags; + + u8 padding[3]; +} __packed; + +#define SCHED_SCAN_MAX_SSIDS 16 + +enum { + SCAN_SSID_TYPE_PUBLIC = 0, + SCAN_SSID_TYPE_HIDDEN = 1, +}; + +struct wl1271_ssid { + u8 type; + u8 len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + /* u8 padding[2]; */ +} __packed; + +struct wl1271_cmd_sched_scan_ssid_list { + struct wl1271_cmd_header header; + + u8 n_ssids; + struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS]; + u8 role_id; + u8 padding[2]; +} __packed; + +struct wlcore_scan_channels { + u8 passive[SCAN_MAX_BANDS]; /* number of passive scan channels */ + u8 active[SCAN_MAX_BANDS]; /* number of active scan channels */ + u8 dfs; /* number of dfs channels in 5ghz */ + u8 passive_active; /* number of passive before active channels 2.4ghz */ + + struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ]; + struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ]; + struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ]; +}; + +enum { + SCAN_TYPE_SEARCH = 0, + SCAN_TYPE_PERIODIC = 1, + SCAN_TYPE_TRACKING = 2, +}; + +bool +wlcore_set_scan_chan_params(struct wl1271 *wl, + struct wlcore_scan_channels *cfg, + struct ieee80211_channel *channels[], + u32 n_channels, + u32 n_ssids, + int scan_type); + +int +wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req); + +#endif /* __WL1271_SCAN_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c new file mode 100644 index 000000000..ea7e07abc --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -0,0 +1,458 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlcore.h" +#include "wl12xx_80211.h" +#include "io.h" + +#ifndef SDIO_VENDOR_ID_TI +#define SDIO_VENDOR_ID_TI 0x0097 +#endif + +#ifndef SDIO_DEVICE_ID_TI_WL1271 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#endif + +static bool dump = false; + +struct wl12xx_sdio_glue { + struct device *dev; + struct platform_device *core; +}; + +static const struct sdio_device_id wl1271_devices[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, + {} +}; +MODULE_DEVICE_TABLE(sdio, wl1271_devices); + +static void wl1271_sdio_set_block_size(struct device *child, + unsigned int blksz) +{ + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); + + sdio_claim_host(func); + sdio_set_block_size(func, blksz); + sdio_release_host(func); +} + +static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr, + void *buf, size_t len, bool fixed) +{ + int ret; + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); + + sdio_claim_host(func); + + if (unlikely(dump)) { + printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr); + print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ", + DUMP_PREFIX_OFFSET, 16, 1, + buf, len, false); + } + + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { + ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); + dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", + addr, ((u8 *)buf)[0]); + } else { + if (fixed) + ret = sdio_readsb(func, buf, addr, len); + else + ret = sdio_memcpy_fromio(func, buf, addr, len); + + dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n", + addr, len); + } + + sdio_release_host(func); + + if (WARN_ON(ret)) + dev_err(child->parent, "sdio read failed (%d)\n", ret); + + return ret; +} + +static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr, + void *buf, size_t len, bool fixed) +{ + int ret; + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + struct sdio_func *func = dev_to_sdio_func(glue->dev); + + sdio_claim_host(func); + + if (unlikely(dump)) { + printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr); + print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ", + DUMP_PREFIX_OFFSET, 16, 1, + buf, len, false); + } + + if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) { + sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); + dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", + addr, ((u8 *)buf)[0]); + } else { + dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n", + addr, len); + + if (fixed) + ret = sdio_writesb(func, addr, buf, len); + else + ret = sdio_memcpy_toio(func, addr, buf, len); + } + + sdio_release_host(func); + + if (WARN_ON(ret)) + dev_err(child->parent, "sdio write failed (%d)\n", ret); + + return ret; +} + +static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) +{ + int ret; + struct sdio_func *func = dev_to_sdio_func(glue->dev); + struct mmc_card *card = func->card; + + ret = pm_runtime_get_sync(&card->dev); + if (ret) { + /* + * Runtime PM might be temporarily disabled, or the device + * might have a positive reference counter. 
Make sure it is + * really powered on. + */ + ret = mmc_power_restore_host(card->host); + if (ret < 0) { + pm_runtime_put_sync(&card->dev); + goto out; + } + } + + sdio_claim_host(func); + sdio_enable_func(func); + sdio_release_host(func); + +out: + return ret; +} + +static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) +{ + int ret; + struct sdio_func *func = dev_to_sdio_func(glue->dev); + struct mmc_card *card = func->card; + + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + + /* Power off the card manually in case it wasn't powered off above */ + ret = mmc_power_save_host(card->host); + if (ret < 0) + goto out; + + /* Let runtime PM know the card is powered off */ + pm_runtime_put_sync(&card->dev); + +out: + return ret; +} + +static int wl12xx_sdio_set_power(struct device *child, bool enable) +{ + struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent); + + if (enable) + return wl12xx_sdio_power_on(glue); + else + return wl12xx_sdio_power_off(glue); +} + +static struct wl1271_if_operations sdio_ops = { + .read = wl12xx_sdio_raw_read, + .write = wl12xx_sdio_raw_write, + .power = wl12xx_sdio_set_power, + .set_block_size = wl1271_sdio_set_block_size, +}; + +#ifdef CONFIG_OF +static const struct of_device_id wlcore_sdio_of_match_table[] = { + { .compatible = "ti,wl1271" }, + { .compatible = "ti,wl1273" }, + { .compatible = "ti,wl1281" }, + { .compatible = "ti,wl1283" }, + { .compatible = "ti,wl1801" }, + { .compatible = "ti,wl1805" }, + { .compatible = "ti,wl1807" }, + { .compatible = "ti,wl1831" }, + { .compatible = "ti,wl1835" }, + { .compatible = "ti,wl1837" }, + { } +}; + +static int wlcore_probe_of(struct device *dev, int *irq, + struct wlcore_platdev_data *pdev_data) +{ + struct device_node *np = dev->of_node; + + if (!np || !of_match_node(wlcore_sdio_of_match_table, np)) + return -ENODATA; + + *irq = irq_of_parse_and_map(np, 0); + if (!*irq) { + dev_err(dev, "No irq in platform data\n"); + kfree(pdev_data); + return -EINVAL; + } + + /* optional clock frequency params */ + of_property_read_u32(np, "ref-clock-frequency", + &pdev_data->ref_clock_freq); + of_property_read_u32(np, "tcxo-clock-frequency", + &pdev_data->tcxo_clock_freq); + + return 0; +} +#else +static int wlcore_probe_of(struct device *dev, int *irq, + struct wlcore_platdev_data *pdev_data) +{ + return -ENODATA; +} +#endif + +static int wl1271_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct wlcore_platdev_data pdev_data; + struct wl12xx_sdio_glue *glue; + struct resource res[1]; + mmc_pm_flag_t mmcflags; + int ret = -ENOMEM; + int irq; + const char *chip_family; + + /* We are only able to handle the wlan function */ + if (func->num != 0x02) + return -ENODEV; + + memset(&pdev_data, 0x00, sizeof(pdev_data)); + pdev_data.if_ops = &sdio_ops; + + glue = kzalloc(sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&func->dev, "can't allocate glue\n"); + goto out; + } + + glue->dev = &func->dev; + + /* Grab access to FN0 for ELP reg. 
*/ + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + + /* Use block mode for transferring over one block size of data */ + func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; + + if (wlcore_probe_of(&func->dev, &irq, &pdev_data)) + goto out_free_glue; + + /* if sdio can keep power while host is suspended, enable wow */ + mmcflags = sdio_get_host_pm_caps(func); + dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); + + if (mmcflags & MMC_PM_KEEP_POWER) + pdev_data.pwr_in_suspend = true; + + sdio_set_drvdata(func, glue); + + /* Tell PM core that we don't need the card to be powered now */ + pm_runtime_put_noidle(&func->dev); + + /* + * Due to a hardware bug, we can't differentiate wl18xx from + * wl12xx, because both report the same device ID. The only + * way to differentiate is by checking the SDIO revision, + * which is 3.00 on the wl18xx chips. + */ + if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00) + chip_family = "wl18xx"; + else + chip_family = "wl12xx"; + + glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO); + if (!glue->core) { + dev_err(glue->dev, "can't allocate platform_device"); + ret = -ENOMEM; + goto out_free_glue; + } + + glue->core->dev.parent = &func->dev; + + memset(res, 0x00, sizeof(res)); + + res[0].start = irq; + res[0].flags = IORESOURCE_IRQ | + irqd_get_trigger_type(irq_get_irq_data(irq)); + res[0].name = "irq"; + + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); + if (ret) { + dev_err(glue->dev, "can't add resources\n"); + goto out_dev_put; + } + + ret = platform_device_add_data(glue->core, &pdev_data, + sizeof(pdev_data)); + if (ret) { + dev_err(glue->dev, "can't add platform data\n"); + goto out_dev_put; + } + + ret = platform_device_add(glue->core); + if (ret) { + dev_err(glue->dev, "can't add platform device\n"); + goto out_dev_put; + } + return 0; + +out_dev_put: + platform_device_put(glue->core); + +out_free_glue: + kfree(glue); + +out: + return ret; +} + +static void wl1271_remove(struct sdio_func *func) +{ + struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); + + /* Undo decrement done above in wl1271_probe */ + pm_runtime_get_noresume(&func->dev); + + platform_device_unregister(glue->core); + kfree(glue); +} + +#ifdef CONFIG_PM +static int wl1271_suspend(struct device *dev) +{ + /* Tell MMC/SDIO core it's OK to power down the card + * (if it isn't already), but not to remove it completely */ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); + struct wl1271 *wl = platform_get_drvdata(glue->core); + mmc_pm_flag_t sdio_flags; + int ret = 0; + + dev_dbg(dev, "wl1271 suspend. 
wow_enabled: %d\n", + wl->wow_enabled); + + /* check whether sdio should keep power */ + if (wl->wow_enabled) { + sdio_flags = sdio_get_host_pm_caps(func); + + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + dev_err(dev, "can't keep power while host " + "is suspended\n"); + ret = -EINVAL; + goto out; + } + + /* keep power while host suspended */ + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) { + dev_err(dev, "error while trying to keep power\n"); + goto out; + } + } +out: + return ret; +} + +static int wl1271_resume(struct device *dev) +{ + dev_dbg(dev, "wl1271 resume\n"); + + return 0; +} + +static const struct dev_pm_ops wl1271_sdio_pm_ops = { + .suspend = wl1271_suspend, + .resume = wl1271_resume, +}; +#endif + +static struct sdio_driver wl1271_sdio_driver = { + .name = "wl1271_sdio", + .id_table = wl1271_devices, + .probe = wl1271_probe, + .remove = wl1271_remove, +#ifdef CONFIG_PM + .drv = { + .pm = &wl1271_sdio_pm_ops, + }, +#endif +}; + +static int __init wl1271_init(void) +{ + return sdio_register_driver(&wl1271_sdio_driver); +} + +static void __exit wl1271_exit(void) +{ + sdio_unregister_driver(&wl1271_sdio_driver); +} + +module_init(wl1271_init); +module_exit(wl1271_exit); + +module_param(dump, bool, S_IRUSR | S_IWUSR); +MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Luciano Coelho "); +MODULE_AUTHOR("Juuso Oikarinen "); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c new file mode 100644 index 000000000..f1ac2839d --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -0,0 +1,422 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wlcore.h" +#include "wl12xx_80211.h" +#include "io.h" + +#define WSPI_CMD_READ 0x40000000 +#define WSPI_CMD_WRITE 0x00000000 +#define WSPI_CMD_FIXED 0x20000000 +#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000 +#define WSPI_CMD_BYTE_LENGTH_OFFSET 17 +#define WSPI_CMD_BYTE_ADDR 0x0001FFFF + +#define WSPI_INIT_CMD_CRC_LEN 5 + +#define WSPI_INIT_CMD_START 0x00 +#define WSPI_INIT_CMD_TX 0x40 +/* the extra bypass bit is sampled by the TNET as '1' */ +#define WSPI_INIT_CMD_BYPASS_BIT 0x80 +#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07 +#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80 +#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00 +#define WSPI_INIT_CMD_IOD 0x40 +#define WSPI_INIT_CMD_IP 0x20 +#define WSPI_INIT_CMD_CS 0x10 +#define WSPI_INIT_CMD_WS 0x08 +#define WSPI_INIT_CMD_WSPI 0x01 +#define WSPI_INIT_CMD_END 0x01 + +#define WSPI_INIT_CMD_LEN 8 + +#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \ + ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) +#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 + +/* HW limitation: maximum possible chunk size is 4095 bytes */ +#define WSPI_MAX_CHUNK_SIZE 4092 + +/* + * only support SPI for 12xx - this code should be reworked when 18xx + * support is introduced + */ +#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) + +#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + +struct wl12xx_spi_glue { + struct device *dev; + struct platform_device *core; +}; + +static void wl12xx_spi_reset(struct device *child) +{ + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + u8 *cmd; + struct spi_transfer t; + struct spi_message m; + + cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + if (!cmd) { + dev_err(child->parent, + "could not allocate cmd for spi reset\n"); + return; + } + + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + memset(cmd, 0xff, WSPI_INIT_CMD_LEN); + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(to_spi_device(glue->dev), &m); + + kfree(cmd); +} + +static void wl12xx_spi_init(struct device *child) +{ + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct spi_transfer t; + struct spi_message m; + u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + + if (!cmd) { + dev_err(child->parent, + "could not allocate cmd for spi init\n"); + return; + } + + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + /* + * Set WSPI_INIT_COMMAND + * the data is being send from the MSB to LSB + */ + cmd[0] = 0xff; + cmd[1] = 0xff; + cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; + cmd[3] = 0; + cmd[4] = 0; + cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3; + cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; + + cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS + | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; + + if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) + cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; + else + cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY; + + cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END; + /* + * The above is the logical order; it must actually be stored + * in the buffer byte-swapped. 
+ */ + __swab32s((u32 *)cmd); + __swab32s((u32 *)cmd+1); + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(to_spi_device(glue->dev), &m); + kfree(cmd); +} + +#define WL1271_BUSY_WORD_TIMEOUT 1000 + +static int wl12xx_spi_read_busy(struct device *child) +{ + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct wl1271 *wl = dev_get_drvdata(child); + struct spi_transfer t[1]; + struct spi_message m; + u32 *busy_buf; + int num_busy_bytes = 0; + + /* + * Read further busy words from SPI until a non-busy word is + * encountered, then read the data itself into the buffer. + */ + + num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT; + busy_buf = wl->buffer_busyword; + while (num_busy_bytes) { + num_busy_bytes--; + spi_message_init(&m); + memset(t, 0, sizeof(t)); + t[0].rx_buf = busy_buf; + t[0].len = sizeof(u32); + t[0].cs_change = true; + spi_message_add_tail(&t[0], &m); + spi_sync(to_spi_device(glue->dev), &m); + + if (*busy_buf & 0x1) + return 0; + } + + /* The SPI bus is unresponsive, the read failed. */ + dev_err(child->parent, "SPI read busy-word timeout!\n"); + return -ETIMEDOUT; +} + +static int __must_check wl12xx_spi_raw_read(struct device *child, int addr, + void *buf, size_t len, bool fixed) +{ + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct wl1271 *wl = dev_get_drvdata(child); + struct spi_transfer t[2]; + struct spi_message m; + u32 *busy_buf; + u32 *cmd; + u32 chunk_len; + + while (len > 0) { + chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len); + + cmd = &wl->buffer_cmd; + busy_buf = wl->buffer_busyword; + + *cmd = 0; + *cmd |= WSPI_CMD_READ; + *cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) & + WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + if (fixed) + *cmd |= WSPI_CMD_FIXED; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = 4; + t[0].cs_change = true; + spi_message_add_tail(&t[0], &m); + + /* Busy and non busy words read */ + t[1].rx_buf = busy_buf; + t[1].len = WL1271_BUSY_WORD_LEN; + t[1].cs_change = true; + spi_message_add_tail(&t[1], &m); + + spi_sync(to_spi_device(glue->dev), &m); + + if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && + wl12xx_spi_read_busy(child)) { + memset(buf, 0, chunk_len); + return 0; + } + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].rx_buf = buf; + t[0].len = chunk_len; + t[0].cs_change = true; + spi_message_add_tail(&t[0], &m); + + spi_sync(to_spi_device(glue->dev), &m); + + if (!fixed) + addr += chunk_len; + buf += chunk_len; + len -= chunk_len; + } + + return 0; +} + +static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, + void *buf, size_t len, bool fixed) +{ + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)]; + struct spi_message m; + u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; + u32 *cmd; + u32 chunk_len; + int i; + + WARN_ON(len > SPI_AGGR_BUFFER_SIZE); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + cmd = &commands[0]; + i = 0; + while (len > 0) { + chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len); + + *cmd = 0; + *cmd |= WSPI_CMD_WRITE; + *cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) & + WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + if (fixed) + *cmd |= WSPI_CMD_FIXED; + + t[i].tx_buf = cmd; + t[i].len = sizeof(*cmd); + spi_message_add_tail(&t[i++], &m); + + t[i].tx_buf = buf; + t[i].len = chunk_len; + spi_message_add_tail(&t[i++], &m); + + if (!fixed) + addr += 
chunk_len; + buf += chunk_len; + len -= chunk_len; + cmd++; + } + + spi_sync(to_spi_device(glue->dev), &m); + + return 0; +} + +static struct wl1271_if_operations spi_ops = { + .read = wl12xx_spi_raw_read, + .write = wl12xx_spi_raw_write, + .reset = wl12xx_spi_reset, + .init = wl12xx_spi_init, + .set_block_size = NULL, +}; + +static int wl1271_probe(struct spi_device *spi) +{ + struct wl12xx_spi_glue *glue; + struct wlcore_platdev_data pdev_data; + struct resource res[1]; + int ret; + + memset(&pdev_data, 0x00, sizeof(pdev_data)); + + /* TODO: add DT parsing when needed */ + + pdev_data.if_ops = &spi_ops; + + glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL); + if (!glue) { + dev_err(&spi->dev, "can't allocate glue\n"); + return -ENOMEM; + } + + glue->dev = &spi->dev; + + spi_set_drvdata(spi, glue); + + /* This is the only SPI value that we need to set here, the rest + * comes from the board-peripherals file */ + spi->bits_per_word = 32; + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(glue->dev, "spi_setup failed\n"); + return ret; + } + + glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO); + if (!glue->core) { + dev_err(glue->dev, "can't allocate platform_device\n"); + return -ENOMEM; + } + + glue->core->dev.parent = &spi->dev; + + memset(res, 0x00, sizeof(res)); + + res[0].start = spi->irq; + res[0].flags = IORESOURCE_IRQ; + res[0].name = "irq"; + + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); + if (ret) { + dev_err(glue->dev, "can't add resources\n"); + goto out_dev_put; + } + + ret = platform_device_add_data(glue->core, &pdev_data, + sizeof(pdev_data)); + if (ret) { + dev_err(glue->dev, "can't add platform data\n"); + goto out_dev_put; + } + + ret = platform_device_add(glue->core); + if (ret) { + dev_err(glue->dev, "can't register platform device\n"); + goto out_dev_put; + } + + return 0; + +out_dev_put: + platform_device_put(glue->core); + return ret; +} + +static int wl1271_remove(struct spi_device *spi) +{ + struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); + + platform_device_unregister(glue->core); + + return 0; +} + + +static struct spi_driver wl1271_spi_driver = { + .driver = { + .name = "wl1271_spi", + .owner = THIS_MODULE, + }, + + .probe = wl1271_probe, + .remove = wl1271_remove, +}; + +module_spi_driver(wl1271_spi_driver); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Luciano Coelho "); +MODULE_AUTHOR("Juuso Oikarinen "); +MODULE_ALIAS("spi:wl1271"); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c new file mode 100644 index 000000000..24dd288d6 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -0,0 +1,216 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2013 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wlcore.h" +#include "debug.h" +#include "ps.h" +#include "sysfs.h" + +static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + ssize_t len; + + len = PAGE_SIZE; + + mutex_lock(&wl->mutex); + len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", + wl->sg_enabled); + mutex_unlock(&wl->mutex); + + return len; + +} + +static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + unsigned long res; + int ret; + + ret = kstrtoul(buf, 10, &res); + if (ret < 0) { + wl1271_warning("incorrect value written to bt_coex_mode"); + return count; + } + + mutex_lock(&wl->mutex); + + res = !!res; + + if (res == wl->sg_enabled) + goto out; + + wl->sg_enabled = res; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl1271_acx_sg_enable(wl, wl->sg_enabled); + wl1271_ps_elp_sleep(wl); + + out: + mutex_unlock(&wl->mutex); + return count; +} + +static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, + wl1271_sysfs_show_bt_coex_state, + wl1271_sysfs_store_bt_coex_state); + +static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + ssize_t len; + + len = PAGE_SIZE; + + mutex_lock(&wl->mutex); + if (wl->hw_pg_ver >= 0) + len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); + else + len = snprintf(buf, len, "n/a\n"); + mutex_unlock(&wl->mutex); + + return len; +} + +static DEVICE_ATTR(hw_pg_ver, S_IRUGO, + wl1271_sysfs_show_hw_pg_ver, NULL); + +static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t pos, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct wl1271 *wl = dev_get_drvdata(dev); + ssize_t len; + int ret; + + ret = mutex_lock_interruptible(&wl->mutex); + if (ret < 0) + return -ERESTARTSYS; + + /* Let only one thread read the log at a time, blocking others */ + while (wl->fwlog_size == 0) { + DEFINE_WAIT(wait); + + prepare_to_wait_exclusive(&wl->fwlog_waitq, + &wait, + TASK_INTERRUPTIBLE); + + if (wl->fwlog_size != 0) { + finish_wait(&wl->fwlog_waitq, &wait); + break; + } + + mutex_unlock(&wl->mutex); + + schedule(); + finish_wait(&wl->fwlog_waitq, &wait); + + if (signal_pending(current)) + return -ERESTARTSYS; + + ret = mutex_lock_interruptible(&wl->mutex); + if (ret < 0) + return -ERESTARTSYS; + } + + /* Check if the fwlog is still valid */ + if (wl->fwlog_size < 0) { + mutex_unlock(&wl->mutex); + return 0; + } + + /* Seeking is not supported - old logs are not kept. Disregard pos. 
*/ + len = min_t(size_t, count, wl->fwlog_size); + wl->fwlog_size -= len; + memcpy(buffer, wl->fwlog, len); + + /* Make room for new messages */ + memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); + + mutex_unlock(&wl->mutex); + + return len; +} + +static struct bin_attribute fwlog_attr = { + .attr = {.name = "fwlog", .mode = S_IRUSR}, + .read = wl1271_sysfs_read_fwlog, +}; + +int wlcore_sysfs_init(struct wl1271 *wl) +{ + int ret; + + /* Create sysfs file to control bt coex state */ + ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); + if (ret < 0) { + wl1271_error("failed to create sysfs file bt_coex_state"); + goto out; + } + + /* Create sysfs file to get HW PG version */ + ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); + if (ret < 0) { + wl1271_error("failed to create sysfs file hw_pg_ver"); + goto out_bt_coex_state; + } + + /* Create sysfs file for the FW log */ + ret = device_create_bin_file(wl->dev, &fwlog_attr); + if (ret < 0) { + wl1271_error("failed to create sysfs file fwlog"); + goto out_hw_pg_ver; + } + + goto out; + +out_hw_pg_ver: + device_remove_file(wl->dev, &dev_attr_hw_pg_ver); + +out_bt_coex_state: + device_remove_file(wl->dev, &dev_attr_bt_coex_state); + +out: + return ret; +} + +void wlcore_sysfs_free(struct wl1271 *wl) +{ + device_remove_bin_file(wl->dev, &fwlog_attr); + + device_remove_file(wl->dev, &dev_attr_hw_pg_ver); + + device_remove_file(wl->dev, &dev_attr_bt_coex_state); +} diff --git a/drivers/net/wireless/ti/wlcore/sysfs.h b/drivers/net/wireless/ti/wlcore/sysfs.h new file mode 100644 index 000000000..c14889218 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/sysfs.h @@ -0,0 +1,28 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2013 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __SYSFS_H__ +#define __SYSFS_H__ + +int wlcore_sysfs_init(struct wl1271 *wl); +void wlcore_sysfs_free(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c new file mode 100644 index 000000000..ddad58f61 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -0,0 +1,397 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
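That completes sysfs.c. Purely as a usage illustration (the sysfs path below is an assumption and depends on where the device sits on the bus), the blocking fwlog attribute can be drained from userspace with a plain read loop:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/ieee80211/phy0/device/fwlog", O_RDONLY);

	if (fd < 0)
		return 1;
	/* each read blocks until the driver has log data, then consumes it */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}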
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ +#include "testmode.h" + +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "acx.h" +#include "ps.h" +#include "io.h" + +#define WL1271_TM_MAX_DATA_LENGTH 1024 + +enum wl1271_tm_commands { + WL1271_TM_CMD_UNSPEC, + WL1271_TM_CMD_TEST, + WL1271_TM_CMD_INTERROGATE, + WL1271_TM_CMD_CONFIGURE, + WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */ + WL1271_TM_CMD_SET_PLT_MODE, + WL1271_TM_CMD_RECOVER, /* Not in use. Keep to not break ABI */ + WL1271_TM_CMD_GET_MAC, + + __WL1271_TM_CMD_AFTER_LAST +}; +#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1) + +enum wl1271_tm_attrs { + WL1271_TM_ATTR_UNSPEC, + WL1271_TM_ATTR_CMD_ID, + WL1271_TM_ATTR_ANSWER, + WL1271_TM_ATTR_DATA, + WL1271_TM_ATTR_IE_ID, + WL1271_TM_ATTR_PLT_MODE, + + __WL1271_TM_ATTR_AFTER_LAST +}; +#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1) + +static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = { + [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 }, + [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 }, + [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = WL1271_TM_MAX_DATA_LENGTH }, + [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 }, + [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 }, +}; + + +static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) +{ + int buf_len, ret, len; + struct sk_buff *skb; + void *buf; + u8 answer = 0; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd test"); + + if (!tb[WL1271_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[WL1271_TM_ATTR_DATA]); + buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); + + if (tb[WL1271_TM_ATTR_ANSWER]) + answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]); + + if (buf_len > sizeof(struct wl1271_command)) + return -EMSGSIZE; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_cmd_test(wl, buf, buf_len, answer); + if (ret < 0) { + wl1271_warning("testmode cmd test failed: %d", ret); + goto out_sleep; + } + + if (answer) { + /* If we got bip calibration answer print radio status */ + struct wl1271_cmd_cal_p2g *params = + (struct wl1271_cmd_cal_p2g *) buf; + + s16 radio_status = (s16) le16_to_cpu(params->radio_status); + + if (params->test.id == TEST_CMD_P2G_CAL && + radio_status < 0) + wl1271_warning("testmode cmd: radio status=%d", + radio_status); + else + wl1271_info("testmode cmd: radio status=%d", + radio_status); + + len = nla_total_size(buf_len); + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); + if (!skb) { + ret = -ENOMEM; + goto out_sleep; + } + + if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) { + kfree_skb(skb); + ret = -EMSGSIZE; + goto out_sleep; + } + + ret = cfg80211_testmode_reply(skb); + if (ret < 0) + goto out_sleep; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) +{ + int ret; + struct wl1271_command *cmd; + struct sk_buff *skb; + u8 ie_id; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate"); + + if (!tb[WL1271_TM_ATTR_IE_ID]) + return -EINVAL; + + ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = 
-EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out_sleep; + } + + ret = wl1271_cmd_interrogate(wl, ie_id, cmd, + sizeof(struct acx_header), sizeof(*cmd)); + if (ret < 0) { + wl1271_warning("testmode cmd interrogate failed: %d", ret); + goto out_free; + } + + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); + if (!skb) { + ret = -ENOMEM; + goto out_free; + } + + if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) { + kfree_skb(skb); + ret = -EMSGSIZE; + goto out_free; + } + + ret = cfg80211_testmode_reply(skb); + if (ret < 0) + goto out_free; + +out_free: + kfree(cmd); +out_sleep: + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) +{ + int buf_len, ret; + void *buf; + u8 ie_id; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure"); + + if (!tb[WL1271_TM_ATTR_DATA]) + return -EINVAL; + if (!tb[WL1271_TM_ATTR_IE_ID]) + return -EINVAL; + + ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); + buf = nla_data(tb[WL1271_TM_ATTR_DATA]); + buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); + + if (buf_len > sizeof(struct wl1271_command)) + return -EMSGSIZE; + + mutex_lock(&wl->mutex); + ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len); + mutex_unlock(&wl->mutex); + + if (ret < 0) { + wl1271_warning("testmode cmd configure failed: %d", ret); + return ret; + } + + return 0; +} + +static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[]) +{ + /* return FEM type */ + int ret, len; + struct sk_buff *skb; + + ret = wl1271_plt_start(wl, PLT_FEM_DETECT); + if (ret < 0) + goto out; + + mutex_lock(&wl->mutex); + + len = nla_total_size(sizeof(wl->fem_manuf)); + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); + if (!skb) { + ret = -ENOMEM; + goto out_mutex; + } + + if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(wl->fem_manuf), + &wl->fem_manuf)) { + kfree_skb(skb); + ret = -EMSGSIZE; + goto out_mutex; + } + + ret = cfg80211_testmode_reply(skb); + +out_mutex: + mutex_unlock(&wl->mutex); + + /* We always stop plt after DETECT mode */ + wl1271_plt_stop(wl); +out: + return ret; +} + +static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) +{ + u32 val; + int ret; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode"); + + if (!tb[WL1271_TM_ATTR_PLT_MODE]) + return -EINVAL; + + val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]); + + switch (val) { + case PLT_OFF: + ret = wl1271_plt_stop(wl); + break; + case PLT_ON: + case PLT_CHIP_AWAKE: + ret = wl1271_plt_start(wl, val); + break; + case PLT_FEM_DETECT: + ret = wl1271_tm_detect_fem(wl, tb); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[]) +{ + struct sk_buff *skb; + u8 mac_addr[ETH_ALEN]; + int ret = 0; + + mutex_lock(&wl->mutex); + + if (!wl->plt) { + ret = -EINVAL; + goto out; + } + + if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { + ret = -EOPNOTSUPP; + goto out; + } + + mac_addr[0] = (u8)(wl->fuse_oui_addr >> 16); + mac_addr[1] = (u8)(wl->fuse_oui_addr >> 8); + mac_addr[2] = (u8) wl->fuse_oui_addr; + mac_addr[3] = (u8)(wl->fuse_nic_addr >> 16); + mac_addr[4] = (u8)(wl->fuse_nic_addr >> 8); + mac_addr[5] = (u8) wl->fuse_nic_addr; + + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + if 
(nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) { + kfree_skb(skb); + ret = -EMSGSIZE; + goto out; + } + + ret = cfg80211_testmode_reply(skb); + if (ret < 0) + goto out; + +out: + mutex_unlock(&wl->mutex); + return ret; +} + +int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct wl1271 *wl = hw->priv; + struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; + u32 nla_cmd; + int err; + + err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); + if (err) + return err; + + if (!tb[WL1271_TM_ATTR_CMD_ID]) + return -EINVAL; + + nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]); + + /* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */ + if (wl->plt_mode == PLT_CHIP_AWAKE && + nla_cmd != WL1271_TM_CMD_SET_PLT_MODE) + return -EOPNOTSUPP; + + switch (nla_cmd) { + case WL1271_TM_CMD_TEST: + return wl1271_tm_cmd_test(wl, tb); + case WL1271_TM_CMD_INTERROGATE: + return wl1271_tm_cmd_interrogate(wl, tb); + case WL1271_TM_CMD_CONFIGURE: + return wl1271_tm_cmd_configure(wl, tb); + case WL1271_TM_CMD_SET_PLT_MODE: + return wl1271_tm_cmd_set_plt_mode(wl, tb); + case WL1271_TM_CMD_GET_MAC: + return wl12xx_tm_cmd_get_mac(wl, tb); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/wireless/ti/wlcore/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h new file mode 100644 index 000000000..61d8434d8 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/testmode.h @@ -0,0 +1,32 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TESTMODE_H__ +#define __TESTMODE_H__ + +#include + +int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#endif /* __WL1271_TESTMODE_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c new file mode 100644 index 000000000..f0ac36139 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -0,0 +1,1329 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
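testmode.c and its header end here. For reference, the reply of WL1271_TM_CMD_GET_MAC is assembled from two 24-bit fuse words exactly as wl12xx_tm_cmd_get_mac() does above; a standalone illustration with made-up fuse values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example values only; the real ones come from the chip's fuse registers */
	uint32_t fuse_oui = 0x0012d1;
	uint32_t fuse_nic = 0x80b477;
	uint8_t mac[6] = {
		(uint8_t)(fuse_oui >> 16), (uint8_t)(fuse_oui >> 8), (uint8_t)fuse_oui,
		(uint8_t)(fuse_nic >> 16), (uint8_t)(fuse_nic >> 8), (uint8_t)fuse_nic,
	};

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 00:12:d1:80:b4:77 */
}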
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "io.h" +#include "ps.h" +#include "tx.h" +#include "event.h" +#include "hw_ops.h" + +/* + * TODO: this is here just for now, it must be removed when the data + * operations are in place. + */ +#include "../wl12xx/reg.h" + +static int wl1271_set_default_wep_key(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 id) +{ + int ret; + bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); + + if (is_ap) + ret = wl12xx_cmd_set_default_wep_key(wl, id, + wlvif->ap.bcast_hlid); + else + ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); + + if (ret < 0) + return ret; + + wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); + return 0; +} + +static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) +{ + int id; + + id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); + if (id >= wl->num_tx_desc) + return -EBUSY; + + __set_bit(id, wl->tx_frames_map); + wl->tx_frames[id] = skb; + wl->tx_frames_cnt++; + return id; +} + +void wl1271_free_tx_id(struct wl1271 *wl, int id) +{ + if (__test_and_clear_bit(id, wl->tx_frames_map)) { + if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) + clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); + + wl->tx_frames[id] = NULL; + wl->tx_frames_cnt--; + } +} +EXPORT_SYMBOL(wl1271_free_tx_id); + +static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)(skb->data + + sizeof(struct wl1271_tx_hw_descr)); + if (!ieee80211_is_auth(hdr->frame_control)) + return; + + /* + * add the station to the known list before transmitting the + * authentication response. this way it won't get de-authed by FW + * when transmitting too soon. + */ + wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1); + + /* + * ROC for 1 second on the AP channel for completing the connection. + * Note the ROC will be continued by the update_sta_state callbacks + * once the station reaches the associated state. + */ + wlcore_update_inconn_sta(wl, wlvif, NULL, true); + wlvif->pending_auth_reply_time = jiffies; + cancel_delayed_work(&wlvif->pending_auth_complete_work); + ieee80211_queue_delayed_work(wl->hw, + &wlvif->pending_auth_complete_work, + msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT)); +} + +static void wl1271_tx_regulate_link(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 hlid) +{ + bool fw_ps; + u8 tx_pkts; + + if (WARN_ON(!test_bit(hlid, wlvif->links_map))) + return; + + fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); + tx_pkts = wl->links[hlid].allocated_pkts; + + /* + * if in FW PS and there is enough data in FW we can put the link + * into high-level PS and clean out its TX queues. + * Make an exception if this is the only connected link. In this + * case FW-memory congestion is less of a problem. + * Note that a single connected STA means 2*ap_count + 1 active links, + * since we must account for the global and broadcast AP links + * for each AP. The "fw_ps" check assures us the other link is a STA + * connected to the AP. Otherwise the FW would not set the PSM bit. 
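+ * (Worked example: with one AP interface ap_count is 1, so the global and
+ * broadcast links plus a single connected STA give 3 active links, and the
+ * condition below needs a 4th before high-level PS is considered.)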
+ */ + if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && + tx_pkts >= WL1271_PS_STA_MAX_PACKETS) + wl12xx_ps_link_start(wl, wlvif, hlid, true); +} + +bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) +{ + return wl->dummy_packet == skb; +} +EXPORT_SYMBOL(wl12xx_is_dummy_packet); + +static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, struct ieee80211_sta *sta) +{ + if (sta) { + struct wl1271_station *wl_sta; + + wl_sta = (struct wl1271_station *)sta->drv_priv; + return wl_sta->hlid; + } else { + struct ieee80211_hdr *hdr; + + if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) + return wl->system_hlid; + + hdr = (struct ieee80211_hdr *)skb->data; + if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + return wlvif->ap.bcast_hlid; + else + return wlvif->ap.global_hlid; + } +} + +u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_tx_info *control; + + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); + + control = IEEE80211_SKB_CB(skb); + if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + wl1271_debug(DEBUG_TX, "tx offchannel"); + return wlvif->dev_hlid; + } + + return wlvif->sta.hlid; +} + +unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, + unsigned int packet_length) +{ + if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) || + !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)) + return ALIGN(packet_length, WL1271_TX_ALIGN_TO); + else + return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); +} +EXPORT_SYMBOL(wlcore_calc_packet_alignment); + +static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 extra, u32 buf_offset, + u8 hlid, bool is_gem) +{ + struct wl1271_tx_hw_descr *desc; + u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; + u32 total_blocks; + int id, ret = -EBUSY, ac; + u32 spare_blocks; + + if (buf_offset + total_len > wl->aggr_buf_size) + return -EAGAIN; + + spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem); + + /* allocate free identifier for the packet */ + id = wl1271_alloc_tx_id(wl, skb); + if (id < 0) + return id; + + total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); + + if (total_blocks <= wl->tx_blocks_available) { + desc = (struct wl1271_tx_hw_descr *)skb_push( + skb, total_len - skb->len); + + wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks, + spare_blocks); + + desc->id = id; + + wl->tx_blocks_available -= total_blocks; + wl->tx_allocated_blocks += total_blocks; + + /* + * If the FW was empty before, arm the Tx watchdog. Also do + * this on the first Tx after resume, as we always cancel the + * watchdog on suspend. 
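+ * ("Empty" means this allocation accounts for every block the FW currently
+ * holds, i.e. the tx_allocated_blocks == total_blocks test below.)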
+ */ + if (wl->tx_allocated_blocks == total_blocks || + test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags)) + wl12xx_rearm_tx_watchdog_locked(wl); + + ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + wl->tx_allocated_pkts[ac]++; + + if (test_bit(hlid, wl->links_map)) + wl->links[hlid].allocated_pkts++; + + ret = 0; + + wl1271_debug(DEBUG_TX, + "tx_allocate: size: %d, blocks: %d, id: %d", + total_len, total_blocks, id); + } else { + wl1271_free_tx_id(wl, id); + } + + return ret; +} + +static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 extra, + struct ieee80211_tx_info *control, u8 hlid) +{ + struct timespec ts; + struct wl1271_tx_hw_descr *desc; + int ac, rate_idx; + s64 hosttime; + u16 tx_attr = 0; + __le16 frame_control; + struct ieee80211_hdr *hdr; + u8 *frame_start; + bool is_dummy; + + desc = (struct wl1271_tx_hw_descr *) skb->data; + frame_start = (u8 *)(desc + 1); + hdr = (struct ieee80211_hdr *)(frame_start + extra); + frame_control = hdr->frame_control; + + /* relocate space for security header */ + if (extra) { + int hdrlen = ieee80211_hdrlen(frame_control); + memmove(frame_start, hdr, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + extra); + } + + /* configure packet life time */ + getnstimeofday(&ts); + hosttime = (timespec_to_ns(&ts) >> 10); + desc->start_time = cpu_to_le32(hosttime - wl->time_offset); + + is_dummy = wl12xx_is_dummy_packet(wl, skb); + if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS) + desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); + else + desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); + + /* queue */ + ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + desc->tid = skb->priority; + + if (is_dummy) { + /* + * FW expects the dummy packet to have an invalid session id - + * any session id that is different than the one set in the join + */ + tx_attr = (SESSION_COUNTER_INVALID << + TX_HW_ATTR_OFST_SESSION_COUNTER) & + TX_HW_ATTR_SESSION_COUNTER; + + tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; + } else if (wlvif) { + u8 session_id = wl->session_ids[hlid]; + + if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) && + (wlvif->bss_type == BSS_TYPE_AP_BSS)) + session_id = 0; + + /* configure the tx attributes */ + tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER; + } + + desc->hlid = hlid; + if (is_dummy || !wlvif) + rate_idx = 0; + else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { + /* + * if the packets are data packets + * send them with AP rate policies (EAPOLs are an exception), + * otherwise use default basic rates + */ + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) + rate_idx = wlvif->sta.basic_rate_idx; + else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) + rate_idx = wlvif->sta.p2p_rate_idx; + else if (ieee80211_is_data(frame_control)) + rate_idx = wlvif->sta.ap_rate_idx; + else + rate_idx = wlvif->sta.basic_rate_idx; + } else { + if (hlid == wlvif->ap.global_hlid) + rate_idx = wlvif->ap.mgmt_rate_idx; + else if (hlid == wlvif->ap.bcast_hlid || + skb->protocol == cpu_to_be16(ETH_P_PAE) || + !ieee80211_is_data(frame_control)) + /* + * send non-data, bcast and EAPOLs using the + * min basic rate + */ + rate_idx = wlvif->ap.bcast_rate_idx; + else + rate_idx = wlvif->ap.ucast_rate_idx[ac]; + } + + tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; + + /* for WEP shared auth - no fw encryption is needed */ + if (ieee80211_is_auth(frame_control) && + ieee80211_has_protected(frame_control)) + tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; + + /* send 
EAPOL frames as voice */ + if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) + tx_attr |= TX_HW_ATTR_EAPOL_FRAME; + + desc->tx_attr = cpu_to_le16(tx_attr); + + wlcore_hw_set_tx_desc_csum(wl, desc, skb); + wlcore_hw_set_tx_desc_data_len(wl, desc, skb); +} + +/* caller must hold wl->mutex */ +static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u32 buf_offset, u8 hlid) +{ + struct ieee80211_tx_info *info; + u32 extra = 0; + int ret = 0; + u32 total_len; + bool is_dummy; + bool is_gem = false; + + if (!skb) { + wl1271_error("discarding null skb"); + return -EINVAL; + } + + if (hlid == WL12XX_INVALID_LINK_ID) { + wl1271_error("invalid hlid. dropping skb 0x%p", skb); + return -EINVAL; + } + + info = IEEE80211_SKB_CB(skb); + + is_dummy = wl12xx_is_dummy_packet(wl, skb); + + if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && + info->control.hw_key && + info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + extra = WL1271_EXTRA_SPACE_TKIP; + + if (info->control.hw_key) { + bool is_wep; + u8 idx = info->control.hw_key->hw_key_idx; + u32 cipher = info->control.hw_key->cipher; + + is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || + (cipher == WLAN_CIPHER_SUITE_WEP104); + + if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) { + ret = wl1271_set_default_wep_key(wl, wlvif, idx); + if (ret < 0) + return ret; + wlvif->default_key = idx; + } + + is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); + } + + ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, + is_gem); + if (ret < 0) + return ret; + + wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); + + if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) { + wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb); + wl1271_tx_regulate_link(wl, wlvif, hlid); + } + + /* + * The length of each packet is stored in terms of + * words. Thus, we must pad the skb data to make sure its + * length is aligned. The number of padding bytes is computed + * and set in wl1271_tx_fill_hdr. + * In special cases, we want to align to a specific block size + * (eg. for wl128x with SDIO we align to 256). 
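+ * (For example, a 1501-byte entry pads to 1504 bytes with 4-byte word
+ * alignment, or to 1536 bytes with 256-byte SDIO block alignment.)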
+ */ + total_len = wlcore_calc_packet_alignment(wl, skb->len); + + memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); + memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); + + /* Revert side effects in the dummy packet skb, so it can be reused */ + if (is_dummy) + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); + + return total_len; +} + +u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, + enum ieee80211_band rate_band) +{ + struct ieee80211_supported_band *band; + u32 enabled_rates = 0; + int bit; + + band = wl->hw->wiphy->bands[rate_band]; + for (bit = 0; bit < band->n_bitrates; bit++) { + if (rate_set & 0x1) + enabled_rates |= band->bitrates[bit].hw_value; + rate_set >>= 1; + } + + /* MCS rates indication are on bits 16 - 31 */ + rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; + + for (bit = 0; bit < 16; bit++) { + if (rate_set & 0x1) + enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); + rate_set >>= 1; + } + + return enabled_rates; +} + +void wl1271_handle_tx_low_watermark(struct wl1271 *wl) +{ + int i; + struct wl12xx_vif *wlvif; + + wl12xx_for_each_wlvif(wl, wlvif) { + for (i = 0; i < NUM_TX_QUEUES; i++) { + if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i, + WLCORE_QUEUE_STOP_REASON_WATERMARK) && + wlvif->tx_queue_count[i] <= + WL1271_TX_QUEUE_LOW_WATERMARK) + /* firmware buffer has space, restart queues */ + wlcore_wake_queue(wl, wlvif, i, + WLCORE_QUEUE_STOP_REASON_WATERMARK); + } + } +} + +static int wlcore_select_ac(struct wl1271 *wl) +{ + int i, q = -1, ac; + u32 min_pkts = 0xffffffff; + + /* + * Find a non-empty ac where: + * 1. There are packets to transmit + * 2. The FW has the least allocated blocks + * + * We prioritize the ACs according to VO>VI>BE>BK + */ + for (i = 0; i < NUM_TX_QUEUES; i++) { + ac = wl1271_tx_get_queue(i); + if (wl->tx_queue_count[ac] && + wl->tx_allocated_pkts[ac] < min_pkts) { + q = ac; + min_pkts = wl->tx_allocated_pkts[q]; + } + } + + return q; +} + +static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl, + struct wl1271_link *lnk, u8 q) +{ + struct sk_buff *skb; + unsigned long flags; + + skb = skb_dequeue(&lnk->tx_queue[q]); + if (skb) { + spin_lock_irqsave(&wl->wl_lock, flags); + WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); + wl->tx_queue_count[q]--; + if (lnk->wlvif) { + WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0); + lnk->wlvif->tx_queue_count[q]--; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + + return skb; +} + +static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl, + u8 hlid, u8 ac, + u8 *low_prio_hlid) +{ + struct wl1271_link *lnk = &wl->links[hlid]; + + if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) { + if (*low_prio_hlid == WL12XX_INVALID_LINK_ID && + !skb_queue_empty(&lnk->tx_queue[ac]) && + wlcore_hw_lnk_low_prio(wl, hlid, lnk)) + /* we found the first non-empty low priority queue */ + *low_prio_hlid = hlid; + + return NULL; + } + + return wlcore_lnk_dequeue(wl, lnk, ac); +} + +static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 ac, u8 *hlid, + u8 *low_prio_hlid) +{ + struct sk_buff *skb = NULL; + int i, h, start_hlid; + + /* start from the link after the last one */ + start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links; + + /* dequeue according to AC, round robin on each link */ + for (i = 0; i < wl->num_links; i++) { + h = (start_hlid + i) % wl->num_links; + + /* only consider connected stations */ + if (!test_bit(h, wlvif->links_map)) + continue; + + skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, + 
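/* low_prio_hlid records the first non-empty low-priority link, used later as a fallback */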
low_prio_hlid); + if (!skb) + continue; + + wlvif->last_tx_hlid = h; + break; + } + + if (!skb) + wlvif->last_tx_hlid = 0; + + *hlid = wlvif->last_tx_hlid; + return skb; +} + +static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) +{ + unsigned long flags; + struct wl12xx_vif *wlvif = wl->last_wlvif; + struct sk_buff *skb = NULL; + int ac; + u8 low_prio_hlid = WL12XX_INVALID_LINK_ID; + + ac = wlcore_select_ac(wl); + if (ac < 0) + goto out; + + /* continue from last wlvif (round robin) */ + if (wlvif) { + wl12xx_for_each_wlvif_continue(wl, wlvif) { + if (!wlvif->tx_queue_count[ac]) + continue; + + skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, + &low_prio_hlid); + if (!skb) + continue; + + wl->last_wlvif = wlvif; + break; + } + } + + /* dequeue from the system HLID before the restarting wlvif list */ + if (!skb) { + skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid, + ac, &low_prio_hlid); + if (skb) { + *hlid = wl->system_hlid; + wl->last_wlvif = NULL; + } + } + + /* Do a new pass over the wlvif list. But no need to continue + * after last_wlvif. The previous pass should have found it. */ + if (!skb) { + wl12xx_for_each_wlvif(wl, wlvif) { + if (!wlvif->tx_queue_count[ac]) + goto next; + + skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, + &low_prio_hlid); + if (skb) { + wl->last_wlvif = wlvif; + break; + } + +next: + if (wlvif == wl->last_wlvif) + break; + } + } + + /* no high priority skbs found - but maybe a low priority one? */ + if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) { + struct wl1271_link *lnk = &wl->links[low_prio_hlid]; + skb = wlcore_lnk_dequeue(wl, lnk, ac); + + WARN_ON(!skb); /* we checked this before */ + *hlid = low_prio_hlid; + + /* ensure proper round robin in the vif/link levels */ + wl->last_wlvif = lnk->wlvif; + if (lnk->wlvif) + lnk->wlvif->last_tx_hlid = low_prio_hlid; + + } + +out: + if (!skb && + test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { + int q; + + skb = wl->dummy_packet; + *hlid = wl->system_hlid; + q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + spin_lock_irqsave(&wl->wl_lock, flags); + WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); + wl->tx_queue_count[q]--; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + + return skb; +} + +static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, u8 hlid) +{ + unsigned long flags; + int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + + if (wl12xx_is_dummy_packet(wl, skb)) { + set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); + } else { + skb_queue_head(&wl->links[hlid].tx_queue[q], skb); + + /* make sure we dequeue the same packet next time */ + wlvif->last_tx_hlid = (hlid + wl->num_links - 1) % + wl->num_links; + } + + spin_lock_irqsave(&wl->wl_lock, flags); + wl->tx_queue_count[q]++; + if (wlvif) + wlvif->tx_queue_count[q]++; + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +static bool wl1271_tx_is_data_present(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); + + return ieee80211_is_data_present(hdr->frame_control); +} + +void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) +{ + struct wl12xx_vif *wlvif; + u32 timeout; + u8 hlid; + + if (!wl->conf.rx_streaming.interval) + return; + + if (!wl->conf.rx_streaming.always && + !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) + return; + + timeout = wl->conf.rx_streaming.duration; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + bool found = false; + for_each_set_bit(hlid, 
active_hlids, wl->num_links) { + if (test_bit(hlid, wlvif->links_map)) { + found = true; + break; + } + } + + if (!found) + continue; + + /* enable rx streaming */ + if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) + ieee80211_queue_work(wl->hw, + &wlvif->rx_streaming_enable_work); + + mod_timer(&wlvif->rx_streaming_timer, + jiffies + msecs_to_jiffies(timeout)); + } +} + +/* + * Returns failure values only in case of failed bus ops within this function. + * wl1271_prepare_tx_frame retvals won't be returned in order to avoid + * triggering recovery by higher layers when not necessary. + * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery + * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame + * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING + * within prepare_tx_frame code but there's nothing we should do about those + * as well. + */ +int wlcore_tx_work_locked(struct wl1271 *wl) +{ + struct wl12xx_vif *wlvif; + struct sk_buff *skb; + struct wl1271_tx_hw_descr *desc; + u32 buf_offset = 0, last_len = 0; + bool sent_packets = false; + unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0}; + int ret = 0; + int bus_ret = 0; + u8 hlid; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + return 0; + + while ((skb = wl1271_skb_dequeue(wl, &hlid))) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool has_data = false; + + wlvif = NULL; + if (!wl12xx_is_dummy_packet(wl, skb)) + wlvif = wl12xx_vif_to_data(info->control.vif); + else + hlid = wl->system_hlid; + + has_data = wlvif && wl1271_tx_is_data_present(skb); + ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset, + hlid); + if (ret == -EAGAIN) { + /* + * Aggregation buffer is full. + * Flush buffer and try again. + */ + wl1271_skb_queue_head(wl, wlvif, skb, hlid); + + buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, + last_len); + bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, + wl->aggr_buf, buf_offset, true); + if (bus_ret < 0) + goto out; + + sent_packets = true; + buf_offset = 0; + continue; + } else if (ret == -EBUSY) { + /* + * Firmware buffer is full. + * Queue back last skb, and stop aggregating. + */ + wl1271_skb_queue_head(wl, wlvif, skb, hlid); + /* No work left, avoid scheduling redundant tx work */ + set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); + goto out_ack; + } else if (ret < 0) { + if (wl12xx_is_dummy_packet(wl, skb)) + /* + * fw still expects dummy packet, + * so re-enqueue it + */ + wl1271_skb_queue_head(wl, wlvif, skb, hlid); + else + ieee80211_free_txskb(wl->hw, skb); + goto out_ack; + } + last_len = ret; + buf_offset += last_len; + wl->tx_packets_count++; + if (has_data) { + desc = (struct wl1271_tx_hw_descr *) skb->data; + __set_bit(desc->hlid, active_hlids); + } + } + +out_ack: + if (buf_offset) { + buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len); + bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, + buf_offset, true); + if (bus_ret < 0) + goto out; + + sent_packets = true; + } + if (sent_packets) { + /* + * Interrupt the firmware with the new packets. 
This is only + * required for older hardware revisions + */ + if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) { + bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS, + wl->tx_packets_count); + if (bus_ret < 0) + goto out; + } + + wl1271_handle_tx_low_watermark(wl); + } + wl12xx_rearm_rx_streaming(wl, active_hlids); + +out: + return bus_ret; +} + +void wl1271_tx_work(struct work_struct *work) +{ + struct wl1271 *wl = container_of(work, struct wl1271, tx_work); + int ret; + + mutex_lock(&wl->mutex); + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_tx_work_locked(wl); + if (ret < 0) { + wl12xx_queue_recovery_work(wl); + goto out; + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); +} + +static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) +{ + u8 flags = 0; + + /* + * TODO: use wl12xx constants when this code is moved to wl12xx, as + * only it uses Tx-completion. + */ + if (rate_class_index <= 8) + flags |= IEEE80211_TX_RC_MCS; + + /* + * TODO: use wl12xx constants when this code is moved to wl12xx, as + * only it uses Tx-completion. + */ + if (rate_class_index == 0) + flags |= IEEE80211_TX_RC_SHORT_GI; + + return flags; +} + +static void wl1271_tx_complete_packet(struct wl1271 *wl, + struct wl1271_tx_hw_res_descr *result) +{ + struct ieee80211_tx_info *info; + struct ieee80211_vif *vif; + struct wl12xx_vif *wlvif; + struct sk_buff *skb; + int id = result->id; + int rate = -1; + u8 rate_flags = 0; + u8 retries = 0; + + /* check for id legality */ + if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) { + wl1271_warning("TX result illegal id: %d", id); + return; + } + + skb = wl->tx_frames[id]; + info = IEEE80211_SKB_CB(skb); + + if (wl12xx_is_dummy_packet(wl, skb)) { + wl1271_free_tx_id(wl, id); + return; + } + + /* info->control is valid as long as we don't update info->status */ + vif = info->control.vif; + wlvif = wl12xx_vif_to_data(vif); + + /* update the TX status info */ + if (result->status == TX_SUCCESS) { + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + info->flags |= IEEE80211_TX_STAT_ACK; + rate = wlcore_rate_to_idx(wl, result->rate_class_index, + wlvif->band); + rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index); + retries = result->ack_failures; + } else if (result->status == TX_RETRY_EXCEEDED) { + wl->stats.excessive_retries++; + retries = result->ack_failures; + } + + info->status.rates[0].idx = rate; + info->status.rates[0].count = retries; + info->status.rates[0].flags = rate_flags; + info->status.ack_signal = -1; + + wl->stats.retry_count += result->ack_failures; + + /* remove private header from packet */ + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); + + /* remove TKIP header space if present */ + if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && + info->control.hw_key && + info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { + int hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, + hdrlen); + skb_pull(skb, WL1271_EXTRA_SPACE_TKIP); + } + + wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" + " status 0x%x", + result->id, skb, result->ack_failures, + result->rate_class_index, result->status); + + /* return the packet to the stack */ + skb_queue_tail(&wl->deferred_tx_queue, skb); + queue_work(wl->freezable_wq, &wl->netstack_work); + wl1271_free_tx_id(wl, result->id); +} + +/* Called upon reception of a TX complete interrupt */ +int wlcore_tx_complete(struct wl1271 *wl) +{ + struct wl1271_acx_mem_map *memmap = 
wl->target_mem_map; + u32 count, fw_counter; + u32 i; + int ret; + + /* read the tx results from the chipset */ + ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result), + wl->tx_res_if, sizeof(*wl->tx_res_if), false); + if (ret < 0) + goto out; + + fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter); + + /* write host counter to chipset (to ack) */ + ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) + + offsetof(struct wl1271_tx_hw_res_if, + tx_result_host_counter), fw_counter); + if (ret < 0) + goto out; + + count = fw_counter - wl->tx_results_count; + wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); + + /* verify that the result buffer is not getting overrun */ + if (unlikely(count > TX_HW_RESULT_QUEUE_LEN)) + wl1271_warning("TX result overflow from chipset: %d", count); + + /* process the results */ + for (i = 0; i < count; i++) { + struct wl1271_tx_hw_res_descr *result; + u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK; + + /* process the packet */ + result = &(wl->tx_res_if->tx_results_queue[offset]); + wl1271_tx_complete_packet(wl, result); + + wl->tx_results_count++; + } + +out: + return ret; +} +EXPORT_SYMBOL(wlcore_tx_complete); + +void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) +{ + struct sk_buff *skb; + int i; + unsigned long flags; + struct ieee80211_tx_info *info; + int total[NUM_TX_QUEUES]; + struct wl1271_link *lnk = &wl->links[hlid]; + + for (i = 0; i < NUM_TX_QUEUES; i++) { + total[i] = 0; + while ((skb = skb_dequeue(&lnk->tx_queue[i]))) { + wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); + + if (!wl12xx_is_dummy_packet(wl, skb)) { + info = IEEE80211_SKB_CB(skb); + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + ieee80211_tx_status_ni(wl->hw, skb); + } + + total[i]++; + } + } + + spin_lock_irqsave(&wl->wl_lock, flags); + for (i = 0; i < NUM_TX_QUEUES; i++) { + wl->tx_queue_count[i] -= total[i]; + if (lnk->wlvif) + lnk->wlvif->tx_queue_count[i] -= total[i]; + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl1271_handle_tx_low_watermark(wl); +} + +/* caller must hold wl->mutex and TX must be stopped */ +void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) +{ + int i; + + /* TX failure */ + for_each_set_bit(i, wlvif->links_map, wl->num_links) { + if (wlvif->bss_type == BSS_TYPE_AP_BSS && + i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) { + /* this calls wl12xx_free_link */ + wl1271_free_sta(wl, wlvif, i); + } else { + u8 hlid = i; + wl12xx_free_link(wl, wlvif, &hlid); + } + } + wlvif->last_tx_hlid = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) + wlvif->tx_queue_count[i] = 0; +} +/* caller must hold wl->mutex and TX must be stopped */ +void wl12xx_tx_reset(struct wl1271 *wl) +{ + int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + + /* only reset the queues if something bad happened */ + if (wl1271_tx_total_queue_count(wl) != 0) { + for (i = 0; i < wl->num_links; i++) + wl1271_tx_reset_link_queues(wl, i); + + for (i = 0; i < NUM_TX_QUEUES; i++) + wl->tx_queue_count[i] = 0; + } + + /* + * Make sure the driver is at a consistent state, in case this + * function is called from a context other than interface removal. + * This call will always wake the TX queues. 
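+ * (After the reset every per-AC count is zero, so the watermark check below
+ * is guaranteed to release any queue stopped for the WATERMARK reason.)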
+ */ + wl1271_handle_tx_low_watermark(wl); + + for (i = 0; i < wl->num_tx_desc; i++) { + if (wl->tx_frames[i] == NULL) + continue; + + skb = wl->tx_frames[i]; + wl1271_free_tx_id(wl, i); + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); + + if (!wl12xx_is_dummy_packet(wl, skb)) { + /* + * Remove private headers before passing the skb to + * mac80211 + */ + info = IEEE80211_SKB_CB(skb); + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); + if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && + info->control.hw_key && + info->control.hw_key->cipher == + WLAN_CIPHER_SUITE_TKIP) { + int hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, + skb->data, hdrlen); + skb_pull(skb, WL1271_EXTRA_SPACE_TKIP); + } + + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + + ieee80211_tx_status_ni(wl->hw, skb); + } + } +} + +#define WL1271_TX_FLUSH_TIMEOUT 500000 + +/* caller must *NOT* hold wl->mutex */ +void wl1271_tx_flush(struct wl1271 *wl) +{ + unsigned long timeout, start_time; + int i; + start_time = jiffies; + timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); + + /* only one flush should be in progress, for consistent queue state */ + mutex_lock(&wl->flush_mutex); + + mutex_lock(&wl->mutex); + if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) { + mutex_unlock(&wl->mutex); + goto out; + } + + wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); + + while (!time_after(jiffies, timeout)) { + wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d", + wl->tx_frames_cnt, + wl1271_tx_total_queue_count(wl)); + + /* force Tx and give the driver some time to flush data */ + mutex_unlock(&wl->mutex); + if (wl1271_tx_total_queue_count(wl)) + wl1271_tx_work(&wl->tx_work); + msleep(20); + mutex_lock(&wl->mutex); + + if ((wl->tx_frames_cnt == 0) && + (wl1271_tx_total_queue_count(wl) == 0)) { + wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms", + jiffies_to_msecs(jiffies - start_time)); + goto out_wake; + } + } + + wl1271_warning("Unable to flush all TX buffers, " + "timed out (timeout %d ms", + WL1271_TX_FLUSH_TIMEOUT / 1000); + + /* forcibly flush all Tx buffers on our queues */ + for (i = 0; i < wl->num_links; i++) + wl1271_tx_reset_link_queues(wl, i); + +out_wake: + wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); + mutex_unlock(&wl->mutex); +out: + mutex_unlock(&wl->flush_mutex); +} +EXPORT_SYMBOL_GPL(wl1271_tx_flush); + +u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) +{ + if (WARN_ON(!rate_set)) + return 0; + + return BIT(__ffs(rate_set)); +} +EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get); + +void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue, enum wlcore_queue_stop_reason reason) +{ + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + bool stopped = !!wl->queue_stop_reasons[hwq]; + + /* queue should not be stopped for this reason */ + WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); + + if (stopped) + return; + + ieee80211_stop_queue(wl->hw, hwq); +} + +void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) +{ + unsigned long flags; + + spin_lock_irqsave(&wl->wl_lock, flags); + wlcore_stop_queue_locked(wl, wlvif, queue, reason); + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) +{ + unsigned long flags; + int hwq = wlcore_tx_get_mac80211_queue(wlvif, 
queue); + + spin_lock_irqsave(&wl->wl_lock, flags); + + /* queue should not be clear for this reason */ + WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); + + if (wl->queue_stop_reasons[hwq]) + goto out; + + ieee80211_wake_queue(wl->hw, hwq); + +out: + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +void wlcore_stop_queues(struct wl1271 *wl, + enum wlcore_queue_stop_reason reason) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&wl->wl_lock, flags); + + /* mark all possible queues as stopped */ + for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) + WARN_ON_ONCE(test_and_set_bit(reason, + &wl->queue_stop_reasons[i])); + + /* use the global version to make sure all vifs in mac80211 we don't + * know are stopped. + */ + ieee80211_stop_queues(wl->hw); + + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +void wlcore_wake_queues(struct wl1271 *wl, + enum wlcore_queue_stop_reason reason) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&wl->wl_lock, flags); + + /* mark all possible queues as awake */ + for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) + WARN_ON_ONCE(!test_and_clear_bit(reason, + &wl->queue_stop_reasons[i])); + + /* use the global version to make sure all vifs in mac80211 we don't + * know are woken up. + */ + ieee80211_wake_queues(wl->hw); + + spin_unlock_irqrestore(&wl->wl_lock, flags); +} + +bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) +{ + unsigned long flags; + bool stopped; + + spin_lock_irqsave(&wl->wl_lock, flags); + stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue, + reason); + spin_unlock_irqrestore(&wl->wl_lock, flags); + + return stopped; +} + +bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason) +{ + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + + assert_spin_locked(&wl->wl_lock); + return test_bit(reason, &wl->queue_stop_reasons[hwq]); +} + +bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue) +{ + int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); + + assert_spin_locked(&wl->wl_lock); + return !!wl->queue_stop_reasons[hwq]; +} diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h new file mode 100644 index 000000000..79cb3ff8b --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/tx.h @@ -0,0 +1,287 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
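The queue stop/wake helpers that close out tx.c above keep a per-hw-queue bitmask of stop reasons rather than a single flag. A standalone sketch of that semantics (simplified types, not driver code): a queue stopped for two reasons only wakes once both are cleared.

#include <stdbool.h>
#include <stdio.h>

enum { EX_REASON_WATERMARK, EX_REASON_FW_RESTART, EX_REASON_FLUSH };

static bool ex_stopped(unsigned long reasons)
{
	return reasons != 0;
}

int main(void)
{
	unsigned long reasons = 0;

	reasons |= 1UL << EX_REASON_FLUSH;	/* stopped for a flush */
	reasons |= 1UL << EX_REASON_WATERMARK;	/* queue filled up meanwhile */

	reasons &= ~(1UL << EX_REASON_FLUSH);	/* flush finished */
	printf("stopped: %d\n", ex_stopped(reasons));	/* still 1 */

	reasons &= ~(1UL << EX_REASON_WATERMARK);
	printf("stopped: %d\n", ex_stopped(reasons));	/* now 0 */
	return 0;
}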
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __TX_H__ +#define __TX_H__ + +#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 +#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 + +#define TX_HW_ATTR_SAVE_RETRIES BIT(0) +#define TX_HW_ATTR_HEADER_PAD BIT(1) +#define TX_HW_ATTR_SESSION_COUNTER (BIT(2) | BIT(3) | BIT(4)) +#define TX_HW_ATTR_RATE_POLICY (BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9)) +#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11)) +#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12) +#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13) +#define TX_HW_ATTR_HOST_ENCRYPT BIT(14) +#define TX_HW_ATTR_EAPOL_FRAME BIT(15) + +#define TX_HW_ATTR_OFST_SAVE_RETRIES 0 +#define TX_HW_ATTR_OFST_HEADER_PAD 1 +#define TX_HW_ATTR_OFST_SESSION_COUNTER 2 +#define TX_HW_ATTR_OFST_RATE_POLICY 5 +#define TX_HW_ATTR_OFST_LAST_WORD_PAD 10 +#define TX_HW_ATTR_OFST_TX_CMPLT_REQ 12 + +#define TX_HW_RESULT_QUEUE_LEN 16 +#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf + +#define WL1271_TX_ALIGN_TO 4 +#define WL1271_EXTRA_SPACE_TKIP 4 +#define WL1271_EXTRA_SPACE_AES 8 +#define WL1271_EXTRA_SPACE_MAX 8 + +/* Used for management frames and dummy packets */ +#define WL1271_TID_MGMT 7 + +/* stop a ROC for pending authentication reply after this time (ms) */ +#define WLCORE_PEND_AUTH_ROC_TIMEOUT 1000 + +struct wl127x_tx_mem { + /* + * Number of extra memory blocks to allocate for this packet + * in addition to the number of blocks derived from the packet + * length. + */ + u8 extra_blocks; + /* + * Total number of memory blocks allocated by the host for + * this packet. Must be equal or greater than the actual + * blocks number allocated by HW. + */ + u8 total_mem_blocks; +} __packed; + +struct wl128x_tx_mem { + /* + * Total number of memory blocks allocated by the host for + * this packet. + */ + u8 total_mem_blocks; + /* + * Number of extra bytes, at the end of the frame. the host + * uses this padding to complete each frame to integer number + * of SDIO blocks. + */ + u8 extra_bytes; +} __packed; + +struct wl18xx_tx_mem { + /* + * Total number of memory blocks allocated by the host for + * this packet. + */ + u8 total_mem_blocks; + + /* + * control bits + */ + u8 ctrl; +} __packed; + +/* + * On wl128x based devices, when TX packets are aggregated, each packet + * size must be aligned to the SDIO block size. The maximum block size + * is bounded by the type of the padded bytes field that is sent to the + * FW. Currently the type is u8, so the maximum block size is 256 bytes. + */ +#define WL12XX_BUS_BLOCK_SIZE min(512u, \ + (1u << (8 * sizeof(((struct wl128x_tx_mem *) 0)->extra_bytes)))) + +struct wl1271_tx_hw_descr { + /* Length of packet in words, including descriptor+header+data */ + __le16 length; + union { + struct wl127x_tx_mem wl127x_mem; + struct wl128x_tx_mem wl128x_mem; + struct wl18xx_tx_mem wl18xx_mem; + } __packed; + /* Device time (in us) when the packet arrived to the driver */ + __le32 start_time; + /* + * Max delay in TUs until transmission. The last device time the + * packet can be transmitted is: start_time + (1024 * life_time) + */ + __le16 life_time; + /* Bitwise fields - see TX_ATTR... definitions above. */ + __le16 tx_attr; + /* Packet identifier used also in the Tx-Result. 
*/ + u8 id; + /* The packet TID value (as User-Priority) */ + u8 tid; + /* host link ID (HLID) */ + u8 hlid; + + union { + u8 wl12xx_reserved; + + /* + * bit 0 -> 0 = udp, 1 = tcp + * bit 1:7 -> IP header offset + */ + u8 wl18xx_checksum_data; + } __packed; +} __packed; + +enum wl1271_tx_hw_res_status { + TX_SUCCESS = 0, + TX_HW_ERROR = 1, + TX_DISABLED = 2, + TX_RETRY_EXCEEDED = 3, + TX_TIMEOUT = 4, + TX_KEY_NOT_FOUND = 5, + TX_PEER_NOT_FOUND = 6, + TX_SESSION_MISMATCH = 7, + TX_LINK_NOT_VALID = 8, +}; + +struct wl1271_tx_hw_res_descr { + /* Packet Identifier - same value used in the Tx descriptor.*/ + u8 id; + /* The status of the transmission, indicating success or one of + several possible reasons for failure. */ + u8 status; + /* Total air access duration including all retrys and overheads.*/ + __le16 medium_usage; + /* The time passed from host xfer to Tx-complete.*/ + __le32 fw_handling_time; + /* Total media delay + (from 1st EDCA AIFS counter until TX Complete). */ + __le32 medium_delay; + /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ + u8 tx_security_sequence_number_lsb; + /* Retry count - number of transmissions without successful ACK.*/ + u8 ack_failures; + /* The rate that succeeded getting ACK + (Valid only if status=SUCCESS). */ + u8 rate_class_index; + /* for 4-byte alignment. */ + u8 spare; +} __packed; + +struct wl1271_tx_hw_res_if { + __le32 tx_result_fw_counter; + __le32 tx_result_host_counter; + struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; +} __packed; + +enum wlcore_queue_stop_reason { + WLCORE_QUEUE_STOP_REASON_WATERMARK, + WLCORE_QUEUE_STOP_REASON_FW_RESTART, + WLCORE_QUEUE_STOP_REASON_FLUSH, + WLCORE_QUEUE_STOP_REASON_SPARE_BLK, /* 18xx specific */ +}; + +static inline int wl1271_tx_get_queue(int queue) +{ + switch (queue) { + case 0: + return CONF_TX_AC_VO; + case 1: + return CONF_TX_AC_VI; + case 2: + return CONF_TX_AC_BE; + case 3: + return CONF_TX_AC_BK; + default: + return CONF_TX_AC_BE; + } +} + +static inline +int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue) +{ + int mac_queue = wlvif->hw_queue_base; + + switch (queue) { + case CONF_TX_AC_VO: + return mac_queue + 0; + case CONF_TX_AC_VI: + return mac_queue + 1; + case CONF_TX_AC_BE: + return mac_queue + 2; + case CONF_TX_AC_BK: + return mac_queue + 3; + default: + return mac_queue + 2; + } +} + +static inline int wl1271_tx_total_queue_count(struct wl1271 *wl) +{ + int i, count = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) + count += wl->tx_queue_count[i]; + + return count; +} + +void wl1271_tx_work(struct work_struct *work); +int wlcore_tx_work_locked(struct wl1271 *wl); +int wlcore_tx_complete(struct wl1271 *wl); +void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl12xx_tx_reset(struct wl1271 *wl); +void wl1271_tx_flush(struct wl1271 *wl); +u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band); +u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, + enum ieee80211_band rate_band); +u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); +u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, struct ieee80211_sta *sta); +void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); +void wl1271_handle_tx_low_watermark(struct wl1271 *wl); +bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); +void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); +unsigned int wlcore_calc_packet_alignment(struct wl1271 
*wl, + unsigned int packet_length); +void wl1271_free_tx_id(struct wl1271 *wl, int id); +void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue, enum wlcore_queue_stop_reason reason); +void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason); +void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason); +void wlcore_stop_queues(struct wl1271 *wl, + enum wlcore_queue_stop_reason reason); +void wlcore_wake_queues(struct wl1271 *wl, + enum wlcore_queue_stop_reason reason); +bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, + struct wl12xx_vif *wlvif, u8 queue, + enum wlcore_queue_stop_reason reason); +bool +wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + u8 queue, + enum wlcore_queue_stop_reason reason); +bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 queue); + +/* from main.c */ +void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); +void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c new file mode 100644 index 000000000..fd4e9ba17 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -0,0 +1,197 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2014 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#include +#include + +#include "wlcore.h" +#include "debug.h" +#include "ps.h" +#include "hw_ops.h" +#include "vendor_cmd.h" + +static const +struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = { + [WLCORE_VENDOR_ATTR_FREQ] = { .type = NLA_U32 }, + [WLCORE_VENDOR_ATTR_GROUP_ID] = { .type = NLA_U32 }, + [WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_BINARY, + .len = WLAN_MAX_KEY_LEN }, +}; + +static int +wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wl1271 *wl = hw->priv; + struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR]; + int ret; + + wl1271_debug(DEBUG_CMD, "vendor cmd smart config start"); + + if (!data) + return -EINVAL; + + ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, + wlcore_vendor_attr_policy); + if (ret) + return ret; + + if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID]) + return -EINVAL; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_smart_config_start(wl, + nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return 0; +} + +static int +wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wl1271 *wl = hw->priv; + int ret; + + wl1271_debug(DEBUG_CMD, "testmode cmd smart config stop"); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_smart_config_stop(wl); + + 
wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int +wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wl1271 *wl = hw->priv; + struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR]; + int ret; + + wl1271_debug(DEBUG_CMD, "testmode cmd smart config set group key"); + + if (!data) + return -EINVAL; + + ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, + wlcore_vendor_attr_policy); + if (ret) + return ret; + + if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] || + !tb[WLCORE_VENDOR_ATTR_GROUP_KEY]) + return -EINVAL; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state != WLCORE_STATE_ON)) { + ret = -EINVAL; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wlcore_smart_config_set_group_key(wl, + nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]), + nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), + nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static const struct wiphy_vendor_command wlcore_vendor_commands[] = { + { + .info = { + .vendor_id = TI_OUI, + .subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_START, + }, + .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wlcore_vendor_cmd_smart_config_start, + }, + { + .info = { + .vendor_id = TI_OUI, + .subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_STOP, + }, + .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wlcore_vendor_cmd_smart_config_stop, + }, + { + .info = { + .vendor_id = TI_OUI, + .subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_SET_GROUP_KEY, + }, + .flags = WIPHY_VENDOR_CMD_NEED_NETDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wlcore_vendor_cmd_smart_config_set_group_key, + }, +}; + +static const struct nl80211_vendor_cmd_info wlcore_vendor_events[] = { + { + .vendor_id = TI_OUI, + .subcmd = WLCORE_VENDOR_EVENT_SC_SYNC, + }, + { + .vendor_id = TI_OUI, + .subcmd = WLCORE_VENDOR_EVENT_SC_DECODE, + }, +}; + +void wlcore_set_vendor_commands(struct wiphy *wiphy) +{ + wiphy->vendor_commands = wlcore_vendor_commands; + wiphy->n_vendor_commands = ARRAY_SIZE(wlcore_vendor_commands); + wiphy->vendor_events = wlcore_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wlcore_vendor_events); +} diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.h b/drivers/net/wireless/ti/wlcore/vendor_cmd.h new file mode 100644 index 000000000..6e0c15e30 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.h @@ -0,0 +1,45 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2014 Texas Instruments. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ + +#ifndef __WLCORE_VENDOR_H__ +#define __WLCORE_VENDOR_H__ + +#ifdef __KERNEL__ +void wlcore_set_vendor_commands(struct wiphy *wiphy); +#endif + +#define TI_OUI 0x080028 + +enum wlcore_vendor_commands { + WLCORE_VENDOR_CMD_SMART_CONFIG_START, + WLCORE_VENDOR_CMD_SMART_CONFIG_STOP, + WLCORE_VENDOR_CMD_SMART_CONFIG_SET_GROUP_KEY, + + NUM_WLCORE_VENDOR_CMD, + MAX_WLCORE_VENDOR_CMD = NUM_WLCORE_VENDOR_CMD - 1 +}; + +enum wlcore_vendor_attributes { + WLCORE_VENDOR_ATTR_FREQ, + WLCORE_VENDOR_ATTR_PSK, + WLCORE_VENDOR_ATTR_SSID, + WLCORE_VENDOR_ATTR_GROUP_ID, + WLCORE_VENDOR_ATTR_GROUP_KEY, + + NUM_WLCORE_VENDOR_ATTR, + MAX_WLCORE_VENDOR_ATTR = NUM_WLCORE_VENDOR_ATTR - 1 +}; + +enum wlcore_vendor_events { + WLCORE_VENDOR_EVENT_SC_SYNC, + WLCORE_VENDOR_EVENT_SC_DECODE, +}; + +#endif /* __WLCORE_VENDOR_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h new file mode 100644 index 000000000..22b0bc98d --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h @@ -0,0 +1,137 @@ +#ifndef __WL12XX_80211_H__ +#define __WL12XX_80211_H__ + +#include /* ETH_ALEN */ +#include + +/* RATES */ +#define IEEE80211_CCK_RATE_1MB 0x02 +#define IEEE80211_CCK_RATE_2MB 0x04 +#define IEEE80211_CCK_RATE_5MB 0x0B +#define IEEE80211_CCK_RATE_11MB 0x16 +#define IEEE80211_OFDM_RATE_6MB 0x0C +#define IEEE80211_OFDM_RATE_9MB 0x12 +#define IEEE80211_OFDM_RATE_12MB 0x18 +#define IEEE80211_OFDM_RATE_18MB 0x24 +#define IEEE80211_OFDM_RATE_24MB 0x30 +#define IEEE80211_OFDM_RATE_36MB 0x48 +#define IEEE80211_OFDM_RATE_48MB 0x60 +#define IEEE80211_OFDM_RATE_54MB 0x6C +#define IEEE80211_BASIC_RATE_MASK 0x80 + +#define IEEE80211_CCK_RATE_1MB_MASK (1<<0) +#define IEEE80211_CCK_RATE_2MB_MASK (1<<1) +#define IEEE80211_CCK_RATE_5MB_MASK (1<<2) +#define IEEE80211_CCK_RATE_11MB_MASK (1<<3) +#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4) +#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5) +#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6) +#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7) +#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8) +#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9) +#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10) +#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11) + +#define IEEE80211_CCK_RATES_MASK 0x0000000F +#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \ + IEEE80211_CCK_RATE_2MB_MASK) +#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \ + IEEE80211_CCK_RATE_5MB_MASK | \ + IEEE80211_CCK_RATE_11MB_MASK) + +#define IEEE80211_OFDM_RATES_MASK 0x00000FF0 +#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \ + IEEE80211_OFDM_RATE_12MB_MASK | \ + IEEE80211_OFDM_RATE_24MB_MASK) +#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \ + IEEE80211_OFDM_RATE_9MB_MASK | \ + IEEE80211_OFDM_RATE_18MB_MASK | \ + IEEE80211_OFDM_RATE_36MB_MASK | \ + IEEE80211_OFDM_RATE_48MB_MASK | \ + IEEE80211_OFDM_RATE_54MB_MASK) +#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \ + IEEE80211_CCK_DEFAULT_RATES_MASK) + + +/* This really should be 8, but not for our firmware */ +#define MAX_SUPPORTED_RATES 32 +#define MAX_COUNTRY_TRIPLETS 32 + +/* Headers */ +struct ieee80211_header { + __le16 frame_ctl; + __le16 duration_id; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; +} __packed; + +struct wl12xx_ie_header { + u8 id; + u8 len; +} __packed; + +/* IEs */ + +struct wl12xx_ie_ssid { + struct wl12xx_ie_header header; + char 
ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; + +struct wl12xx_ie_rates { + struct wl12xx_ie_header header; + u8 rates[MAX_SUPPORTED_RATES]; +} __packed; + +struct wl12xx_ie_ds_params { + struct wl12xx_ie_header header; + u8 channel; +} __packed; + +struct country_triplet { + u8 channel; + u8 num_channels; + u8 max_tx_power; +} __packed; + +struct wl12xx_ie_country { + struct wl12xx_ie_header header; + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; + struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; +} __packed; + + +/* Templates */ + +struct wl12xx_null_data_template { + struct ieee80211_header header; +} __packed; + +struct wl12xx_ps_poll_template { + __le16 fc; + __le16 aid; + u8 bssid[ETH_ALEN]; + u8 ta[ETH_ALEN]; +} __packed; + +struct wl12xx_arp_rsp_template { + /* not including ieee80211 header */ + + u8 llc_hdr[sizeof(rfc1042_header)]; + __be16 llc_type; + + struct arphdr arp_hdr; + u8 sender_hw[ETH_ALEN]; + __be32 sender_ip; + u8 target_hw[ETH_ALEN]; + __be32 target_ip; +} __packed; + +struct wl12xx_disconn_template { + struct ieee80211_header header; + __le16 disconn_reason; +} __packed; + +#endif diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h new file mode 100644 index 000000000..7f363fa56 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -0,0 +1,649 @@ +/* + * This file is part of wlcore + * + * Copyright (C) 2011 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WLCORE_H__ +#define __WLCORE_H__ + +#include + +#include "wlcore_i.h" +#include "event.h" +#include "boot.h" + +/* The maximum number of Tx descriptors in all chip families */ +#define WLCORE_MAX_TX_DESCRIPTORS 32 + +/* + * We always allocate this number of mac addresses. 
If we don't + * have enough allocated addresses, the LAA bit is used + */ +#define WLCORE_NUM_MAC_ADDRESSES 3 + +/* wl12xx/wl18xx maximum transmission power (in dBm) */ +#define WLCORE_MAX_TXPWR 25 + +/* forward declaration */ +struct wl1271_tx_hw_descr; +enum wl_rx_buf_align; +struct wl1271_rx_descriptor; + +struct wlcore_ops { + int (*setup)(struct wl1271 *wl); + int (*identify_chip)(struct wl1271 *wl); + int (*identify_fw)(struct wl1271 *wl); + int (*boot)(struct wl1271 *wl); + int (*plt_init)(struct wl1271 *wl); + int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr, + void *buf, size_t len); + int (*ack_event)(struct wl1271 *wl); + int (*wait_for_event)(struct wl1271 *wl, enum wlcore_wait_event event, + bool *timeout); + int (*process_mailbox_events)(struct wl1271 *wl); + u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks); + void (*set_tx_desc_blocks)(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + u32 blks, u32 spare_blks); + void (*set_tx_desc_data_len)(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb); + enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl, + u32 rx_desc); + int (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len); + u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data, + u32 data_len); + int (*tx_delayed_compl)(struct wl1271 *wl); + void (*tx_immediate_compl)(struct wl1271 *wl); + int (*hw_init)(struct wl1271 *wl); + int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status, + struct wl_fw_status *fw_status); + u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl, + struct wl12xx_vif *wlvif); + int (*get_pg_ver)(struct wl1271 *wl, s8 *ver); + int (*get_mac)(struct wl1271 *wl); + void (*set_tx_desc_csum)(struct wl1271 *wl, + struct wl1271_tx_hw_descr *desc, + struct sk_buff *skb); + void (*set_rx_csum)(struct wl1271 *wl, + struct wl1271_rx_descriptor *desc, + struct sk_buff *skb); + u32 (*ap_get_mimo_wide_rate_mask)(struct wl1271 *wl, + struct wl12xx_vif *wlvif); + int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir); + int (*handle_static_data)(struct wl1271 *wl, + struct wl1271_static_data *static_data); + int (*scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_scan_request *req); + int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); + void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem); + int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf); + int (*channel_switch)(struct wl1271 *wl, + struct wl12xx_vif *wlvif, + struct ieee80211_channel_switch *ch_switch); + u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); + void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif); + int (*set_peer_cap)(struct wl1271 *wl, + struct ieee80211_sta_ht_cap *ht_cap, + bool allow_ht_operation, + u32 rate_set, u8 hlid); + u32 (*convert_hwaddr)(struct wl1271 *wl, u32 hwaddr); + bool (*lnk_high_prio)(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk); + bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid, + struct wl1271_link *lnk); + int (*interrupt_notify)(struct wl1271 *wl, bool action); + int (*rx_ba_filter)(struct wl1271 *wl, bool action); + int 
(*ap_sleep)(struct wl1271 *wl); + int (*smart_config_start)(struct wl1271 *wl, u32 group_bitmap); + int (*smart_config_stop)(struct wl1271 *wl); + int (*smart_config_set_group_key)(struct wl1271 *wl, u16 group_id, + u8 key_len, u8 *key); + int (*set_cac)(struct wl1271 *wl, struct wl12xx_vif *wlvif, + bool start); + int (*dfs_master_restart)(struct wl1271 *wl, struct wl12xx_vif *wlvif); +}; + +enum wlcore_partitions { + PART_DOWN, + PART_WORK, + PART_BOOT, + PART_DRPW, + PART_TOP_PRCM_ELP_SOC, + PART_PHY_INIT, + + PART_TABLE_LEN, +}; + +struct wlcore_partition { + u32 size; + u32 start; +}; + +struct wlcore_partition_set { + struct wlcore_partition mem; + struct wlcore_partition reg; + struct wlcore_partition mem2; + struct wlcore_partition mem3; +}; + +enum wlcore_registers { + /* register addresses, used with partition translation */ + REG_ECPU_CONTROL, + REG_INTERRUPT_NO_CLEAR, + REG_INTERRUPT_ACK, + REG_COMMAND_MAILBOX_PTR, + REG_EVENT_MAILBOX_PTR, + REG_INTERRUPT_TRIG, + REG_INTERRUPT_MASK, + REG_PC_ON_RECOVERY, + REG_CHIP_ID_B, + REG_CMD_MBOX_ADDRESS, + + /* data access memory addresses, used with partition translation */ + REG_SLV_MEM_DATA, + REG_SLV_REG_DATA, + + /* raw data access memory addresses */ + REG_RAW_FW_STATUS_ADDR, + + REG_TABLE_LEN, +}; + +struct wl1271_stats { + void *fw_stats; + unsigned long fw_stats_update; + size_t fw_stats_len; + + unsigned int retry_count; + unsigned int excessive_retries; +}; + +struct wl1271 { + bool initialized; + struct ieee80211_hw *hw; + bool mac80211_registered; + + struct device *dev; + struct platform_device *pdev; + + void *if_priv; + + struct wl1271_if_operations *if_ops; + + int irq; + + int irq_flags; + + spinlock_t wl_lock; + + enum wlcore_state state; + enum wl12xx_fw_type fw_type; + bool plt; + enum plt_mode plt_mode; + u8 fem_manuf; + u8 last_vif_count; + struct mutex mutex; + + unsigned long flags; + + struct wlcore_partition_set curr_part; + + struct wl1271_chip chip; + + int cmd_box_addr; + + u8 *fw; + size_t fw_len; + void *nvs; + size_t nvs_len; + + s8 hw_pg_ver; + + /* address read from the fuse ROM */ + u32 fuse_oui_addr; + u32 fuse_nic_addr; + + /* we have up to 2 MAC addresses */ + struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES]; + int channel; + u8 system_hlid; + + unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)]; + unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; + unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; + unsigned long rate_policies_map[ + BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)]; + unsigned long klv_templates_map[ + BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)]; + + u8 session_ids[WLCORE_MAX_LINKS]; + + struct list_head wlvif_list; + + u8 sta_count; + u8 ap_count; + + struct wl1271_acx_mem_map *target_mem_map; + + /* Accounting for allocated / available TX blocks on HW */ + u32 tx_blocks_freed; + u32 tx_blocks_available; + u32 tx_allocated_blocks; + u32 tx_results_count; + + /* Accounting for allocated / available Tx packets in HW */ + u32 tx_pkts_freed[NUM_TX_QUEUES]; + u32 tx_allocated_pkts[NUM_TX_QUEUES]; + + /* Transmitted TX packets counter for chipset interface */ + u32 tx_packets_count; + + /* Time-offset between host and chipset clocks */ + s64 time_offset; + + /* Frames scheduled for transmission, not handled yet */ + int tx_queue_count[NUM_TX_QUEUES]; + unsigned long queue_stop_reasons[ + NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES]; + + /* Frames received, not handled yet by mac80211 */ + struct sk_buff_head deferred_rx_queue; + + /* Frames sent, not returned yet to mac80211 
*/ + struct sk_buff_head deferred_tx_queue; + + struct work_struct tx_work; + struct workqueue_struct *freezable_wq; + + /* Pending TX frames */ + unsigned long tx_frames_map[BITS_TO_LONGS(WLCORE_MAX_TX_DESCRIPTORS)]; + struct sk_buff *tx_frames[WLCORE_MAX_TX_DESCRIPTORS]; + int tx_frames_cnt; + + /* FW Rx counter */ + u32 rx_counter; + + /* Intermediate buffer, used for packet aggregation */ + u8 *aggr_buf; + u32 aggr_buf_size; + + /* Reusable dummy packet template */ + struct sk_buff *dummy_packet; + + /* Network stack work */ + struct work_struct netstack_work; + + /* FW log buffer */ + u8 *fwlog; + + /* Number of valid bytes in the FW log buffer */ + ssize_t fwlog_size; + + /* FW log end marker */ + u32 fwlog_end; + + /* FW memory block size */ + u32 fw_mem_block_size; + + /* Sysfs FW log entry readers wait queue */ + wait_queue_head_t fwlog_waitq; + + /* Hardware recovery work */ + struct work_struct recovery_work; + bool watchdog_recovery; + + /* Reg domain last configuration */ + u32 reg_ch_conf_last[2]; + /* Reg domain pending configuration */ + u32 reg_ch_conf_pending[2]; + + /* Pointer that holds DMA-friendly block for the mailbox */ + void *mbox; + + /* The mbox event mask */ + u32 event_mask; + /* events to unmask only when ap interface is up */ + u32 ap_event_mask; + + /* Mailbox pointers */ + u32 mbox_size; + u32 mbox_ptr[2]; + + /* Are we currently scanning */ + struct wl12xx_vif *scan_wlvif; + struct wl1271_scan scan; + struct delayed_work scan_complete_work; + + struct ieee80211_vif *roc_vif; + struct delayed_work roc_complete_work; + + struct wl12xx_vif *sched_vif; + + /* The current band */ + enum ieee80211_band band; + + struct completion *elp_compl; + struct delayed_work elp_work; + + /* in dBm */ + int power_level; + + struct wl1271_stats stats; + + __le32 *buffer_32; + u32 buffer_cmd; + u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; + + void *raw_fw_status; + struct wl_fw_status *fw_status; + struct wl1271_tx_hw_res_if *tx_res_if; + + /* Current chipset configuration */ + struct wlcore_conf conf; + + bool sg_enabled; + + bool enable_11a; + + int recovery_count; + + /* Most recently reported noise in dBm */ + s8 noise; + + /* bands supported by this instance of wl12xx */ + struct ieee80211_supported_band bands[WLCORE_NUM_BANDS]; + + /* + * wowlan trigger was configured during suspend. + * (currently, only "ANY" trigger is supported) + */ + bool wow_enabled; + bool irq_wake_enabled; + + /* + * AP-mode - links indexed by HLID. The global and broadcast links + * are always active. 
+ */ + struct wl1271_link links[WLCORE_MAX_LINKS]; + + /* number of currently active links */ + int active_link_count; + + /* Fast/slow links bitmap according to FW */ + unsigned long fw_fast_lnk_map; + + /* AP-mode - a bitmap of links currently in PS mode according to FW */ + unsigned long ap_fw_ps_map; + + /* AP-mode - a bitmap of links currently in PS mode in mac80211 */ + unsigned long ap_ps_map; + + /* Quirks of specific hardware revisions */ + unsigned int quirks; + + /* number of currently active RX BA sessions */ + int ba_rx_session_count; + + /* Maximum number of supported RX BA sessions */ + int ba_rx_session_count_max; + + /* AP-mode - number of currently connected stations */ + int active_sta_count; + + /* Flag determining whether AP should broadcast OFDM-only rates */ + bool ofdm_only_ap; + + /* last wlvif we transmitted from */ + struct wl12xx_vif *last_wlvif; + + /* work to fire when Tx is stuck */ + struct delayed_work tx_watchdog_work; + + struct wlcore_ops *ops; + /* pointer to the lower driver partition table */ + const struct wlcore_partition_set *ptable; + /* pointer to the lower driver register table */ + const int *rtable; + /* name of the firmwares to load - for PLT, single role, multi-role */ + const char *plt_fw_name; + const char *sr_fw_name; + const char *mr_fw_name; + + u8 scan_templ_id_2_4; + u8 scan_templ_id_5; + u8 sched_scan_templ_id_2_4; + u8 sched_scan_templ_id_5; + u8 max_channels_5; + + /* per-chip-family private structure */ + void *priv; + + /* number of TX descriptors the HW supports. */ + u32 num_tx_desc; + /* number of RX descriptors the HW supports. */ + u32 num_rx_desc; + /* number of links the HW supports */ + u8 num_links; + /* max stations a single AP can support */ + u8 max_ap_stations; + + /* translate HW Tx rates to standard rate-indices */ + const u8 **band_rate_to_idx; + + /* size of table for HW rates that can be received from chip */ + u8 hw_tx_rate_tbl_size; + + /* this HW rate and below are considered HT rates for this chip */ + u8 hw_min_ht_rate; + + /* HW HT (11n) capabilities */ + struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS]; + + /* the current dfs region */ + enum nl80211_dfs_regions dfs_region; + + /* size of the private FW status data */ + size_t fw_status_len; + size_t fw_status_priv_len; + + /* RX Data filter rule state - enabled/disabled */ + unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)]; + + /* size of the private static data */ + size_t static_data_priv_len; + + /* the current channel type */ + enum nl80211_channel_type channel_type; + + /* mutex for protecting the tx_flush function */ + struct mutex flush_mutex; + + /* sleep auth value currently configured to FW */ + int sleep_auth; + + /* the number of allocated MAC addresses in this chip */ + int num_mac_addr; + + /* minimum FW version required for the driver to work in single-role */ + unsigned int min_sr_fw_ver[NUM_FW_VER]; + + /* minimum FW version required for the driver to work in multi-role */ + unsigned int min_mr_fw_ver[NUM_FW_VER]; + + struct completion nvs_loading_complete; + + /* interface combinations supported by the hw */ + const struct ieee80211_iface_combination *iface_combinations; + u8 n_iface_combinations; +}; + +int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); +int wlcore_remove(struct platform_device *pdev); +struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, + u32 mbox_size); +int wlcore_free_hw(struct wl1271 *wl); +int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, + 
struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf); +void wlcore_regdomain_config(struct wl1271 *wl); +void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct wl1271_station *wl_sta, bool in_conn); + +static inline void +wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band, + struct ieee80211_sta_ht_cap *ht_cap) +{ + memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap)); +} + +/* Tell wlcore not to care about this element when checking the version */ +#define WLCORE_FW_VER_IGNORE -1 + +static inline void +wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, + unsigned int iftype_sr, unsigned int major_sr, + unsigned int subtype_sr, unsigned int minor_sr, + unsigned int iftype_mr, unsigned int major_mr, + unsigned int subtype_mr, unsigned int minor_mr) +{ + wl->min_sr_fw_ver[FW_VER_CHIP] = chip; + wl->min_sr_fw_ver[FW_VER_IF_TYPE] = iftype_sr; + wl->min_sr_fw_ver[FW_VER_MAJOR] = major_sr; + wl->min_sr_fw_ver[FW_VER_SUBTYPE] = subtype_sr; + wl->min_sr_fw_ver[FW_VER_MINOR] = minor_sr; + + wl->min_mr_fw_ver[FW_VER_CHIP] = chip; + wl->min_mr_fw_ver[FW_VER_IF_TYPE] = iftype_mr; + wl->min_mr_fw_ver[FW_VER_MAJOR] = major_mr; + wl->min_mr_fw_ver[FW_VER_SUBTYPE] = subtype_mr; + wl->min_mr_fw_ver[FW_VER_MINOR] = minor_mr; +} + +/* Firmware image load chunk size */ +#define CHUNK_SIZE 16384 + +/* Quirks */ + +/* Each RX/TX transaction requires an end-of-transaction transfer */ +#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) + +/* the first start_role(sta) sometimes doesn't work on wl12xx */ +#define WLCORE_QUIRK_START_STA_FAILS BIT(1) + +/* wl127x and SPI don't support SDIO block size alignment */ +#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) + +/* means aggregated Rx packets are aligned to a SDIO block */ +#define WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN BIT(3) + +/* Older firmwares did not implement the FW logger over bus feature */ +#define WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) + +/* Older firmwares use an old NVS format */ +#define WLCORE_QUIRK_LEGACY_NVS BIT(5) + +/* pad only the last frame in the aggregate buffer */ +#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7) + +/* extra header space is required for TKIP */ +#define WLCORE_QUIRK_TKIP_HEADER_SPACE BIT(8) + +/* Some firmwares not support sched scans while connected */ +#define WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN BIT(9) + +/* separate probe response templates for one-shot and sched scans */ +#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10) + +/* Firmware requires reg domain configuration for active calibration */ +#define WLCORE_QUIRK_REGDOMAIN_CONF BIT(11) + +/* The FW only support a zero session id for AP */ +#define WLCORE_QUIRK_AP_ZERO_SESSION_ID BIT(12) + +/* TODO: move all these common registers and values elsewhere */ +#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC + +/* ELP register commands */ +#define ELPCTRL_WAKE_UP 0x1 +#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 +#define ELPCTRL_SLEEP 0x0 +/* ELP WLAN_READY bit */ +#define ELPCTRL_WLAN_READY 0x2 + +/************************************************************************* + + Interrupt Trigger Register (Host -> WiLink) + +**************************************************************************/ + +/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ + +/* + * The host sets this bit to inform the Wlan + * FW that a TX packet is in the XFER + * Buffer #0. + */ +#define INTR_TRIG_TX_PROC0 BIT(2) + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #0. 
+ */ +#define INTR_TRIG_RX_PROC0 BIT(3) + +#define INTR_TRIG_DEBUG_ACK BIT(4) + +#define INTR_TRIG_STATE_CHANGED BIT(5) + +/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #1. + */ +#define INTR_TRIG_RX_PROC1 BIT(17) + +/* + * The host sets this bit to inform the Wlan + * hardware that a TX packet is in the XFER + * Buffer #1. + */ +#define INTR_TRIG_TX_PROC1 BIT(18) + +#define ACX_SLV_SOFT_RESET_BIT BIT(1) +#define SOFT_RESET_MAX_TIME 1000000 +#define SOFT_RESET_STALL_TIME 1000 + +#define ECPU_CONTROL_HALT 0x00000101 + +#define WELP_ARM_COMMAND_VAL 0x4 + +#endif /* __WLCORE_H__ */ diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h new file mode 100644 index 000000000..ab3f04ea2 --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -0,0 +1,558 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WLCORE_I_H__ +#define __WLCORE_I_H__ + +#include +#include +#include +#include +#include +#include + +#include "conf.h" +#include "ini.h" + +/* + * wl127x and wl128x are using the same NVS file name. However, the + * ini parameters between them are different. The driver validates + * the correct NVS size in wl1271_boot_upload_nvs(). + */ +#define WL12XX_NVS_NAME "/*(DEBLOBBED)*/" + +#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) +#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) +#define WL1271_TX_SQN_POST_RECOVERY_PADDING 0xff +/* Use smaller padding for GEM, as some APs have issues when it's too big */ +#define WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM 0x20 + + +#define WL1271_CIPHER_SUITE_GEM 0x00147201 + +#define WL1271_BUSY_WORD_CNT 1 +#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32)) + +#define WL1271_ELP_HW_STATE_ASLEEP 0 +#define WL1271_ELP_HW_STATE_IRQ 1 + +#define WL1271_DEFAULT_BEACON_INT 100 +#define WL1271_DEFAULT_DTIM_PERIOD 1 + +#define WL12XX_MAX_ROLES 4 +#define WL12XX_INVALID_ROLE_ID 0xff +#define WL12XX_INVALID_LINK_ID 0xff + +/* + * max number of links allowed by all HWs. + * this is NOT the actual max links supported by the current hw. + */ +#define WLCORE_MAX_LINKS 16 + +/* the driver supports the 2.4Ghz and 5Ghz bands */ +#define WLCORE_NUM_BANDS 2 + +#define WL12XX_MAX_RATE_POLICIES 16 +#define WLCORE_MAX_KLV_TEMPLATES 4 + +/* Defined by FW as 0. Will not be freed or allocated. */ +#define WL12XX_SYSTEM_HLID 0 + +/* + * When in AP-mode, we allow (at least) this number of packets + * to be transmitted to FW for a STA in PS-mode. Only when packets are + * present in the FW buffers it will wake the sleeping STA. 
We want to put + * enough packets for the driver to transmit all of its buffered data before + * the STA goes to sleep again. But we don't want to take too much memory + * as it might hurt the throughput of active STAs. + */ +#define WL1271_PS_STA_MAX_PACKETS 2 + +#define WL1271_AP_BSS_INDEX 0 +#define WL1271_AP_DEF_BEACON_EXP 20 + +enum wlcore_state { + WLCORE_STATE_OFF, + WLCORE_STATE_RESTARTING, + WLCORE_STATE_ON, +}; + +enum wl12xx_fw_type { + WL12XX_FW_TYPE_NONE, + WL12XX_FW_TYPE_NORMAL, + WL12XX_FW_TYPE_MULTI, + WL12XX_FW_TYPE_PLT, +}; + +struct wl1271; + +enum { + FW_VER_CHIP, + FW_VER_IF_TYPE, + FW_VER_MAJOR, + FW_VER_SUBTYPE, + FW_VER_MINOR, + + NUM_FW_VER +}; + +struct wl1271_chip { + u32 id; + char fw_ver_str[ETHTOOL_FWVERS_LEN]; + unsigned int fw_ver[NUM_FW_VER]; + char phy_fw_ver_str[ETHTOOL_FWVERS_LEN]; +}; + +#define NUM_TX_QUEUES 4 + +struct wl_fw_status { + u32 intr; + u8 fw_rx_counter; + u8 drv_rx_counter; + u8 tx_results_counter; + __le32 *rx_pkt_descs; + + u32 fw_localtime; + + /* + * A bitmap (where each bit represents a single HLID) + * to indicate if the station is in PS mode. + */ + u32 link_ps_bitmap; + + /* + * A bitmap (where each bit represents a single HLID) to indicate + * if the station is in Fast mode + */ + u32 link_fast_bitmap; + + /* Cumulative counter of total released mem blocks since FW-reset */ + u32 total_released_blks; + + /* Size (in Memory Blocks) of TX pool */ + u32 tx_total; + + struct { + /* + * Cumulative counter of released packets per AC + * (length of the array is NUM_TX_QUEUES) + */ + u8 *tx_released_pkts; + + /* + * Cumulative counter of freed packets per HLID + * (length of the array is wl->num_links) + */ + u8 *tx_lnk_free_pkts; + + /* Cumulative counter of released Voice memory blocks */ + u8 tx_voice_released_blks; + + /* Tx rate of the last transmitted packet */ + u8 tx_last_rate; + } counters; + + u32 log_start_addr; + + /* Private status to be used by the lower drivers */ + void *priv; +}; + +#define WL1271_MAX_CHANNELS 64 +struct wl1271_scan { + struct cfg80211_scan_request *req; + unsigned long scanned_ch[BITS_TO_LONGS(WL1271_MAX_CHANNELS)]; + bool failed; + u8 state; + u8 ssid[IEEE80211_MAX_SSID_LEN+1]; + size_t ssid_len; +}; + +struct wl1271_if_operations { + int __must_check (*read)(struct device *child, int addr, void *buf, + size_t len, bool fixed); + int __must_check (*write)(struct device *child, int addr, void *buf, + size_t len, bool fixed); + void (*reset)(struct device *child); + void (*init)(struct device *child); + int (*power)(struct device *child, bool enable); + void (*set_block_size) (struct device *child, unsigned int blksz); +}; + +struct wlcore_platdev_data { + struct wl1271_if_operations *if_ops; + + bool ref_clock_xtal; /* specify whether the clock is XTAL or not */ + u32 ref_clock_freq; /* in Hertz */ + u32 tcxo_clock_freq; /* in Hertz, tcxo is always XTAL */ + bool pwr_in_suspend; +}; + +#define MAX_NUM_KEYS 14 +#define MAX_KEY_SIZE 32 + +struct wl1271_ap_key { + u8 id; + u8 key_type; + u8 key_size; + u8 key[MAX_KEY_SIZE]; + u8 hlid; + u32 tx_seq_32; + u16 tx_seq_16; +}; + +enum wl12xx_flags { + WL1271_FLAG_GPIO_POWER, + WL1271_FLAG_TX_QUEUE_STOPPED, + WL1271_FLAG_TX_PENDING, + WL1271_FLAG_IN_ELP, + WL1271_FLAG_ELP_REQUESTED, + WL1271_FLAG_IRQ_RUNNING, + WL1271_FLAG_FW_TX_BUSY, + WL1271_FLAG_DUMMY_PACKET_PENDING, + WL1271_FLAG_SUSPENDED, + WL1271_FLAG_PENDING_WORK, + WL1271_FLAG_SOFT_GEMINI, + WL1271_FLAG_RECOVERY_IN_PROGRESS, + WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, + WL1271_FLAG_INTENDED_FW_RECOVERY, + 
WL1271_FLAG_IO_FAILED, + WL1271_FLAG_REINIT_TX_WDOG, +}; + +enum wl12xx_vif_flags { + WLVIF_FLAG_INITIALIZED, + WLVIF_FLAG_STA_ASSOCIATED, + WLVIF_FLAG_STA_AUTHORIZED, + WLVIF_FLAG_IBSS_JOINED, + WLVIF_FLAG_AP_STARTED, + WLVIF_FLAG_IN_PS, + WLVIF_FLAG_STA_STATE_SENT, + WLVIF_FLAG_RX_STREAMING_STARTED, + WLVIF_FLAG_PSPOLL_FAILURE, + WLVIF_FLAG_CS_PROGRESS, + WLVIF_FLAG_AP_PROBE_RESP_SET, + WLVIF_FLAG_IN_USE, + WLVIF_FLAG_ACTIVE, + WLVIF_FLAG_BEACON_DISABLED, +}; + +struct wl12xx_vif; + +struct wl1271_link { + /* AP-mode - TX queue per AC in link */ + struct sk_buff_head tx_queue[NUM_TX_QUEUES]; + + /* accounting for allocated / freed packets in FW */ + u8 allocated_pkts; + u8 prev_freed_pkts; + + u8 addr[ETH_ALEN]; + + /* bitmap of TIDs where RX BA sessions are active for this link */ + u8 ba_bitmap; + + /* The wlvif this link belongs to. Might be null for global links */ + struct wl12xx_vif *wlvif; + + /* + * total freed FW packets on the link - used for tracking the + * AES/TKIP PN across recoveries. Re-initialized each time + * from the wl1271_station structure. + */ + u64 total_freed_pkts; +}; + +#define WL1271_MAX_RX_FILTERS 5 +#define WL1271_RX_FILTER_MAX_FIELDS 8 + +#define WL1271_RX_FILTER_ETH_HEADER_SIZE 14 +#define WL1271_RX_FILTER_MAX_FIELDS_SIZE 95 +#define RX_FILTER_FIELD_OVERHEAD \ + (sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *)) +#define WL1271_RX_FILTER_MAX_PATTERN_SIZE \ + (WL1271_RX_FILTER_MAX_FIELDS_SIZE - RX_FILTER_FIELD_OVERHEAD) + +#define WL1271_RX_FILTER_FLAG_MASK BIT(0) +#define WL1271_RX_FILTER_FLAG_IP_HEADER 0 +#define WL1271_RX_FILTER_FLAG_ETHERNET_HEADER BIT(1) + +enum rx_filter_action { + FILTER_DROP = 0, + FILTER_SIGNAL = 1, + FILTER_FW_HANDLE = 2 +}; + +enum plt_mode { + PLT_OFF = 0, + PLT_ON = 1, + PLT_FEM_DETECT = 2, + PLT_CHIP_AWAKE = 3 +}; + +struct wl12xx_rx_filter_field { + __le16 offset; + u8 len; + u8 flags; + u8 *pattern; +} __packed; + +struct wl12xx_rx_filter { + u8 action; + int num_fields; + struct wl12xx_rx_filter_field fields[WL1271_RX_FILTER_MAX_FIELDS]; +}; + +struct wl1271_station { + u8 hlid; + bool in_connection; + + /* + * total freed FW packets on the link to the STA - used for tracking the + * AES/TKIP PN across recoveries. Re-initialized each time from the + * wl1271_station structure. + * Used in both AP and STA mode. 
+ */ + u64 total_freed_pkts; +}; + +struct wl12xx_vif { + struct wl1271 *wl; + struct list_head list; + unsigned long flags; + u8 bss_type; + u8 p2p; /* we are using p2p role */ + u8 role_id; + + /* sta/ibss specific */ + u8 dev_role_id; + u8 dev_hlid; + + union { + struct { + u8 hlid; + + u8 basic_rate_idx; + u8 ap_rate_idx; + u8 p2p_rate_idx; + + u8 klv_template_id; + + bool qos; + /* channel type we started the STA role with */ + enum nl80211_channel_type role_chan_type; + } sta; + struct { + u8 global_hlid; + u8 bcast_hlid; + + /* HLIDs bitmap of associated stations */ + unsigned long sta_hlid_map[BITS_TO_LONGS( + WLCORE_MAX_LINKS)]; + + /* recoreded keys - set here before AP startup */ + struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS]; + + u8 mgmt_rate_idx; + u8 bcast_rate_idx; + u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT]; + } ap; + }; + + /* the hlid of the last transmitted skb */ + int last_tx_hlid; + + /* counters of packets per AC, across all links in the vif */ + int tx_queue_count[NUM_TX_QUEUES]; + + unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)]; + + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; + + /* The current band */ + enum ieee80211_band band; + int channel; + enum nl80211_channel_type channel_type; + + u32 bitrate_masks[WLCORE_NUM_BANDS]; + u32 basic_rate_set; + + /* + * currently configured rate set: + * bits 0-15 - 802.11abg rates + * bits 16-23 - 802.11n MCS index mask + * support only 1 stream, thus only 8 bits for the MCS rates (0-7). + */ + u32 basic_rate; + u32 rate_set; + + /* probe-req template for the current AP */ + struct sk_buff *probereq; + + /* Beaconing interval (needed for ad-hoc) */ + u32 beacon_int; + + /* Default key (for WEP) */ + u32 default_key; + + /* Our association ID */ + u16 aid; + + /* retry counter for PSM entries */ + u8 psm_entry_retry; + + /* in dBm */ + int power_level; + + int rssi_thold; + int last_rssi_event; + + /* save the current encryption type for auto-arp config */ + u8 encryption_type; + __be32 ip_addr; + + /* RX BA constraint value */ + bool ba_support; + bool ba_allowed; + + bool wmm_enabled; + + bool radar_enabled; + + /* Rx Streaming */ + struct work_struct rx_streaming_enable_work; + struct work_struct rx_streaming_disable_work; + struct timer_list rx_streaming_timer; + + struct delayed_work channel_switch_work; + struct delayed_work connection_loss_work; + + /* number of in connection stations */ + int inconn_count; + + /* + * This vif's queues are mapped to mac80211 HW queues as: + * VO - hw_queue_base + * VI - hw_queue_base + 1 + * BE - hw_queue_base + 2 + * BK - hw_queue_base + 3 + */ + int hw_queue_base; + + /* do we have a pending auth reply? (and ROC) */ + bool ap_pending_auth_reply; + + /* time when we sent the pending auth reply */ + unsigned long pending_auth_reply_time; + + /* work for canceling ROC after pending auth reply */ + struct delayed_work pending_auth_complete_work; + + /* update rate conrol */ + enum ieee80211_sta_rx_bandwidth rc_update_bw; + struct work_struct rc_update_work; + + /* + * total freed FW packets on the link. + * For STA this holds the PN of the link to the AP. + * For AP this holds the PN of the broadcast link. + */ + u64 total_freed_pkts; + + /* + * This struct must be last! + * data that has to be saved acrossed reconfigs (e.g. recovery) + * should be declared in this struct. 
+ */ + struct { + u8 persistent[0]; + }; +}; + +static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif) +{ + WARN_ON(!vif); + return (struct wl12xx_vif *)vif->drv_priv; +} + +static inline +struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif) +{ + return container_of((void *)wlvif, struct ieee80211_vif, drv_priv); +} + +#define wl12xx_for_each_wlvif(wl, wlvif) \ + list_for_each_entry(wlvif, &wl->wlvif_list, list) + +#define wl12xx_for_each_wlvif_continue(wl, wlvif) \ + list_for_each_entry_continue(wlvif, &wl->wlvif_list, list) + +#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \ + wl12xx_for_each_wlvif(wl, wlvif) \ + if (wlvif->bss_type == _bss_type) + +#define wl12xx_for_each_wlvif_sta(wl, wlvif) \ + wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS) + +#define wl12xx_for_each_wlvif_ap(wl, wlvif) \ + wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS) + +int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode); +int wl1271_plt_stop(struct wl1271 *wl); +int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif); +void wl12xx_queue_recovery_work(struct wl1271 *wl); +size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); +int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, + u16 offset, u8 flags, + const u8 *pattern, u8 len); +void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter); +struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void); +int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter); +void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter, + u8 *buf); + +#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ + +#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */ +#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */ + +#define WL1271_DEFAULT_POWER_LEVEL 0 + +#define WL1271_TX_QUEUE_LOW_WATERMARK 32 +#define WL1271_TX_QUEUE_HIGH_WATERMARK 256 + +#define WL1271_DEFERRED_QUEUE_LIMIT 64 + +/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power + on in case it has been shut down shortly before */ +#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ +#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */ + +/* Macros to handle wl1271.sta_rate_set */ +#define HW_BG_RATES_MASK 0xffff +#define HW_HT_RATES_OFFSET 16 +#define HW_MIMO_RATES_OFFSET 24 + +#endif /* __WLCORE_I_H__ */ -- cgit v1.2.3-54-g00ecf
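
The TX_HW_ATTR_* masks and TX_HW_ATTR_OFST_* shifts in tx.h describe how per-packet flags, the session counter and the rate-policy index share the 16-bit tx_attr word of struct wl1271_tx_hw_descr. The stand-alone sketch below only reuses the constants quoted in tx.h above; the pack_tx_attr() helper and the example values (session 3, rate policy 5) are illustrative and are not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* constants copied verbatim from tx.h above */
#define TX_HW_ATTR_SESSION_COUNTER      (BIT(2) | BIT(3) | BIT(4))
#define TX_HW_ATTR_RATE_POLICY          (BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9))
#define TX_HW_ATTR_TX_CMPLT_REQ         BIT(12)
#define TX_HW_ATTR_OFST_SESSION_COUNTER 2
#define TX_HW_ATTR_OFST_RATE_POLICY     5

/* hypothetical helper: shift each field to its offset and clip it to its mask */
static uint16_t pack_tx_attr(unsigned int session, unsigned int rate_policy,
                             int want_tx_complete)
{
        uint16_t attr = 0;

        attr |= (session << TX_HW_ATTR_OFST_SESSION_COUNTER) &
                TX_HW_ATTR_SESSION_COUNTER;
        attr |= (rate_policy << TX_HW_ATTR_OFST_RATE_POLICY) &
                TX_HW_ATTR_RATE_POLICY;
        if (want_tx_complete)
                attr |= TX_HW_ATTR_TX_CMPLT_REQ;

        return attr;
}

int main(void)
{
        /* session 3, rate policy 5, TX-complete requested -> 0x10ac */
        printf("tx_attr = 0x%04x\n", (unsigned int)pack_tx_attr(3, 5, 1));
        return 0;
}

In the driver the resulting value is stored little-endian in the descriptor's tx_attr field; the sketch only shows the bit packing itself.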
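
tx.h's wl1271_tx_get_queue() and wlcore_tx_get_mac80211_queue(), together with the hw_queue_base comment in struct wl12xx_vif, fix a VO/VI/BE/BK ordering for each vif's four mac80211 hardware queues. The sketch below walks that mapping in both directions; the local conf_tx_ac enum is only a stand-in with an assumed ordering (the real values are defined in conf.h, which is not part of this excerpt).

#include <stdio.h>

/* stand-in for the CONF_TX_AC_* values from conf.h (ordering assumed) */
enum conf_tx_ac { CONF_TX_AC_BE, CONF_TX_AC_BK, CONF_TX_AC_VI, CONF_TX_AC_VO };

/* mirrors wl1271_tx_get_queue(): mac80211 queue index -> access category */
static int tx_get_queue(int queue)
{
        switch (queue) {
        case 0:  return CONF_TX_AC_VO;
        case 1:  return CONF_TX_AC_VI;
        case 2:  return CONF_TX_AC_BE;
        case 3:  return CONF_TX_AC_BK;
        default: return CONF_TX_AC_BE;
        }
}

/* mirrors wlcore_tx_get_mac80211_queue(): access category -> per-vif hw queue */
static int tx_get_mac80211_queue(int hw_queue_base, int ac)
{
        switch (ac) {
        case CONF_TX_AC_VO: return hw_queue_base + 0;
        case CONF_TX_AC_VI: return hw_queue_base + 1;
        case CONF_TX_AC_BE: return hw_queue_base + 2;
        case CONF_TX_AC_BK: return hw_queue_base + 3;
        default:            return hw_queue_base + 2;
        }
}

int main(void)
{
        int q;

        /* with hw_queue_base = 0 the round trip maps each queue onto itself */
        for (q = 0; q < 4; q++)
                printf("mac80211 q%d -> AC %d -> hw q%d\n",
                       q, tx_get_queue(q),
                       tx_get_mac80211_queue(0, tx_get_queue(q)));
        return 0;
}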