From d635711daa98be86d4c7fd01499c34f566b54ccb Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Fri, 10 Jun 2016 05:30:17 -0300 Subject: Linux-libre 4.6.2-gnu --- drivers/net/wireless/intel/iwlwifi/Kconfig | 12 + drivers/net/wireless/intel/iwlwifi/dvm/led.c | 5 +- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 20 +- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 13 +- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 8 +- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 5 +- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 7 +- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 6 + drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 59 ++ .../wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h | 31 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 59 +- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 84 ++ .../net/wireless/intel/iwlwifi/iwl-fw-error-dump.h | 3 + drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 42 + drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 13 + drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 4 + drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 148 ++-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 5 +- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 2 + drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 21 +- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 88 ++- .../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 75 +- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 120 ++- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 99 ++- .../net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 69 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 157 +++- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 12 +- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 57 +- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 45 +- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 109 ++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 139 +++- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 9 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 228 ++++-- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 131 ++-- drivers/net/wireless/intel/iwlwifi/mvm/quota.c | 16 + drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28 +- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 63 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 171 +++- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 45 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 84 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 25 +- .../net/wireless/intel/iwlwifi/mvm/time-event.c | 15 +- .../net/wireless/intel/iwlwifi/mvm/time-event.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 495 +++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 315 +++++++- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 34 +- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 213 ++++- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 142 +++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 858 +++++++++++++++------ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 428 +++++++--- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 87 ++- 54 files changed, 4024 insertions(+), 895 deletions(-) (limited to 'drivers/net/wireless/intel/iwlwifi') diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 7438fbeef..16c4f3834 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -98,6
+98,18 @@ config IWLWIFI_UAPSD If unsure, say N. +config IWLWIFI_PCIE_RTPM + bool "Enable runtime power management mode for PCIe devices" + depends on IWLMVM && PM + default false + help + Say Y here to enable runtime power management for PCIe + devices. If enabled, the device will go into low power mode + when idle for a short period of time, allowing for improved + power saving during runtime. + + If unsure, say N. + menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 1aabb5ec0..1bbd17ada 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev, { struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); unsigned long on = 0; + unsigned long off = 0; if (brightness > 0) on = IWL_LED_SOLID; + else + off = IWL_LED_SOLID; - iwl_led_cmd(priv, on, 0); + iwl_led_cmd(priv, on, off); } static int iwl_led_blink_set(struct led_classdev *led_cdev, diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 4841be2aa..179946926 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -943,14 +943,16 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (sta) { + u64 pn64; + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + pn64 = atomic64_read(&key->tx_pn); + tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); + tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), p1k); iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); @@ -996,19 +998,13 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 29ea1c670..c63ea7957 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_trans_d3_suspend(priv->trans, false); + iwl_trans_d3_suspend(priv->trans, false, true); goto out; @@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) /* we'll clear ctx->vif during iwlagn_prepare_restart() */ vif = ctx->vif; - ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true); if (ret) goto out_unlock;
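/*
 * Illustrative sketch, not part of the patch: the dvm/lib.c hunk above
 * (and the matching mvm/d3.c hunk further down) replace
 * ieee80211_get_key_tx_seq() with a direct atomic64_read() of the key's
 * 48-bit TKIP packet number. Assuming mac80211's TKIP_PN_TO_IV16/IV32
 * helpers keep their usual definitions, the split works like this:
 */
static inline void tkip_pn_to_iv_example(u64 pn64, u16 *iv16, u32 *iv32)
{
	*iv16 = (u16)(pn64 & 0xffff);		  /* TKIP_PN_TO_IV16(pn64) */
	*iv32 = (u32)((pn64 >> 16) & 0xffffffff); /* TKIP_PN_TO_IV32(pn64) */
}
/*
 * The resume path in mvm/d3.c below performs the inverse when restoring
 * the key: pn64 = (u64)iv16 | ((u64)iv32 << 16).
 */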
@@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret = -EINVAL; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = &params->ssn; + u8 buf_size = params->buf_size; struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index f62c2d727..856281279 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1652,10 +1652,10 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.line, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, - table.hw_ver, 0, table.brd_ver); + table.blink2, table.ilink1, table.ilink2, + table.bcon_time, table.gp1, table.gp2, + table.gp3, table.ucode_ver, table.hw_ver, + 0, table.brd_ver); IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(priv, "0x%08X | uPc\n", table.pc); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index 978e53da1..786a919fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 20 -#define IWL3168_UCODE_API_MAX 20 +#define IWL7265D_UCODE_API_MAX 21 +#define IWL3168_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 5c7ba6fc0..16846cae3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 20 -#define IWL8265_UCODE_API_MAX 20 +#define IWL8000_UCODE_API_MAX 21 +#define IWL8265_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 13 @@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl4165_2ac_cfg = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index c2261ffbf..7b34387d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 20 +#define IWL9000_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL9000_UCODE_API_OK 13 @@ -138,7 +138,10 @@ static const struct iwl_tt_params iwl9000_tt_params = { .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ .thermal_params = &iwl9000_tt_params, \ -
.apmg_not_supported = true + .apmg_not_supported = true, \ + .mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f99048135..3e4d346be 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff { * @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section + * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response @@ -311,6 +312,8 @@ struct iwl_pwr_tx_backoff { * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM + * @mq_rx_supported: multi-queue rx support + * @vht_mu_mimo_supported: VHT MU-MIMO support * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -343,6 +346,7 @@ struct iwl_cfg { const bool host_interrupt_operation_mode; bool high_temp; u8 nvm_hw_section_num; + bool mac_addr_from_csr; bool lp_xtal_workaround; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; bool no_power_up_nic_in_init; @@ -362,6 +366,8 @@ struct iwl_cfg { const u32 smem_len; const struct iwl_tt_params *thermal_params; bool apmg_not_supported; + bool mq_rx_supported; + bool vht_mu_mimo_supported; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 163b21bc2..b978f6cae 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -549,4 +550,62 @@ enum dtd_diode_reg { DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ }; +/***************************************************************************** + * MSIX related registers * + *****************************************************************************/ + +#define CSR_MSIX_BASE (0x2000) +#define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800) +#define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804) +#define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808) +#define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C) +#define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810) +#define CSR_MSIX_RX_IVAR_AD_REG (CSR_MSIX_BASE + 0x880) +#define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890) +#define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000) +#define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause)) +#define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause)) + +#define MSIX_FH_INT_CAUSES_Q(q) (q) + +/* + * Causes for the FH register interrupts + */ +enum msix_fh_int_causes { + MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16), + MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17), + MSIX_FH_INT_CAUSES_S2D = BIT(19), + MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), +}; + +/* + * Causes for the HW register interrupts + */ +enum msix_hw_int_causes { + MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), + MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), + MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), + MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), + MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), + MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25), + MSIX_HW_INT_CAUSES_REG_SCD = BIT(26), + MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27), + MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29), + MSIX_HW_INT_CAUSES_REG_HAP = BIT(30), +}; + +#define MSIX_MIN_INTERRUPT_VECTORS 2 +#define MSIX_AUTO_CLEAR_CAUSE 0 +#define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) + +/***************************************************************************** + * HW address related registers * + *****************************************************************************/ + +#define CSR_ADDR_BASE (0x380) +#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE) +#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4) +#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8) +#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC) + #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 22786d7dc..f02e2c89a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -2,6 +2,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -73,12 +74,12 @@ TRACE_EVENT(iwlwifi_dev_rx, TP_ARGS(dev, trans, pkt, len), TP_STRUCT__entry( DEV_ENTRY - __field(u8, cmd) + __field(u16, cmd) __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) ), TP_fast_assign( DEV_ASSIGN; - __entry->cmd = pkt->hdr.cmd; + __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); memcpy(__get_dynamic_array(rxbuf), pkt, iwl_rx_trace_len(trans, pkt, len)); ), @@ -121,13 +122,12 @@ TRACE_EVENT(iwlwifi_dev_tx, TRACE_EVENT(iwlwifi_dev_ucode_error, TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, - u32 data1, u32 data2, u32 line, u32 blink1, - u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, - u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver, - u32 brd_ver), + u32 data1, u32 data2, u32 line, u32 blink2, u32 ilink1, + u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 rev_type, + u32 major, u32 minor, u32 hw_ver, u32 brd_ver), TP_ARGS(dev, desc, tsf_low, data1, data2, line, - blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, - gp3, major, minor, hw_ver, brd_ver), + blink2, ilink1, ilink2, bcon_time, gp1, gp2, + rev_type, major, minor, hw_ver, brd_ver), TP_STRUCT__entry( DEV_ENTRY __field(u32, desc) @@ -135,14 +135,13 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __field(u32, data1) __field(u32, data2) __field(u32, line) - __field(u32, blink1) __field(u32, blink2) __field(u32, ilink1) __field(u32, ilink2) __field(u32, bcon_time) __field(u32, gp1) __field(u32, gp2) - __field(u32, gp3) + __field(u32, rev_type) __field(u32, major) __field(u32, minor) __field(u32, hw_ver) @@ -155,29 +154,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __entry->data1 = data1; __entry->data2 = data2; __entry->line = line; - __entry->blink1 = blink1; __entry->blink2 = blink2; __entry->ilink1 = ilink1; __entry->ilink2 = ilink2; __entry->bcon_time = bcon_time; __entry->gp1 = gp1; __entry->gp2 = gp2; - __entry->gp3 = gp3; + __entry->rev_type = rev_type; __entry->major = major; __entry->minor = minor; __entry->hw_ver = hw_ver; __entry->brd_ver = brd_ver; ), TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " - "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X " + "blink2 0x%05X ilink 0x%05X 0x%05X " + "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X " "minor 0x%08X hw 0x%08X brd 0x%08X", __get_str(dev), __entry->desc, __entry->tsf_low, - __entry->data1, - __entry->data2, __entry->line, __entry->blink1, + __entry->data1, __entry->data2, __entry->line, __entry->blink2, __entry->ilink1, __entry->ilink2, __entry->bcon_time, __entry->gp1, __entry->gp2, - __entry->gp3, __entry->major, __entry->minor, + __entry->rev_type, __entry->major, __entry->minor, __entry->hw_ver, __entry->brd_ver) ); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 9527ae08c..7ce381ba3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -236,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) snprintf(drv->firmware_name, sizeof(drv->firmware_name), "/*(DEBLOBBED)*/", name_pre, tag); - /* - * Starting 8000B - FW name format has changed. This overwrites the - * previous name and uses the new format. - */ - if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); - - if (rev_step != 'A') - snprintf(drv->firmware_name, - sizeof(drv->firmware_name), "/*(DEBLOBBED)*/", - name_pre, rev_step, tag); - } - IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) ? "EXPERIMENTAL " : "", @@ -374,15 +363,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } -static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, - const u32 len) +static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, + const u32 len) { struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; struct iwl_gscan_capabilities *capa = &fw->gscan_capa; - if (len < sizeof(*fw_capa)) - return -EINVAL; - capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); capa->max_ap_cache_per_scan = @@ -395,7 +381,15 @@ static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, le32_to_cpu(fw_capa->max_significant_change_aps); capa->max_bssid_history_entries = le32_to_cpu(fw_capa->max_bssid_history_entries); - return 0; + capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids); + capa->max_number_epno_networks = + le32_to_cpu(fw_capa->max_number_epno_networks); + capa->max_number_epno_networks_by_ssid = + le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid); + capa->max_number_of_white_listed_ssid = + le32_to_cpu(fw_capa->max_number_of_white_listed_ssid); + capa->max_number_of_black_listed_ssid = + le32_to_cpu(fw_capa->max_number_of_black_listed_ssid); } /* @@ -1023,8 +1017,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: - if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) - goto invalid_tlv_len; + /* + * Don't return an error in case of a shorter tlv_len + * to enable loading of FW that has an old format + * of GSCAN capabilities TLV. 
+ */ + if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities)) + break; + + iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); gscan_capa = true; break; default: @@ -1033,7 +1034,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } } - if (usniffer_req && !*usniffer_images) { + if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) && + usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; @@ -1047,13 +1049,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, /* * If ucode advertises that it supports GSCAN but GSCAN - * capabilities TLV is not present, warn and continue without GSCAN. + * capabilities TLV is not present, or if it has an old format, + * warn and continue without GSCAN. */ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - WARN(!gscan_capa, - "GSCAN is supported but capabilities TLV is unavailable\n")) + !gscan_capa) { + IWL_DEBUG_INFO(drv, + "GSCAN is supported but capabilities TLV is unavailable\n"); __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, capa->_capa); + } return 0; @@ -1718,3 +1723,7 @@ MODULE_PARM_DESC(fw_monitor, module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay, uint, S_IRUGO); MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); + +module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, + S_IRUGO); +MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 5cc6be927..582008a66 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -312,6 +314,81 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 #define FH_MEM_TB_MAX_LENGTH (0x00020000) +/* 9000 rx series registers */ + +#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ +#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_FRBDCB_WIDX 0xA08080 +#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) +/* Read index table */ +#define RFH_Q0_FRBDCB_RIDX 0xA080C0 +#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) +/* Used list table */ +#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ +#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_URBDCB_WIDX 0xA08180 +#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) +#define RFH_Q0_URBDCB_VAID 0xA081C0 +#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) +/* stts */ +#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */ +#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) + +#define RFH_Q0_ORB_WPTR_LSB 0xA08280 +#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) +#define RFH_RBDBUF_RBD0_LSB 0xA08300 +#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) + +/* DMA configuration */ +#define RFH_RXF_DMA_CFG 0xA09820 +/* RB size */ +#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ +#define RFH_RXF_DMA_RB_SIZE_POS 16 +#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) +/* RB Circular Buffer size: defines the table sizes in RBD units */ +#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define RFH_RXF_DMA_RBDCB_SIZE_POS 20 +#define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_64 (0x6 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bits 24-25 */ +#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 +#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) +#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ +#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ +#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31 */ +#define RFH_DMA_EN_ENABLE_VAL BIT(31) + +#define RFH_RXF_RXQ_ACTIVE 0xA0980C + +#define RFH_GEN_CFG 0xA09800 +#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) +#define
RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) +#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1- 128B */ +#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 +#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8 + +#define DEFAULT_RXQ_NUM 0 + +/* end of 9000 rx series registers */ + /* TFDB Area - TFDs buffer table */ #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) @@ -434,6 +511,13 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) */ #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) +#define MQ_RX_TABLE_SIZE 512 +#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) +#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1) +#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \ + IWL_MAX_RX_HW_QUEUES * \ + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)) + #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index a5aaf6853..8425e1a58 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a * threshold. * @FW_DBG_TDLS: trigger log collection upon TDLS related events. + * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when + * the firmware sends a tx reply. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_BA, FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, + FW_DBG_TRIGGER_TX_STATUS, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 84f8aeb92..15ec4e290 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -297,10 +297,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching + * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics + * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to @@ -313,7 +315,15 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted + * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon + * from AP and will send it upon d0i3 exit. 
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 + * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill + * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature + * thresholds reporting + * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command + * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in + * regular image. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -330,10 +340,12 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, @@ -341,8 +353,14 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, + IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, + IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, + IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, + IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, + IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ @@ -747,6 +765,19 @@ struct iwl_fw_dbg_trigger_tdls { u8 reserved[4]; } __packed; +/** + * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response + * status. + * @statuses: the list of statuses to trigger the collection on + */ +struct iwl_fw_dbg_trigger_tx_status { + struct tx_status { + u8 status; + u8 reserved[3]; + } __packed statuses[16]; + __le32 reserved[2]; +} __packed; + /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id @@ -778,6 +809,12 @@ struct iwl_fw_dbg_conf_tlv { * change APs. * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can * hold. + * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. + * @max_number_epno_networks: max number of epno entries. + * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is + * specified. + * @max_number_of_white_listed_ssid: max number of white listed SSIDs. + * @max_number_of_black_listed_ssid: max number of black listed SSIDs. 
*/ struct iwl_fw_gscan_capabilities { __le32 max_scan_cache_size; @@ -788,6 +825,11 @@ struct iwl_fw_gscan_capabilities { __le32 max_hotlist_aps; __le32 max_significant_change_aps; __le32 max_bssid_history_entries; + __le32 max_hotlist_ssids; + __le32 max_number_epno_networks; + __le32 max_number_epno_networks_by_ssid; + __le32 max_number_of_white_listed_ssid; + __le32 max_number_of_black_listed_ssid; } __packed; #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 85d6d6d55..2942571c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -205,6 +207,12 @@ struct iwl_fw_cscheme_list { * change APs. * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can * hold. + * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. + * @max_number_epno_networks: max number of epno entries. + * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is + * specified. + * @max_number_of_white_listed_ssid: max number of white listed SSIDs. + * @max_number_of_black_listed_ssid: max number of black listed SSIDs. */ struct iwl_gscan_capabilities { u32 max_scan_cache_size; @@ -215,6 +223,11 @@ struct iwl_gscan_capabilities { u32 max_hotlist_aps; u32 max_significant_change_aps; u32 max_bssid_history_entries; + u32 max_hotlist_ssids; + u32 max_number_epno_networks; + u32 max_number_epno_networks_by_ssid; + u32 max_number_of_white_listed_ssid; + u32 max_number_of_black_listed_ssid; }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index fd42f63f5..d1a5dd160 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -108,11 +108,14 @@ enum iwl_amsdu_size { * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 + * @nvm_file: specifies an external NVM file + * @uapsd_disable: disable U-APSD, default = 1 * @d0i3_disable: disable d0i3, default = 1, * @d0i3_entry_delay: time to wait after no refs are taken before * entering D0i3 (in msecs) * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor + * @disable_11ac: disable VHT capabilities, default = false.
*/ struct iwl_mod_params { int sw_crypto; @@ -133,6 +136,7 @@ struct iwl_mod_params { unsigned int d0i3_entry_delay; bool lar_disable; bool fw_monitor; + bool disable_11ac; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 7b89bfc8c..93a689583 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -69,6 +70,9 @@ #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" +#include "iwl-prph.h" +#include "iwl-io.h" +#include "iwl-csr.h" /* NVM offsets (in words) definitions */ enum wkp_nvm_offsets { @@ -366,6 +370,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (cfg->vht_mu_mimo_supported) + vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; @@ -449,7 +456,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IEEE80211_BAND_5GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, tx_chains, rx_chains); - if (data->sku_cap_11ac_enable) + if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, tx_chains, rx_chains); @@ -519,27 +526,41 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg); } -static void iwl_set_hw_address(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *nvm_sec) +static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) { - const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR); - - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; + const u8 *hw_addr; + + hw_addr = (const u8 *)&mac_addr0; + dest[0] = hw_addr[3]; + dest[1] = hw_addr[2]; + dest[2] = hw_addr[1]; + dest[3] = hw_addr[0]; + + hw_addr = (const u8 *)&mac_addr1; + dest[4] = hw_addr[1]; + dest[5] = hw_addr[0]; } -static void iwl_set_hw_address_family_8000(struct device *dev, +static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) +{ + __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); + __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); + + /* If OEM did not fuse address - get it from OTP */ + if (!mac_addr0 && !mac_addr1) { + mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); + mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); + } + + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); +} + +static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, - const __le16 *nvm_hw, - u32 mac_addr0, u32 mac_addr1) + const __le16 *nvm_hw) { const u8 *hw_addr; @@ -565,44 +586,68 @@ static void 
iwl_set_hw_address_family_8000(struct device *dev, memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; - IWL_ERR_DEV(dev, - "mac address from nvm override section is not valid\n"); + IWL_ERR(trans, + "mac address from nvm override section is not valid\n"); } if (nvm_hw) { - /* read the MAC address from HW resisters */ - hw_addr = (const u8 *)&mac_addr0; - data->hw_addr[0] = hw_addr[3]; - data->hw_addr[1] = hw_addr[2]; - data->hw_addr[2] = hw_addr[1]; - data->hw_addr[3] = hw_addr[0]; + /* read the mac address from WFMP registers */ + __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, + WFMP_MAC_ADDR_0)); + __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, + WFMP_MAC_ADDR_1)); - hw_addr = (const u8 *)&mac_addr1; - data->hw_addr[4] = hw_addr[1]; - data->hw_addr[5] = hw_addr[0]; - - if (!is_valid_ether_addr(data->hw_addr)) - IWL_ERR_DEV(dev, - "mac address from hw section is not valid\n"); + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); return; } - IWL_ERR_DEV(dev, "mac address is not found\n"); + IWL_ERR(trans, "mac address is not found\n"); +} + +static int iwl_set_hw_address(struct iwl_trans *trans, + const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_hw, + const __le16 *mac_override) +{ + if (cfg->mac_addr_from_csr) { + iwl_set_hw_address_from_csr(trans, data); + } else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); + + /* The byte order is little endian 16 bit, meaning 214365 */ + data->hw_addr[0] = hw_addr[1]; + data->hw_addr[1] = hw_addr[0]; + data->hw_addr[2] = hw_addr[3]; + data->hw_addr[3] = hw_addr[2]; + data->hw_addr[4] = hw_addr[5]; + data->hw_addr[5] = hw_addr[4]; + } else { + iwl_set_hw_address_family_8000(trans, cfg, data, + mac_override, nvm_hw); + } + + if (!is_valid_ether_addr(data->hw_addr)) { + IWL_ERR(trans, "no valid mac address was found\n"); + return -EINVAL; + } + + return 0; } struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1) + u8 tx_chains, u8 rx_chains, bool lar_fw_supported) { + struct device *dev = trans->dev; struct iwl_nvm_data *data; - u32 sku; - u32 radio_cfg; + bool lar_enabled; + u32 sku, radio_cfg; u16 lar_config; + const __le16 *ch_section; if (cfg->device_family != IWL_DEVICE_FAMILY_8000) data = kzalloc(sizeof(*data) + @@ -641,21 +686,16 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { /* Checking for required sections */ if (!nvm_calib) { - IWL_ERR_DEV(dev, - "Can't parse empty Calib NVM sections\n"); + IWL_ERR(trans, + "Can't parse empty Calib NVM sections\n"); kfree(data); return NULL; } /* in family 8000 Xtal calibration values moved to OTP */ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); - } - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - iwl_set_hw_address(cfg, data, nvm_hw); - - iwl_init_sbands(dev, cfg, data, nvm_sw, - tx_chains, rx_chains, lar_fw_supported); + lar_enabled = true; + ch_section = nvm_sw; } else { u16 lar_offset = data->nvm_version < 0xE39 ? 
NVM_LAR_OFFSET_FAMILY_8000_OLD : @@ -664,16 +704,18 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED_FAMILY_8000); + lar_enabled = data->lar_enabled; + ch_section = regulatory; + } - /* MAC address in family 8000 */ - iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, - nvm_hw, mac_addr0, mac_addr1); - - iwl_init_sbands(dev, cfg, data, regulatory, - tx_chains, rx_chains, - lar_fw_supported && data->lar_enabled); + /* If no valid mac address was found - bail out */ + if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) { + kfree(data); + return NULL; } + iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, + lar_fw_supported && lar_enabled); data->calib_version = 255; return data; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 92466ee72..d704d52aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -74,12 +74,11 @@ * later with iwl_free_nvm_data(). */ struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1); + u8 tx_chains, u8 rx_chains, bool lar_fw_supported); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 5bde23a47..c46e596e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -404,4 +404,6 @@ enum { LMPM_PAGE_PASS_NOTIF_POS = BIT(20), }; +#define UREG_CHICK (0xA05C00) +#define UREG_CHICK_MSIX_ENABLE BIT(25) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 82fb3a97a..91d74b3f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -506,7 +506,7 @@ struct iwl_trans_config { bool sw_csum_tx; const struct iwl_hcmd_arr *command_groups; int command_groups_size; - + u32 sdio_adma_addr; }; @@ -618,9 +618,9 @@ struct iwl_trans_ops { void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans, bool low_power); - void (*d3_suspend)(struct iwl_trans *trans, bool test); + void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test); + bool test, bool reset); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -736,6 +736,11 @@ enum iwl_plat_pm_mode { IWL_PLAT_PM_MODE_D0I3, }; +/* Max time to wait for trans to become idle/non-idle on d0i3 + * enter/exit (in msecs). 
+ */ +#define IWL_TRANS_IDLE_TIMEOUT 2000 + /** * struct iwl_trans - transport common data * @@ -831,6 +836,7 @@ struct iwl_trans { enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode runtime_pm_mode; + bool suspending; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -920,22 +926,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) _iwl_trans_stop_device(trans, true); } -static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { might_sleep(); if (trans->ops->d3_suspend) - trans->ops->d3_suspend(trans, test); + trans->ops->d3_suspend(trans, test, reset); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { might_sleep(); if (!trans->ops->d3_resume) return 0; - return trans->ops->d3_resume(trans, status, test); + return trans->ops->d3_resume(trans, status, test, reset); } static inline void iwl_trans_ref(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index b00c03fcd..4b560e441 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -6,7 +6,8 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +33,8 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,6 +75,7 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ +#define IWL_MVM_P2P_UAPSD_STANDALONE 0 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) @@ -107,6 +110,7 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 +#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d3e21d95c..c1a313149 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -249,16 +251,19 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_TKIP: if (sta) { + u64 pn64; + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + pn64 = atomic64_read(&key->tx_pn); + tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); + tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), + p1k); iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); memcpy(data->tkip->mic_keys.tx, @@ -811,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - iwl_trans_stop_device(mvm->trans); - + iwl_mvm_stop_device(mvm); /* * Set the HW restart bit -- this is mostly true as we're * going to load new firmware and reprogram that, though @@ -1023,14 +1027,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct ieee80211_sta *ap_sta) { int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; - ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); - if (ret) - return ret; + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); + if (ret) + return ret; + } if (!iwlwifi_mod_params.sw_crypto) { /* @@ -1072,10 +1080,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + } /* rfkill release can be either for wowlan or netdetect */ if (wowlan->rfkill_release) @@ -1151,6 +1163,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, }; int ret; int len __maybe_unused; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* @@ -1236,7 +1250,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_trans_d3_suspend(mvm->trans, test); + iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1299,7 +1313,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); - iwl_trans_d3_suspend(trans, false); + iwl_trans_d3_suspend(trans, false, false); return 0; } @@ -1601,7 +1615,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); - ieee80211_set_key_tx_seq(key, &seq); + atomic64_set(&key->tx_pn, + (u64)seq.tkip.iv16 | + ((u64)seq.tkip.iv32 << 16)); break; } @@ -2041,9 +2057,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, static int 
__iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; - int ret; + int ret = 1; enum iwl_d3_status d3_status; bool keep = false; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | + CMD_WAKE_UP_TRANS; mutex_lock(&mvm->mutex); @@ -2052,7 +2073,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) if (IS_ERR_OR_NULL(vif)) goto err; - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; @@ -2095,17 +2116,28 @@ out_iterate: iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: - /* return 1 to reconfigure the device */ + if (unified_image && !ret) { + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); + if (!ret) /* D3 ended successfully - no need to reset device */ + return 0; + } + + /* + * Reconfigure the device in one of the following cases: + * 1. We are not using a unified image + * 2. We are using a unified image but had an error while exiting D3 + */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); - - /* We always return 1, which causes mac80211 to do a reconfig - * with IEEE80211_RECONFIG_TYPE_RESTART. This type of - * reconfig calls iwl_mvm_restart_complete(), where we unref - * the IWL_MVM_REF_UCODE_DOWN, so we need to take the - * reference here. + /* + * When switching images we return 1, which causes mac80211 + * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. + * This type of reconfig calls iwl_mvm_restart_complete(), + * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need + * to take the reference here. */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + return 1; } @@ -2122,7 +2154,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) enum iwl_d3_status d3_status; struct iwl_trans *trans = mvm->trans; - iwl_trans_d3_resume(trans, &d3_status, false); + iwl_trans_d3_resume(trans, &d3_status, false, false); /* * make sure to clear D0I3_DEFER_WAKEUP before diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 9e0d46368..14004456b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + bool prev; u8 value; int ret; @@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return -EINVAL; mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value); + prev = iwl_mvm_vif_low_latency(mvmvif); + mvmvif->low_latency_dbgfs = value; + iwl_mvm_update_low_latency(mvm, vif, prev); mutex_unlock(&mvm->mutex); return count; @@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[2]; + char buf[30] = {}; + int len; - buf[0] = mvmvif->low_latency ? '1' : '0'; - buf[1] = '\n'; - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); + len = snprintf(buf, sizeof(buf) - 1, + "traffic=%d\ndbgfs=%d\nvcmd=%d\n", + mvmvif->low_latency_traffic, + mvmvif->low_latency_dbgfs, + mvmvif->low_latency_vcmd); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, @@ -1363,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); } +static void iwl_dbgfs_quota_check(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int *ret = data; + + if (mvmvif->dbgfs_quota_min) + *ret = -EINVAL; +} + +static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + if (value > 95) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + mvmvif->dbgfs_quota_min = 0; + ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_dbgfs_quota_check, &ret); + if (ret == 0) { + mvmvif->dbgfs_quota_min = value; + iwl_mvm_update_quotas(mvm, false, NULL); + } + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_quota_min_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[10]; + int len; + + len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1386,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1423,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + 
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 90500e2d1..a43b3921c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -63,6 +64,7 @@ * *****************************************************************************/ #include +#include #include "mvm.h" #include "fw-dbg.h" @@ -71,6 +73,44 @@ #include "debugfs.h" #include "iwl-fw-error-dump.h" +static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, budget; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); + mutex_unlock(&mvm->mutex); + + if (budget < 0) + return budget; + + pos = scnprintf(buf, sizeof(buf), "%d\n", budget); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -261,17 +301,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, { struct iwl_mvm *mvm = file->private_data; char buf[16]; - int pos, temp; + int pos, ret; + s32 temp; if (!mvm->ucode_loaded) return -EIO; mutex_lock(&mvm->mutex); - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); - if (temp < 0) - return temp; + if (ret) + return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); @@ -942,6 +983,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, return count; } +static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + int ret, i, num_repeats, nbytes = count / 2; + + ret = hex2bin(cmd.indirection_table, buf, nbytes); + if (ret) + return ret; + + /* + * The input is the redirection table, partial or full. + * Repeat the pattern if needed. + * For example, input of 01020F will be repeated 42 times, + * indirecting RSS hash results to queues 1, 2, 15 (skipping + * queues 3 - 14). 
+ */ + num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; + for (i = 1; i < num_repeats; i++) + memcpy(&cmd.indirection_table[i * nbytes], + cmd.indirection_table, nbytes); + /* handle a pattern that is cut in the middle for the last entries */ + memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, + ARRAY_SIZE(cmd.indirection_table) % nbytes); + + memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -983,7 +1065,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) return -EOPNOTSUPP; - ret = kstrtouint(buf, 0, &rec_mode); + ret = kstrtoint(buf, 0, &rec_mode); if (ret) return ret; @@ -1037,6 +1119,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, return count; } +static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + unsigned int max_amsdu_len; + int ret; + + ret = kstrtouint(buf, 0, &max_amsdu_len); + if (ret) + return ret; + + if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454) + return -EINVAL; + mvm->max_amsdu_len = max_amsdu_len; + + return count; +} + #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) #ifdef CONFIG_IWLWIFI_BCAST_FILTERING static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, @@ -1433,6 +1531,8 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ +MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); +MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); @@ -1454,6 +1554,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, + (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); @@ -1479,6 +1582,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, S_IWUSR | S_IRUSR); MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); @@ -1496,13 +1601,18 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); if (!debugfs_create_bool("enable_scan_iteration_notif", S_IRUSR | S_IWUSR, mvm->debugfs_dir, 
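The indirection_tbl write handler above fills all IWL_RSS_INDIRECTION_TABLE_SIZE entries by repeating whatever pattern the user wrote and then copying the cut-off tail. The same idiom can be modeled standalone; repeat_pattern() below is a hypothetical helper written only for illustration:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TBL_SIZE 128	/* IWL_RSS_INDIRECTION_TABLE_SIZE */

/* Fill a TBL_SIZE-entry table by repeating an nbytes-long pattern that
 * already sits at the start of tbl, then copy the partial tail -- the
 * same repeat-plus-tail idiom as the debugfs handler above. */
static void repeat_pattern(uint8_t *tbl, int nbytes)
{
	int num_repeats = TBL_SIZE / nbytes;
	int i;

	for (i = 1; i < num_repeats; i++)
		memcpy(&tbl[i * nbytes], tbl, nbytes);
	/* handle a pattern that is cut in the middle for the last entries */
	memcpy(&tbl[i * nbytes], tbl, TBL_SIZE % nbytes);
}

int main(void)
{
	uint8_t tbl[TBL_SIZE] = { 0x01, 0x02, 0x0f };	/* input "01020F" */

	repeat_pattern(tbl, 3);
	/* 128 / 3 = 42 repeats plus a 2-byte tail: ... 01 02 0f 01 02 */
	printf("%02x %02x %02x ... %02x %02x\n",
	       tbl[0], tbl[1], tbl[2], tbl[126], tbl[127]);
	return 0;
}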
&mvm->scan_iter_notif_enabled)) goto err; + if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) + goto err; #ifdef CONFIG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index 62b9a0a96..eec52c57f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -251,6 +251,7 @@ enum iwl_wowlan_flags { ENABLE_L3_FILTERING = BIT(1), ENABLE_NBNS_FILTERING = BIT(2), ENABLE_DHCP_FILTERING = BIT(3), + ENABLE_STORE_BEACON = BIT(4), }; struct iwl_wowlan_config_cmd { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index fb6d341d6..7a16e55df 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -264,9 +264,8 @@ enum iwl_rx_mpdu_mac_flags2 { }; enum iwl_rx_mpdu_amsdu_info { - IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x3f, - IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x40, - /* 0x80 bit reserved for now */ + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; enum iwl_rx_l3l4_flags { @@ -287,16 +286,13 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), - /* TODO - verify this is the correct value */ IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */ IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */ IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), @@ -350,11 +346,11 @@ struct iwl_rx_mpdu_desc { /* DW8 */ __le32 filter_match; /* DW9 */ - __le32 gp2_on_air_rise; - /* DW10 */ __le32 rate_n_flags; + /* DW10 */ + u8 energy_a, energy_b, channel, reserved; /* DW11 */ - u8 energy_a, energy_b, energy_c, channel; + __le32 gp2_on_air_rise; /* DW12 & DW13 */ __le64 tsf_on_air_rise; } __packed; @@ -365,4 +361,85 @@ struct iwl_frame_release { __le16 nssn; }; +enum iwl_rss_hash_func_en { + IWL_RSS_HASH_TYPE_IPV4_TCP, + IWL_RSS_HASH_TYPE_IPV4_UDP, + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, + IWL_RSS_HASH_TYPE_IPV6_TCP, + IWL_RSS_HASH_TYPE_IPV6_UDP, + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, +}; + +#define IWL_RSS_HASH_KEY_CNT 10 +#define IWL_RSS_INDIRECTION_TABLE_SIZE 128 +#define IWL_RSS_ENABLE 1 + +/** + * struct 
iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration + * + * @flags: 1 - enable, 0 - disable + * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en + * @secret_key: 320 bit input of random key configuration from driver + * @indirection_table: indirection table + */ +struct iwl_rss_config_cmd { + __le32 flags; + u8 hash_mask; + u8 reserved[3]; + __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; + u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; +} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ + +#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128 +#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 +#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf + +/** + * struct iwl_rxq_sync_cmd - RXQ notification trigger + * + * @flags: flags of the notification. bit 0:3 are the sender queue + * @rxq_mask: rx queues to send the notification on + * @count: number of bytes in payload, should be DWORD aligned + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_cmd { + __le32 flags; + __le32 rxq_mask; + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** + * struct iwl_rxq_sync_notification - Notification triggered by RXQ + * sync command + * + * @count: number of bytes in payload + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_notification { + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** +* Internal message identifier +* +* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA +*/ +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_NOTIF_DEL_BA, +}; + +/** +* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent +* in &iwl_rxq_sync_cmd. Should be DWORD aligned. +* +* @type: value from &iwl_mvm_rxq_notif_type +* @data: payload +*/ +struct iwl_mvm_internal_rxq_notif { + u32 type; + u8 data[]; +} __packed; + #endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 6fca4fb1d..90d911394 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -253,6 +255,68 @@ struct iwl_mvm_keyinfo { __le64 hw_tkip_mic_tx_key; } __packed; +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_MASK 0xFF00 + +/** + * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: 1: modify existing, 0: add new station + * @awake_acs: + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @mac_id_n_color: the Mac context this station belongs to + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 
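The RXQ sync command above carries an opaque, DWORD-aligned payload that the driver itself formats as struct iwl_mvm_internal_rxq_notif. Below is a userspace sketch of how such a command could be assembled; the host-order stand-in structures and the build_sync_cmd() helper are assumptions for illustration only (the real command uses __le32 fields and is sent with iwl_mvm_send_cmd_pdu()):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Host-order stand-ins for the structures added above. */
struct rxq_sync_cmd {
	uint32_t flags;
	uint32_t rxq_mask;
	uint32_t count;
	uint8_t payload[];
};

struct internal_rxq_notif {
	uint32_t type;		/* e.g. IWL_MVM_RXQ_NOTIF_DEL_BA */
	uint8_t data[];
};

/* Build a sync command whose payload is the internal notification;
 * the payload size must stay DWORD aligned per the API comment. */
static struct rxq_sync_cmd *build_sync_cmd(uint32_t rxq_mask,
					   uint32_t type,
					   const void *data, size_t len)
{
	size_t notif_sz = sizeof(struct internal_rxq_notif) + len;
	struct rxq_sync_cmd *cmd;
	struct internal_rxq_notif *notif;

	if (notif_sz % 4)	/* keep the payload DWORD aligned */
		return NULL;

	cmd = calloc(1, sizeof(*cmd) + notif_sz);
	if (!cmd)
		return NULL;
	cmd->rxq_mask = rxq_mask;
	cmd->count = notif_sz;
	notif = (struct internal_rxq_notif *)cmd->payload;
	notif->type = type;
	memcpy(notif->data, data, len);
	return cmd;
}

int main(void)
{
	uint32_t baid = 7;
	struct rxq_sync_cmd *cmd = build_sync_cmd(0x3, 0 /* DEL_BA */,
						  &baid, sizeof(baid));

	if (cmd)
		printf("count=%u rxq_mask=0x%x\n",
		       (unsigned)cmd->count, (unsigned)cmd->rxq_mask);
	free(cmd);
	return 0;
}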
1 - modify, 0 - don't change. + * @station_flags: look at %iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd_v7 { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) @@ -282,6 +346,7 @@ struct iwl_mvm_keyinfo { * mac-addr. 
* @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station + * @rx_ba_window: aggregation window size * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx @@ -310,7 +375,9 @@ struct iwl_mvm_add_sta_cmd { __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; -} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + __le16 rx_ba_window; + __le16 reserved; +} __packed; /* ADD_STA_CMD_API_S_VER_8 */ /** * struct iwl_mvm_add_sta_key_cmd - add/modify sta key diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 82049bb13..4a0fc47c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -119,6 +119,8 @@ enum { SCAN_ABORT_UMAC = 0xe, SCAN_COMPLETE_UMAC = 0xf, + BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, + /* station table */ ADD_STA_KEY = 0x17, ADD_STA = 0x18, @@ -213,6 +215,8 @@ enum { MFUART_LOAD_NOTIFICATION = 0xb1, + RSS_CONFIG_CMD = 0xb3, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, FRAME_RELEASE = 0xc3, @@ -277,14 +281,30 @@ enum { */ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + CTDP_CONFIG_CMD = 0x03, + TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_data_path_subcmd_ids { + UPDATE_MU_GROUPS_CMD = 0x1, + TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + MU_GROUP_MGMT_NOTIF = 0xFE, + RX_QUEUES_NOTIFICATION = 0xFF, +}; + +enum iwl_prot_offload_subcmd_ids { + STORED_BEACON_NTF = 0xFF, +}; + /* command groups */ enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, PHY_OPS_GROUP = 0x4, + DATA_PATH_GROUP = 0x5, + PROT_OFFLOAD_GROUP = 0xb, }; /** @@ -1271,6 +1291,26 @@ struct iwl_fw_bcast_filter { struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; } __packed; /* BCAST_FILTER_S_VER_1 */ +#define BA_WINDOW_STREAMS_MAX 16 +#define BA_WINDOW_STATUS_TID_MSK 0x000F +#define BA_WINDOW_STATUS_STA_ID_POS 4 +#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 +#define BA_WINDOW_STATUS_VALID_MSK BIT(9) + +/** + * struct iwl_ba_window_status_notif - reordering window's status notification + * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] + * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid + * @start_seq_num: the start sequence number of the bitmap + * @mpdu_rx_count: the number of received MPDUs since entering D0i3 + */ +struct iwl_ba_window_status_notif { + __le64 bitmap[BA_WINDOW_STREAMS_MAX]; + __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; + __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; + __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; +} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ + /** * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. * @default_discard: default action for this mac (discard (1) / pass (0)). 
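Each ra_tid word in struct iwl_ba_window_status_notif packs three fields, and the masks above decode them directly. A small self-contained decoder, reusing the constants from the hunk:

#include <stdint.h>
#include <stdio.h>

#define BA_WINDOW_STATUS_TID_MSK	0x000F
#define BA_WINDOW_STATUS_STA_ID_POS	4
#define BA_WINDOW_STATUS_STA_ID_MSK	0x01F0
#define BA_WINDOW_STATUS_VALID_MSK	(1 << 9)

/* Decode one ra_tid word: bits 3:0 TID, bits 8:4 station id,
 * bit 9 valid. */
static void decode_ra_tid(uint16_t ra_tid)
{
	if (!(ra_tid & BA_WINDOW_STATUS_VALID_MSK)) {
		printf("stream invalid\n");
		return;
	}
	printf("sta_id=%d tid=%d\n",
	       (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
			BA_WINDOW_STATUS_STA_ID_POS,
	       ra_tid & BA_WINDOW_STATUS_TID_MSK);
}

int main(void)
{
	decode_ra_tid((1 << 9) | (3 << 4) | 5);	/* valid, sta 3, tid 5 */
	decode_ra_tid(0);			/* invalid stream */
	return 0;
}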
@@ -1668,15 +1708,77 @@ struct iwl_ext_dts_measurement_cmd { } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ /** - * iwl_dts_measurement_notif - notification received with the measurements + * struct iwl_dts_measurement_notif_v1 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage */ -struct iwl_dts_measurement_notif { +struct iwl_dts_measurement_notif_v1 { __le32 temp; __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1 */ + +/** + * struct iwl_dts_measurement_notif_v2 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + * @threshold_idx: the trip index that was crossed + */ +struct iwl_dts_measurement_notif_v2 { + __le32 temp; + __le32 voltage; + __le32 threshold_idx; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ + +/** + * struct ct_kill_notif - CT-kill entry notification + * + * @temperature: the current temperature in Celsius + * @reserved: reserved + */ +struct ct_kill_notif { + __le16 temperature; + __le16 reserved; +} __packed; /* GRP_PHY_CT_KILL_NTF */ + +/** +* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations +* @CTDP_CMD_OPERATION_START: update the current budget +* @CTDP_CMD_OPERATION_STOP: stop cTDP +* @CTDP_CMD_OPERATION_REPORT: get the average budget +*/ +enum iwl_mvm_ctdp_cmd_operation { + CTDP_CMD_OPERATION_START = 0x1, + CTDP_CMD_OPERATION_STOP = 0x2, + CTDP_CMD_OPERATION_REPORT = 0x4, +}; /* CTDP_CMD_OPERATION_TYPE_E */ + +/** + * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget + * + * @operation: see &enum iwl_mvm_ctdp_cmd_operation + * @budget: the budget in milliwatts + * @window_size: defined in API but not used + */ +struct iwl_mvm_ctdp_cmd { + __le32 operation; + __le32 budget; + __le32 window_size; +} __packed; + +#define IWL_MAX_DTS_TRIPS 8 + +/** + * struct temp_report_ths_cmd - set temperature thresholds + * + * @num_temps: number of temperature thresholds passed + * @thresholds: array with the thresholds to be configured + */ +struct temp_report_ths_cmd { + __le32 num_temps; + __le16 thresholds[IWL_MAX_DTS_TRIPS]; +} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ /*********************************** * TDLS API @@ -1851,4 +1953,53 @@ struct iwl_shared_mem_cfg { __le32 page_buff_size; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ +/** + * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration + * + * @membership_status: a bitmap of MU groups + * @user_position: the position of the station in a group. If the station is + * in the group, the two bits at (group * 2) hold its position + */ +struct iwl_mu_group_mgmt_cmd { + __le32 reserved; + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ + +/** + * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification + * + * @membership_status: a bitmap of MU groups + * @user_position: the position of the station in a group. If the station is + * in the group, the two bits at (group * 2) hold its position + */ +struct iwl_mu_group_mgmt_notif { + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ + +#define MAX_STORED_BEACON_SIZE 600 + +/** + * struct iwl_stored_beacon_notif - stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @phy_flags: general phy flags: band, modulation, etc. 
+ * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 phy_flags; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 0813f8184..6938cd37b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) bool monitor_dump_only = false; int i; + if (!IWL_MVM_COLLECT_FW_ERR_DUMP && + !mvm->trans->dbg_dest_tlv) + return; + lockdep_assert_held(&mvm->mutex); /* there's no point in fw dump if the bus is dead */ @@ -522,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; /* Make room for fw's virtual image pages, if it exists */ - if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size && + mvm->fw_paging_db[0].fw_paging_block) file_len += mvm->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -639,9 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) } /* Dump fw's virtual image */ - if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { - u32 i; - + if (mvm->fw->img[mvm->cur_ucode].paging_mem_size && + mvm->fw_paging_db[0].fw_paging_block) { for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0ccc697fe..09d895faf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } +static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) +{ + int i; + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + + for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) + cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; + memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + + return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); +} + void iwl_free_fw_paging(struct iwl_mvm *mvm) { int i; @@ -125,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm) __free_pages(mvm->fw_paging_db[i].fw_paging_block, get_order(mvm->fw_paging_db[i].fw_paging_size)); + mvm->fw_paging_db[i].fw_paging_block = NULL; } kfree(mvm->trans->paging_download_buf); mvm->trans->paging_download_buf = NULL; + mvm->trans->paging_db = NULL; memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); } @@ -520,7 +541,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_sf_region st_fwrd_space; if (ucode_type == IWL_UCODE_REGULAR && - iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) + iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && + !(fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm, ucode_type); @@ -896,6 +919,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + /* Init RSS configuration */ + if (iwl_mvm_has_new_rx_api(mvm)) { + ret = iwl_send_rss_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", + ret); + goto error; + } + } + /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); @@ -925,8 +958,26 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } +#ifdef CONFIG_THERMAL + if (iwl_mvm_is_tt_in_fw(mvm)) { + /* in order to give the responsibility of ct-kill and + * TX backoff to FW we need to send empty temperature reporting + * cmd during init time + */ + iwl_mvm_send_temp_report_ths_cmd(mvm); + } else { + /* Initialize tx backoffs to the minimal possible */ + iwl_mvm_tt_tx_backoff(mvm, 0); + } + + /* TODO: read the budget from BIOS / Platform NVM */ + if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + mvm->cooling_dev.cur_state); +#else /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); +#endif WARN_ON(iwl_mvm_config_ltr(mvm)); @@ -962,7 +1013,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); return ret; } @@ -1006,7 +1057,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return 0; error: - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index bf1e5eb5d..e885db346 100644 --- 
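iwl_send_rss_cfg_cmd() above seeds the default indirection table by spreading hash results round-robin across the RX queues, and reuses the random secret_key generated at op-mode start. The fill loop modeled standalone (num_rx_queues is an example value here; the driver takes it from the transport):

#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE 128	/* IWL_RSS_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t table[TBL_SIZE];
	int num_rx_queues = 4;	/* example; comes from trans->num_rx_queues */
	int i;

	/* Round-robin default, exactly as iwl_send_rss_cfg_cmd() does */
	for (i = 0; i < TBL_SIZE; i++)
		table[i] = i % num_rx_queues;

	printf("%d %d %d %d %d ...\n",
	       table[0], table[1], table[2], table[3], table[4]);
	return 0;
}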
a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, * wake-ups. */ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count) { + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); } else { @@ -1462,3 +1462,42 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, iwl_mvm_beacon_loss_iterator, mb); } + +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct ieee80211_rx_status rx_status; + struct sk_buff *skb; + u32 size = le32_to_cpu(sb->byte_count); + + if (size == 0) + return; + + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + /* update rx_status according to the notification's metadata */ + memset(&rx_status, 0, sizeof(rx_status)); + rx_status.mactime = le64_to_cpu(sb->tsf); + /* TSF as indicated by the firmware is at INA time */ + rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; + rx_status.device_timestamp = le32_to_cpu(sb->system_time); + rx_status.band = + (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), + rx_status.band); + + /* copy the data */ + memcpy(skb_put(skb, size), sb->data, size); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + /* pass it as regular rx to mac80211 */ + ieee80211_rx_napi(mvm->hw, skb, NULL); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1486f33a7..a50f4df7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -69,7 +70,6 @@ #include #include #include -#include #include #include #include @@ -85,7 +85,6 @@ #include "testmode.h" #include "iwl-fw-error-dump.h" #include "iwl-prph.h" -#include "iwl-csr.h" #include "iwl-nvm-parse.h" #include "fw-dbg.h" @@ -837,13 +836,17 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; bool tx_agg_ref = false; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = &params->ssn; + u8 buf_size = params->buf_size; + bool amsdu = params->amsdu; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); @@ -884,10 +887,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); break; case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu(mvm->cfg)) { @@ -904,7 +907,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); + ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, + buf_size, amsdu); break; default: WARN_ON_ONCE(1); @@ -966,7 +970,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) */ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); mvm->scan_status = 0; mvm->ps_disabled = false; @@ -1135,7 +1139,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ flush_work(&mvm->roc_done_wk); - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ @@ -1168,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) mvm->scan_uid_status[i] = 0; } } - - mvm->ucode_loaded = false; } static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) @@ -1761,6 +1763,50 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) } #endif +static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mu_group_mgmt_cmd cmd = {}; + + memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, + WLAN_MEMBERSHIP_LEN); + memcpy(cmd.user_position, vif->bss_conf.mu_group.position, + WLAN_USER_POSITION_LEN); + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + UPDATE_MU_GROUPS_CMD), + 0, sizeof(cmd), &cmd); +} + +static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + if (vif->mu_mimo_owner) { + struct iwl_mu_group_mgmt_notif *notif = _data; + + /* + * MU-MIMO Group Id action frame is little endian. 
We treat + * the data received from firmware as if it came from the + * action frame, so no conversion is needed. + */ + ieee80211_update_mu_groups(vif, + (u8 *)&notif->membership_status, + (u8 *)&notif->user_position); + } +} + +void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_mu_mimo_iface_iterator, notif); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1869,6 +1915,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, vif->addr); } + /* + * The firmware tracks the MU-MIMO group on its own. + * However, on HW restart we should restore this data. + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { + ret = iwl_mvm_update_mu_groups(mvm, vif); + if (ret) + IWL_ERR(mvm, + "failed to update VHT MU_MIMO groups\n"); + } + iwl_mvm_recalc_multicast(mvm); iwl_mvm_configure_bcast_filter(mvm); @@ -1895,7 +1953,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } - if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | + /* + * Send power command on every beacon change, + * because we may not have enabled beacon abort yet. + */ + BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); @@ -2082,7 +2145,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } - } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, @@ -2275,6 +2337,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; + if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) { + vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + return; + } + if (iwlwifi_mod_params.uapsd_disable) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; @@ -2489,10 +2556,8 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS, - 200 + vif->bss_conf.beacon_int); - u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS, - 100 + vif->bss_conf.beacon_int); + u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; + u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (WARN_ON_ONCE(vif->bss_conf.assoc)) return; @@ -2584,7 +2649,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; @@ -2623,8 +2688,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. + * CMAC in AP/IBSS modes must be done in software. 
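The notification handled above mirrors the VHT Group ID Management encoding: one membership bit per group and two position bits per group. A sketch of the lookup, with the exact bit layout stated here as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Assumed VHT MU-MIMO layout, matching the Group ID Management action
 * frame: 64 membership bits, then 2 position bits per group. */
static int in_group(const uint8_t *membership, int group)
{
	return (membership[group / 8] >> (group % 8)) & 1;
}

static int user_position(const uint8_t *position, int group)
{
	int bit = group * 2;

	return (position[bit / 8] >> (bit % 8)) & 0x3;
}

int main(void)
{
	uint8_t membership[8] = { 0 };
	uint8_t position[16] = { 0 };

	membership[1] |= 1 << 2;	/* member of group 10 */
	position[2] |= 2 << 4;		/* group 10 -> bits 20..21 = 2 */

	if (in_group(membership, 10))
		printf("group 10, position %d\n", user_position(position, 10));
	return 0;
}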
*/ - ret = 0; + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + ret = -EOPNOTSUPP; + else + ret = 0; key->hw_key_idx = STA_KEY_IDX_INVALID; break; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ff7c6df9f..9abbc93e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -71,6 +73,10 @@ #include #include +#ifdef CONFIG_THERMAL +#include +#endif + #include "iwl-op-mode.h" #include "iwl-trans.h" #include "iwl-notif-wait.h" @@ -346,8 +352,9 @@ struct iwl_mvm_vif_bf_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @low_latency: indicates that this interface is in low-latency mode - * (VMACLowLatencyMode) + * @low_latency_traffic: indicates low latency traffic was detected + * @low_latency_dbgfs: low latency mode set from debugfs + * @low_latency_vcmd: low latency mode set from vendor command * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -375,7 +382,7 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - bool low_latency; + bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -432,6 +439,7 @@ struct iwl_mvm_vif { struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; + int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; @@ -485,6 +493,12 @@ enum iwl_mvm_scan_type { IWL_SCAN_TYPE_FRAGMENTED, }; +enum iwl_mvm_sched_scan_pass_all_states { + SCHED_SCAN_PASS_ALL_DISABLED, + SCHED_SCAN_PASS_ALL_ENABLED, + SCHED_SCAN_PASS_ALL_FOUND, +}; + /** * struct iwl_nvm_section - describes an NVM section in memory. 
* @@ -515,6 +529,30 @@ struct iwl_mvm_tt_mgmt { bool throttle; }; +#ifdef CONFIG_THERMAL +/** + * struct iwl_mvm_thermal_device - thermal zone related data + * @temp_trips: temperature thresholds for report + * @fw_trips_index: indexes into the original temp_trips array + * @tzone: thermal zone device data + */ +struct iwl_mvm_thermal_device { + s16 temp_trips[IWL_MAX_DTS_TRIPS]; + u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; + struct thermal_zone_device *tzone; +}; + +/* + * struct iwl_mvm_cooling_device + * @cur_state: current state + * @cdev: struct thermal cooling device + */ +struct iwl_mvm_cooling_device { + u32 cur_state; + struct thermal_cooling_device *cdev; +}; +#endif + #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { @@ -645,6 +683,7 @@ struct iwl_mvm { atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; + u32 secret_key[IWL_RSS_HASH_KEY_CNT]; /* configured by mac80211 */ u32 rts_threshold; @@ -654,6 +693,7 @@ struct iwl_mvm { void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; enum iwl_mvm_scan_type scan_type; + enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; @@ -794,6 +834,11 @@ struct iwl_mvm { /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; +#ifdef CONFIG_THERMAL + struct iwl_mvm_thermal_device tz_device; + struct iwl_mvm_cooling_device cooling_dev; +#endif + s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. This option makes the @@ -816,6 +861,7 @@ struct iwl_mvm { /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ + unsigned int max_amsdu_len; /* used for debugfs only */ struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; @@ -856,6 +902,12 @@ struct iwl_mvm { u32 ciphers[6]; struct iwl_mvm_tof_data tof_data; + + /* + * Drop beacons from other APs in AP mode when there are no connected + * clients. + */ + bool drop_bcn_ap_mode; }; /* Extract MVM priv from op_mode and _hw */ @@ -934,8 +986,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) { - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); + /* Make sure DQA isn't allowed in driver until feature is complete */ + return false && fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT); } static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) @@ -1005,10 +1058,40 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) IWL_MVM_BT_COEX_MPLUT; } +static inline +bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && + IWL_MVM_P2P_UAPSD_STANDALONE; +} + static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { - /* firmware flag isn't defined yet */ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); +} + +static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) +{ +#ifdef CONFIG_THERMAL + /* these two TLVs are redundant since the FW takes responsibility for + * CT-kill only after we send at least one temperature thresholds + * report command. 
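iwl_mvm_send_temp_report_ths_cmd() (in tt.c, not part of this excerpt) has to translate the temp_trips array into the temp_report_ths_cmd introduced earlier. A hedged userspace sketch of that packing; the TRIP_UNSET sentinel and the ascending-order requirement are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define IWL_MAX_DTS_TRIPS 8
#define TRIP_UNSET INT16_MIN	/* assumed "no trip configured" marker */

struct temp_report_ths {
	uint32_t num_temps;
	int16_t thresholds[IWL_MAX_DTS_TRIPS];
};

/* Pack all configured trips into the command, in ascending order. */
static void build_ths_cmd(const int16_t trips[IWL_MAX_DTS_TRIPS],
			  struct temp_report_ths *cmd)
{
	int i, j;

	cmd->num_temps = 0;
	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
		int16_t t = trips[i];

		if (t == TRIP_UNSET)
			continue;
		/* insertion sort into the command's thresholds array */
		for (j = cmd->num_temps; j > 0 && cmd->thresholds[j - 1] > t; j--)
			cmd->thresholds[j] = cmd->thresholds[j - 1];
		cmd->thresholds[j] = t;
		cmd->num_temps++;
	}
}

int main(void)
{
	int16_t trips[IWL_MAX_DTS_TRIPS] = {
		90, TRIP_UNSET, 75, TRIP_UNSET, 110,
		TRIP_UNSET, TRIP_UNSET, TRIP_UNSET,
	};
	struct temp_report_ths cmd;
	unsigned int i;

	build_ths_cmd(trips, &cmd);
	printf("num=%u:", (unsigned)cmd.num_temps);
	for (i = 0; i < cmd.num_temps; i++)
		printf(" %d", cmd.thresholds[i]);
	printf("\n");	/* num=3: 75 90 110 */
	return 0;
}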
+ */ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); +#else /* CONFIG_THERMAL */ return false; +#endif /* CONFIG_THERMAL */ +} + +static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; @@ -1143,6 +1226,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue); +int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, + const u8 *data, u32 count); +void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, @@ -1184,6 +1271,12 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, @@ -1420,8 +1513,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. For now, just return * the current desired low-latency state. 
*/ - - return mvmvif->low_latency; + return mvmvif->low_latency_dbgfs || + mvmvif->low_latency_traffic || + mvmvif->low_latency_vcmd; } /* hw scheduler queue config */ @@ -1459,32 +1553,29 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); } -static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, int fifo, - int sta_id, int tid, int frame_limit, - u16 ssn, unsigned int wdg_timeout) +static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = sta_id, - .tid = tid, - .frame_limit = frame_limit, - .aggregate = true, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); + mvm->ucode_loaded = false; + iwl_trans_stop_device(mvm->trans); } +/* Stop/start all mac queues in a given bitmap */ +void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); +void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); -void iwl_mvm_tt_exit(struct iwl_mvm *mvm); +void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); +void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); -int iwl_mvm_get_temp(struct iwl_mvm *mvm); +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); +void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); +int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index a891598c8..9a8cf2f41 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -300,7 +300,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; - u32 mac_addr0, mac_addr1; /* Checking for required sections */ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { @@ -336,10 +335,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) if (WARN_ON(!mvm->cfg)) return NULL; - /* read the mac address from WFMP registers */ - mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); - mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; @@ -352,10 +347,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled, mac_addr0, mac_addr1); + lar_enabled); } #define MAX_NVM_FILE_LEN 16384 diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 89ea70dee..d27839909 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -33,6 +33,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -204,70 +205,107 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } +/** + * enum iwl_rx_handler_context - context for Rx handler + * @RX_HANDLER_SYNC: the handler will be called in the Rx path, which + * can't acquire mvm->mutex. + * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex + * (and only in this case!), it should use this context; it will then + * be called from a worker with mvm->mutex held. + * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the + * mutex itself, it will be called from a worker without mvm->mutex held. + */ +enum iwl_rx_handler_context { + RX_HANDLER_SYNC, + RX_HANDLER_ASYNC_LOCKED, + RX_HANDLER_ASYNC_UNLOCKED, +}; + +/** + * struct iwl_rx_handlers - handler for FW notification + * @cmd_id: command id + * @context: see &enum iwl_rx_handler_context + * @fn: the function called when the notification is received + */ struct iwl_rx_handlers { u16 cmd_id; - bool async; + enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; -#define RX_HANDLER(_cmd_id, _fn, _async) \ - { .cmd_id = _cmd_id , .fn = _fn , .async = _async } -#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ - { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } +#define RX_HANDLER(_cmd_id, _fn, _context) \ + { .cmd_id = _cmd_id, .fn = _fn, .context = _context } +#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \ + { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context } /* * Handlers for fw notifications * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME * This list should be in order of frequency for performance purposes. * - * The handler can be SYNC - this means that it will be called in the Rx path - * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and - * only in this case!), it should be set as ASYNC. In that case, it will be - * called from a worker with mvm->mutex held. 
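A toy model of the dispatch decision that the new context field enables (the real code queues an iwl_async_handler_entry and lets a worker take mvm->mutex as needed; this is only a sketch of the three cases):

#include <stdio.h>

enum rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

/* SYNC handlers run inline on the Rx path; both ASYNC flavours are
 * queued to a worker, which takes the mutex only for ASYNC_LOCKED. */
static void dispatch(enum rx_handler_context ctx)
{
	switch (ctx) {
	case RX_HANDLER_SYNC:
		printf("run inline, no mvm->mutex\n");
		break;
	case RX_HANDLER_ASYNC_LOCKED:
		printf("queue to worker; worker holds mvm->mutex\n");
		break;
	case RX_HANDLER_ASYNC_UNLOCKED:
		printf("queue to worker; handler locks for itself\n");
		break;
	}
}

int main(void)
{
	dispatch(RX_HANDLER_SYNC);
	dispatch(RX_HANDLER_ASYNC_LOCKED);
	dispatch(RX_HANDLER_ASYNC_UNLOCKED);
	return 0;
}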
+ * The handler can be one from three contexts, see &iwl_rx_handler_context */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { - RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), - RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), - - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), - RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true), - RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), + RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC), + RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC), + + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, - iwl_mvm_rx_ant_coupling_notif, true), + iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED), + + RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, + iwl_mvm_window_status_notif, RX_HANDLER_SYNC), - RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), - RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, + RX_HANDLER_SYNC), + RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, + RX_HANDLER_ASYNC_LOCKED), - RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), + RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_ITERATION_COMPLETE, - iwl_mvm_rx_lmac_scan_iter_complete_notif, false), + iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, - iwl_mvm_rx_lmac_scan_complete_notif, true), + iwl_mvm_rx_lmac_scan_complete_notif, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, - false), + RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, - true), + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, - iwl_mvm_rx_umac_scan_iter_complete_notif, false), + iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC), - RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, + RX_HANDLER_SYNC), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, - false), + RX_HANDLER_SYNC), - RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, - iwl_mvm_power_uapsd_misbehaving_ap_notif, false), - RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), + iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC), + RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, - iwl_mvm_temp_notif, true), + iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, + iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, - true), - RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), - RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), - + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, + RX_HANDLER_SYNC), + RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, + RX_HANDLER_ASYNC_LOCKED), + 
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, + iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, + iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -289,6 +327,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(TOF_CMD), HCMD_NAME(TOF_NOTIFICATION), + HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), @@ -344,6 +383,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), + HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), @@ -383,16 +423,37 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(CTDP_CONFIG_CMD), + HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), + HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { + HCMD_NAME(UPDATE_MU_GROUPS_CMD), + HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(MU_GROUP_MGMT_NOTIF), + HCMD_NAME(RX_QUEUES_NOTIFICATION), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { + HCMD_NAME(STORED_BEACON_NTF), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), + [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), + [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), }; - /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); @@ -463,8 +524,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; + trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); } else { op_mode->ops = &iwl_mvm_ops; + trans->rx_mpdu_cmd_hdr_size = + sizeof(struct iwl_rx_mpdu_res_start); if (WARN_ON(trans->num_rx_queues > 1)) goto out_free; @@ -481,6 +545,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->sf_state = SF_UNINIT; mvm->cur_ucode = IWL_UCODE_INIT; + mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); @@ -555,7 +620,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, @@ -575,9 +639,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->cfg->name, mvm->trans->hw_rev); - min_backoff = calc_min_backoff(trans, cfg); - iwl_mvm_tt_initialize(mvm, min_backoff); - if 
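The two new command-name arrays above are flagged as sorted because name lookup is a binary search over the array. A standalone sketch of that lookup pattern (not driver code); the command ids below are illustrative placeholders, not the real fw-api values:

/* look up a command name by id with bsearch(); this is why the
 * arrays must stay sorted by hex value */
#include <stdio.h>
#include <stdlib.h>

struct hcmd_name {
	unsigned char cmd_id;
	const char *name;
};

/* must be sorted by cmd_id */
static const struct hcmd_name data_path_names[] = {
	{ 0x01, "UPDATE_MU_GROUPS_CMD" },
	{ 0x02, "TRIGGER_RX_QUEUES_NOTIF_CMD" },
	{ 0xfe, "MU_GROUP_MGMT_NOTIF" },
	{ 0xff, "RX_QUEUES_NOTIFICATION" },
};

static int cmp_hcmd(const void *key, const void *elem)
{
	return *(const unsigned char *)key -
	       ((const struct hcmd_name *)elem)->cmd_id;
}

int main(void)
{
	unsigned char id = 0xfe;
	const struct hcmd_name *found;

	found = bsearch(&id, data_path_names,
			sizeof(data_path_names) / sizeof(data_path_names[0]),
			sizeof(data_path_names[0]), cmp_hcmd);
	printf("0x%02x -> %s\n", id, found ? found->name : "UNKNOWN");
	return 0;
}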
(iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else @@ -607,7 +668,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) - iwl_trans_stop_device(trans); + iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); /* returns 0 if successful, 1 if success but in rfkill */ @@ -630,22 +691,31 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (err) goto out_free; + min_backoff = calc_min_backoff(trans, cfg); + iwl_mvm_thermal_initialize(mvm, min_backoff); + err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); if (err) goto out_unregister; memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); - /* rpm starts with a taken reference, we can release it now */ - iwl_trans_unref(mvm->trans); + /* The transport always starts with a taken reference, we can + * release it now if d0i3 is supported */ + if (iwl_mvm_is_d0i3_supported(mvm)) + iwl_trans_unref(mvm->trans); iwl_mvm_tof_init(mvm); + /* init RSS hash key */ + get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key)); + return op_mode; out_unregister: ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); + iwl_mvm_thermal_exit(mvm); out_free: flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); @@ -661,9 +731,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; + /* If d0i3 is supported, we have released the reference that + * the transport started with, so we should take it back now + * that we are leaving. + */ + if (iwl_mvm_is_d0i3_supported(mvm)) + iwl_trans_ref(mvm->trans); + iwl_mvm_leds_exit(mvm); - iwl_mvm_tt_exit(mvm); + iwl_mvm_thermal_exit(mvm); ieee80211_unregister_hw(mvm->hw); @@ -692,6 +769,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; + enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; @@ -718,7 +796,6 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) INIT_LIST_HEAD(&local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ - mutex_lock(&mvm->mutex); /* * Sync with Rx path with a lock. 
Remove all the entries from this list, @@ -729,12 +806,15 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { + if (entry->context == RX_HANDLER_ASYNC_LOCKED) + mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); + if (entry->context == RX_HANDLER_ASYNC_LOCKED) + mutex_unlock(&mvm->mutex); kfree(entry); } - mutex_unlock(&mvm->mutex); } static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, @@ -791,7 +871,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (!rx_h->async) { + if (rx_h->context == RX_HANDLER_SYNC) { rx_h->fn(mvm, rxb); return; } @@ -805,6 +885,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, entry->rxb._offset = rxb->_offset; entry->rxb._rx_page_order = rxb->_rx_page_order; entry->fn = rx_h->fn; + entry->context = rx_h->context; spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); @@ -841,28 +922,24 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_phy_cmd_mq(mvm, rxb); + else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP && + pkt->hdr.cmd == RX_QUEUES_NOTIFICATION)) + iwl_mvm_rx_queue_notif(mvm, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } -static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) +void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; int q; - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; - spin_unlock_bh(&mvm->queue_info_lock); - if (WARN_ON_ONCE(!mq)) return; for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) already stopped\n", - queue, q); + "mac80211 %d already stopped\n", q); continue; } @@ -882,24 +959,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, iwl_trans_block_txq_ptrs(mvm->trans, false); } -static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); unsigned long mq; - int q; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; + mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_stop_mac_queues(mvm, mq); +} + +void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) +{ + int q; + if (WARN_ON_ONCE(!mq)) return; for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) still stopped\n", - queue, q); + "mac80211 %d still stopped\n", q); continue; } @@ -907,6 +989,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) } } +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + unsigned long mq; + + spin_lock_bh(&mvm->queue_info_lock); + mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_mvm_start_mac_queues(mvm, mq); +} + void 
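The per-entry locking above replaces the old worker-wide mutex_lock(): each queued entry now carries its handler's context, and only ASYNC_LOCKED entries take mvm->mutex around the callback. A minimal single-threaded model of this dispatch (the worker and queue are simulated, not driver code):

#include <stdio.h>

enum rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
};

struct entry {
	enum rx_handler_context context;
	void (*fn)(const char *what);
};

static void handler(const char *what)
{
	printf("handled %s\n", what);
}

/* queued "notifications" waiting for the worker */
static struct entry queue[8];
static int n_queued;

static void rx_common(enum rx_handler_context ctx, const char *what)
{
	if (ctx == RX_HANDLER_SYNC) {
		handler(what);	/* Rx path: must not take the mutex */
		return;
	}
	if (n_queued == 8)	/* toy bound; the driver allocates per entry */
		return;
	queue[n_queued].context = ctx;
	queue[n_queued].fn = handler;
	n_queued++;
}

static void async_handlers_wk(void)
{
	for (int i = 0; i < n_queued; i++) {
		if (queue[i].context == RX_HANDLER_ASYNC_LOCKED)
			printf("lock mutex\n");
		queue[i].fn("queued notification");
		if (queue[i].context == RX_HANDLER_ASYNC_LOCKED)
			printf("unlock mutex\n");
	}
	n_queued = 0;
}

int main(void)
{
	rx_common(RX_HANDLER_SYNC, "TX_CMD response");
	rx_common(RX_HANDLER_ASYNC_LOCKED, "BT_PROFILE_NOTIFICATION");
	async_handlers_wk();
	return 0;
}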
iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) { if (state) @@ -1196,7 +1290,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; cmd->offloading_tid = iter_data->offloading_tid; cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | - ENABLE_DHCP_FILTERING; + ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON; /* * The d0i3 uCode takes care of the nonqos counters, * so configure only the qos seq ones. @@ -1217,8 +1311,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) struct iwl_wowlan_config_cmd wowlan_config_cmd = { .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE | - IWL_WOWLAN_WAKEUP_BCN_FILTERING), + IWL_WOWLAN_WAKEUP_LINK_CHANGE), }; struct iwl_d3_manager_config d3_cfg_cmd = { .min_sleep_time = cpu_to_le32(1000), @@ -1268,6 +1361,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) /* configure wowlan configuration only if needed */ if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + /* wake on beacons only if beacon storing isn't supported */ + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); + iwl_mvm_wowlan_config_key_params(mvm, d0i3_iter_data.connected_vif, true, flags); @@ -1508,6 +1607,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, rxb, queue); + else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION && + pkt->hdr.group_id == DATA_PATH_GROUP)) + iwl_mvm_rx_queue_notif(mvm, rxb, queue); else iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 9de159f1e..f313910cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
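The d0i3 hunk above drops IWL_WOWLAN_WAKEUP_BCN_FILTERING from the static initializer and adds it back at runtime only when the firmware lacks beacon storing. A sketch of that conditional flag building; the bit positions are stand-ins, not the real fw-api constants:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* illustrative bit positions, not the real fw-api values */
#define WAKEUP_RX_FRAME		(1u << 0)
#define WAKEUP_BEACON_MISS	(1u << 1)
#define WAKEUP_LINK_CHANGE	(1u << 2)
#define WAKEUP_BCN_FILTERING	(1u << 3)

static uint32_t build_wakeup_filter(bool fw_has_beacon_storing)
{
	uint32_t filter = WAKEUP_RX_FRAME | WAKEUP_BEACON_MISS |
			  WAKEUP_LINK_CHANGE;

	/* wake on beacons only if beacon storing isn't supported */
	if (!fw_has_beacon_storing)
		filter |= WAKEUP_BCN_FILTERING;
	return filter;
}

int main(void)
{
	printf("old fw: 0x%x, new fw: 0x%x\n",
	       build_wakeup_filter(false), build_wakeup_filter(true));
	return 0;
}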
* * Redistribution and use in source and binary forms, with or without @@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } +static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *is_p2p_standalone = _data; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + *is_p2p_standalone = false; + break; + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + *is_p2p_standalone = false; + break; + + default: + break; + } +} + static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, ETH_ALEN)) return false; - if (vif->p2p && - !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) - return false; /* * Avoid using uAPSD if P2P client is associated to GO that uses * opportunistic power save. This is due to current FW limitation. @@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, if (iwl_mvm_phy_ctx_count(mvm) >= 2) return false; + if (vif->p2p) { + /* Allow U-APSD only if p2p is stand alone */ + bool is_p2p_standalone = true; + + if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) + return false; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_p2p_standalone_iterator, + &is_p2p_standalone); + + if (!is_p2p_standalone) + return false; + } + return true; } @@ -544,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_power_vifs { struct iwl_mvm *mvm; - struct ieee80211_vif *bf_vif; struct ieee80211_vif *bss_vif; struct ieee80211_vif *p2p_vif; struct ieee80211_vif *ap_vif; @@ -617,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, if (mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) power_iterator->bss_active = true; - - if (mvmvif->bf_data.bf_enabled && - !WARN_ON(power_iterator->bf_vif)) - power_iterator->bf_vif = vif; - break; default: @@ -850,29 +877,9 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); } -static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - - if (!mvmvif->bf_data.bf_enabled) - return 0; - - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) - cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - - mvmvif->bf_data.ba_enabled = enable; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); -} - -int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) +static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags, bool d0i3) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -883,12 +890,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); - if (!ret) + /* don't change bf_enabled in case of temporary d0i3 configuration */ + if (!ret && !d0i3) mvmvif->bf_data.bf_enabled = false; return ret; } +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags) +{ + return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, 
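A standalone model of the "P2P stand-alone" rule introduced above: U-APSD stays allowed on a P2P client only while no AP/GO interface exists and no station interface is associated. The interface list is illustrative:

#include <stdbool.h>
#include <stdio.h>

enum iftype { IF_STATION, IF_AP, IF_P2P_GO, IF_P2P_CLIENT };

struct vif {
	enum iftype type;
	bool assoc;
};

static bool p2p_is_standalone(const struct vif *vifs, int n)
{
	for (int i = 0; i < n; i++) {
		switch (vifs[i].type) {
		case IF_AP:
		case IF_P2P_GO:
			return false;	/* an AP/GO disqualifies U-APSD */
		case IF_STATION:
			if (vifs[i].assoc)
				return false;
			break;
		default:
			break;
		}
	}
	return true;
}

int main(void)
{
	struct vif vifs[] = {
		{ IF_P2P_CLIENT, true },
		{ IF_STATION, false },	/* station exists but not associated */
	};

	printf("standalone: %d\n", p2p_is_standalone(vifs, 2));
	return 0;
}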
false); +} + static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) { bool disable_ps; @@ -918,21 +933,26 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) } static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) + struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif; - bool ba_enable; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; - if (!vifs->bf_vif) + if (!mvmvif->bf_data.bf_enabled) return 0; - mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs->bf_vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); + mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || + mvm->ps_disabled || + !vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); - return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) @@ -953,7 +973,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) @@ -988,7 +1011,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return ret; } - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, @@ -1025,8 +1051,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, IWL_BF_CMD_CONFIG_D0I3, .bf_enable_beacon_filter = cpu_to_le32(1), }; - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, - flags, true); + /* + * When beacon storing is supported - disable beacon filtering + * altogether - the latest beacon will be sent when exiting d0i3 + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, + true); + else + ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, + flags, true); } else { if (mvmvif->bf_data.bf_enabled) ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 0b762b4f8..2141db5bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data { int n_interfaces[MAX_BINDINGS]; int colors[MAX_BINDINGS]; int low_latency[MAX_BINDINGS]; +#ifdef CONFIG_IWLWIFI_DEBUGFS + int dbgfs_min[MAX_BINDINGS]; +#endif int n_low_latency_bindings; struct ieee80211_vif *disabled_vif; }; @@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, data->n_interfaces[id]++; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_quota_min) + data->dbgfs_min[id] = max(data->dbgfs_min[id], + mvmvif->dbgfs_quota_min); +#endif + if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { data->n_low_latency_bindings++; data->low_latency[id] = true; @@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, if (data.n_interfaces[i] <= 0) cmd.quotas[idx].quota = cpu_to_le32(0); +#ifdef CONFIG_IWLWIFI_DEBUGFS + else if (data.dbgfs_min[i]) + cmd.quotas[idx].quota = + cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); +#endif else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i]) /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 94caa88df..61d0a8cd1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate) if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX)) rate_str = legacy_rates[rate->index]; else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) && + (rate->index >= IWL_RATE_MCS_0_INDEX) && (rate->index <= IWL_RATE_MCS_9_INDEX)) rate_str = ht_vht_rates[rate->index]; else @@ -1672,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) } } +static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, + enum rs_action scale_action) +{ + struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + + if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || + tbl->rate.index < IWL_RATE_MCS_5_INDEX || + scale_action == RS_ACTION_DOWNSCALE) + sta_priv->tlc_amsdu = false; + else + sta_priv->tlc_amsdu = true; +} + /* * setup rate table in uCode */ @@ -2062,7 +2077,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, } /* try decreasing first if applicable */ - if (weak != TPC_INVALID) { + if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && + weak != TPC_INVALID) { if (weak_tpt == IWL_INVALID_VALUE && (strong_tpt == IWL_INVALID_VALUE || current_tpt >= strong_tpt)) { @@ -2414,6 +2430,7 @@ lq_update: tbl->rate.index = index; if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); + rs_set_amsdu_len(mvm, sta, tbl, scale_action); rs_update_rate_tbl(mvm, sta, lq_sta, tbl); } @@ -3097,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; lq_sta->lq.sta_id = sta_priv->sta_id; + sta_priv->tlc_amsdu = false; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3656,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, ssize_t ret; struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm_sta *mvmsta = + container_of(lq_sta, struct iwl_mvm_sta, lq_sta); struct iwl_mvm *mvm; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct rs_rate *rate = &tbl->rate; u32 ss_params; + mvm = lq_sta->pers.drv; buff = 
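The debugfs override above scales a per-binding minimum, given in percent, into firmware quota units via dbgfs_min * QUOTA_100 / 100. A sketch of that conversion; QUOTA_100 = 128 is an assumption here, the real value comes from the fw API:

#include <stdio.h>

#define QUOTA_100 128	/* assumed "100%" value; see the fw-api header */

static unsigned int quota_from_percent(unsigned int percent)
{
	return percent * QUOTA_100 / 100;	/* integer scaling, as above */
}

int main(void)
{
	for (unsigned int p = 0; p <= 100; p += 25)
		printf("%3u%% -> %u fw units\n", p, quota_from_percent(p));
	return 0;
}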
kmalloc(2048, GFP_KERNEL); if (!buff) @@ -3685,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : (is_ht80(rate)) ? "80Mhz" : "BAD BW"); - desc += sprintf(buff + desc, " %s %s %s\n", + desc += sprintf(buff + desc, " %s %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", - (lq_sta->is_agg) ? "AGG on" : ""); + (lq_sta->is_agg) ? "AGG on" : "", + (mvmsta->tlc_amsdu) ? "AMSDU on" : ""); } desc += sprintf(buff+desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 145ec68ce..485cfc1a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -322,11 +323,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); - /* - * TSF as indicated by the fw is at INA time, but mac80211 expects the - * TSF at the beginning of the MPDU. - */ - /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/ + + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); @@ -448,6 +447,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif + + if (unlikely((ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) && + mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, crypt_len, rxb); } @@ -622,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); } + +void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_ba_window_status_notif *notif = (void *)pkt->data; + int i; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ONCE(pkt_len != sizeof(*notif), + "Received window status notification of wrong size (%u)\n", + pkt_len)) + return; + + rcu_read_lock(); + for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) { + struct ieee80211_sta *sta; + u8 sta_id, tid; + u64 bitmap; + u32 ssn; + u16 ratid; + u16 received_mpdu; + + ratid = le16_to_cpu(notif->ra_tid[i]); + /* check that this TID is valid */ + if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) + continue; + + received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]); + if (received_mpdu == 0) + continue; + + tid = ratid & BA_WINDOW_STATUS_TID_MSK; + /* get the station */ + sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) + >> BA_WINDOW_STATUS_STA_ID_POS; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) + continue; + bitmap = le64_to_cpu(notif->bitmap[i]); + ssn = le32_to_cpu(notif->start_seq_num[i]); + + /* update mac80211 with the bitmap for the reordering buffer */ + 
ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, + received_mpdu); + } + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 0c073e02f..9a54f2d2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { - unsigned int hdrlen, fraglen; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; + unsigned int headlen, fraglen, pad_len = 0; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) + pad_len = 2; + len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ - hdrlen = (len <= skb_tailroom(skb)) ? len : - sizeof(*hdr) + crypt_len + 8; + headlen = (len <= skb_tailroom(skb)) ? len : + hdrlen + crypt_len + 8; + /* The firmware may align the packet to DWORD. + * The padding is inserted after the IV. + * After copying the header + IV skip the padding if + * present before copying packet data. + */ + hdrlen += crypt_len; memcpy(skb_put(skb, hdrlen), hdr, hdrlen); - fraglen = len - hdrlen; + memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len, + headlen - hdrlen); + + fraglen = len - headlen; if (fraglen) { - int offset = (void *)hdr + hdrlen - + int offset = (void *)hdr + headlen + pad_len - rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, @@ -201,25 +217,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc, struct ieee80211_rx_status *rx_status) { - int energy_a, energy_b, energy_c, max_energy; + int energy_a, energy_b, max_energy; energy_a = desc->energy_a; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = desc->energy_b; energy_b = energy_b ? -energy_b : S8_MIN; - energy_c = desc->energy_c; - energy_c = energy_c ? 
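The iwl_mvm_create_skb() rework above handles the 2-byte DWORD pad the device may insert after the header + IV: header and payload are now copied separately so the pad can be skipped. A standalone model with toy sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 4-byte "header", 2 pad bytes, 4-byte payload */
	const unsigned char rx[] = { 'H','D','R','0', 0,0, 'P','A','Y','L' };
	unsigned int hdrlen = 4, pad_len = 2;
	unsigned int len = sizeof(rx) - pad_len;	/* pad doesn't count */
	unsigned char skb[16];

	memcpy(skb, rx, hdrlen);			/* header (+IV) */
	memcpy(skb + hdrlen, rx + hdrlen + pad_len,	/* skip the pad */
	       len - hdrlen);
	printf("%.*s\n", (int)len, skb);		/* prints HDR0PAYL */
	return 0;
}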
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); - max_energy = max(max_energy, energy_c); - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", - energy_a, energy_b, energy_c, max_energy); + IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", + energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = 0; /* TODO: phy info */ rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = energy_c; + rx_status->chain_signal[2] = S8_MIN; } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, @@ -288,13 +301,121 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, skb->ip_summed = CHECKSUM_UNNECESSARY; } +/* + * returns true if a packet outside BA session is a duplicate and + * should be dropped + */ +static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, + struct ieee80211_rx_status *rx_status, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc) +{ + struct iwl_mvm_sta *mvm_sta; + struct iwl_mvm_rxq_dup_data *dup_data; + u8 baid, tid, sub_frame_idx; + + if (WARN_ON(IS_ERR_OR_NULL(sta))) + return false; + + baid = (le32_to_cpu(desc->reorder_data) & + IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT; + + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) + return false; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + dup_data = &mvm_sta->dup_data[queue]; + + /* + * Drop duplicate 802.11 retransmissions + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") + */ + if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) { + rx_status->flag |= RX_FLAG_DUP_VALIDATED; + return false; + } + + if (ieee80211_is_data_qos(hdr->frame_control)) + /* frame has qos control */ + tid = *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_TID_MASK; + else + tid = IWL_MAX_TID_COUNT; + + /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ + sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + dup_data->last_seq[tid] == hdr->seq_ctrl && + dup_data->last_sub_frame[tid] >= sub_frame_idx)) + return true; + + dup_data->last_seq[tid] = hdr->seq_ctrl; + dup_data->last_sub_frame[tid] = sub_frame_idx; + + rx_status->flag |= RX_FLAG_DUP_VALIDATED; + + return false; +} + +int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, + const u8 *data, u32 count) +{ + struct iwl_rxq_sync_cmd *cmd; + u32 data_size = sizeof(*cmd) + count; + int ret; + + /* should be DWORD aligned */ + if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE)) + return -EINVAL; + + cmd = kzalloc(data_size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->rxq_mask = cpu_to_le32(rxq_mask); + cmd->count = cpu_to_le32(count); + cmd->flags = 0; + memcpy(cmd->payload, data, count); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + TRIGGER_RX_QUEUES_NOTIF_CMD), + 0, data_size, cmd); + + kfree(cmd); + return ret; +} + +void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + int queue) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rxq_sync_notification *notif; + struct iwl_mvm_internal_rxq_notif *internal_notif; + + notif = (void *)pkt->data; + internal_notif = (void *)notif->payload; + + switch (internal_notif->type) { + case IWL_MVM_RXQ_NOTIF_DEL_BA: + /* TODO */ + break; + default: + WARN_ONCE(1, "Invalid identifier %d", 
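A minimal model of the new non-BA duplicate check above: a frame is treated as a duplicate when the retry bit is set, the sequence control equals the last one seen for that TID, and the A-MSDU subframe index has not advanced:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TID 8

struct dup_data {
	unsigned short last_seq[MAX_TID + 1];
	unsigned char last_sub_frame[MAX_TID + 1];
};

static bool is_dup(struct dup_data *d, unsigned int tid, bool retry,
		   unsigned short seq_ctrl, unsigned char sub_frame_idx)
{
	if (retry && d->last_seq[tid] == seq_ctrl &&
	    d->last_sub_frame[tid] >= sub_frame_idx)
		return true;

	/* remember the newest frame for the next comparison */
	d->last_seq[tid] = seq_ctrl;
	d->last_sub_frame[tid] = sub_frame_idx;
	return false;
}

int main(void)
{
	struct dup_data d = {0};

	printf("%d\n", is_dup(&d, 0, false, 0x10, 0));	/* 0: first rx */
	printf("%d\n", is_dup(&d, 0, true, 0x10, 0));	/* 1: retransmission */
	printf("%d\n", is_dup(&d, 0, true, 0x20, 0));	/* 0: new sequence */
	return 0;
}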
internal_notif->type); + } +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - struct ieee80211_hdr *hdr = (void *)(desc + 1); + struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); u32 len = le16_to_cpu(desc->mpdu_len); u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); struct ieee80211_sta *sta = NULL; @@ -335,6 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->freq = ieee80211_channel_to_frequency(desc->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, desc, rx_status); + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; rcu_read_lock(); @@ -390,6 +513,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); + + if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { + kfree_skb(skb); + rcu_read_unlock(); + return; + } + + /* + * Our hardware de-aggregates AMSDUs but copies the mac header + * as is to the de-aggregated MPDUs. We need to turn off the + * AMSDU bit in the QoS control ourselves. + */ + if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && + !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index ea1e177c2..09eb72c4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); + + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { + IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); + ieee80211_sched_scan_results(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; + } } void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, @@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ?
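Sketch of the QoS-control fix-up above: hardware de-aggregation leaves the A-MSDU-present bit set in the copied header, so the driver clears it before handing the frame to mac80211. The masks below follow the IEEE 802.11 QoS control layout but are written out as plain constants rather than the mac80211 defines:

#include <stdio.h>

#define QOS_CTL_TID_MASK	0x000f
#define QOS_CTL_A_MSDU_PRESENT	0x0080

int main(void)
{
	unsigned char qc = 0x85;	/* TID 5 with the A-MSDU bit set */

	qc &= ~QOS_CTL_A_MSDU_PRESENT;	/* subframe is a plain MPDU now */
	printf("tid=%u amsdu=%u\n", qc & QOS_CTL_TID_MASK,
	       !!(qc & QOS_CTL_A_MSDU_PRESENT));
	return 0;
}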
"aborted" : "completed", @@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Sending scheduled scan with filtering, n_match_sets %d\n", req->n_match_sets); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; return false; } IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); + + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; return true; } @@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; #endif + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) + flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; + if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && params->type != IWL_SCAN_TYPE_FRAGMENTED) @@ -930,8 +945,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) + if (type == mvm->scan_type) { + IWL_DEBUG_SCAN(mvm, + "Ignoring UMAC scan config of the same type\n"); return 0; + } cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; @@ -1071,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #endif + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; + if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && params->type != IWL_SCAN_TYPE_FRAGMENTED) @@ -1109,7 +1130,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, vif)); - if (type == IWL_MVM_SCAN_SCHED) + if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) @@ -1298,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, return -EBUSY; } - /* we don't support "match all" in the firmware */ - if (!req->n_match_sets) - return -EOPNOTSUPP; - ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; @@ -1355,7 +1372,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, type); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); @@ -1397,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; @@ -1431,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); + + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { + IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); + ieee80211_sched_scan_results(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; + } } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) @@ -1525,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); if (uid >= 0 && !mvm->restart_fw) { 
ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } @@ -1546,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * restart_hw, so do not report if FW is about to be * restarted. */ - if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw) + if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && + !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; + } } } @@ -1583,6 +1611,7 @@ out: ieee80211_scan_completed(mvm->hw, true); } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b556e3365..ef99942d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,18 @@ #include "sta.h" #include "rs.h" +/* + * New version of ADD_STA_sta command added new fields at the end of the + * structure, so sending the size of the relevant API's structure is enough to + * support both API versions. + */ +static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) +{ + return iwl_mvm_has_new_rx_api(mvm) ? 
+ sizeof(struct iwl_mvm_add_sta_cmd) : + sizeof(struct iwl_mvm_add_sta_cmd_v7); +} + static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { @@ -187,12 +201,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; @@ -265,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; lockdep_assert_held(&mvm->mutex); @@ -312,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, } mvm_sta->agg_tids = 0; + if (iwl_mvm_has_new_rx_api(mvm) && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + dup_data = kcalloc(mvm->trans->num_rx_queues, + sizeof(*dup_data), + GFP_KERNEL); + if (!dup_data) + return -ENOMEM; + mvm_sta->dup_data = dup_data; + } + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); if (ret) goto err; @@ -357,12 +383,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id); @@ -492,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_rx_api(mvm)) + kfree(mvm_sta->dup_data); + if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == mvm_sta->sta_id) { ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); @@ -623,12 +653,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, if (addr) memcpy(cmd.addr, addr, ETH_ALEN); - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; @@ -819,7 +850,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #define IWL_MAX_RX_BA_SESSIONS 16 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start) + int tid, u16 ssn, bool start, u8 buf_size) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -839,6 +870,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + cmd.rx_ba_window = cpu_to_le16((u16)buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } @@ -846,12 +878,13 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, STA_MODIFY_REMOVE_BA_TID; status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, 
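The iwl_mvm_add_sta_cmd_size() helper above works because the newer ADD_STA layout only appends fields, so sending sizeof() of the older structure keeps older firmware happy. A sketch with an illustrative layout (these are not the real ADD_STA fields):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct add_sta_cmd_v7 {
	uint8_t sta_id;
	uint8_t tid;
	uint16_t ssn;
};

struct add_sta_cmd {		/* newer API: v7 plus trailing fields */
	uint8_t sta_id;
	uint8_t tid;
	uint16_t ssn;
	uint16_t rx_ba_window;	/* appended, so v7 prefix is unchanged */
};

static size_t add_sta_cmd_size(bool new_rx_api)
{
	return new_rx_api ? sizeof(struct add_sta_cmd) :
			    sizeof(struct add_sta_cmd_v7);
}

int main(void)
{
	printf("old fw: %zu bytes, new fw: %zu bytes\n",
	       add_sta_cmd_size(false), add_sta_cmd_size(true));
	return 0;
}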
ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); @@ -904,12 +937,13 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: @@ -1011,15 +1045,23 @@ release_locks: } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u8 buf_size, + bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); - int queue, fifo, ret; + int queue, ret; u16 ssn; + struct iwl_trans_txq_scd_cfg cfg = { + .sta_id = mvmsta->sta_id, + .tid = tid, + .frame_limit = buf_size, + .aggregate = true, + }; + BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); @@ -1031,13 +1073,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; + tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); - fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - iwl_mvm_enable_agg_txq(mvm, queue, - vif->hw_queue[tid_to_mac80211_ac[tid]], fifo, - mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout); + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]], + ssn, &cfg, wdg_timeout); ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) @@ -1640,7 +1682,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1731,7 +1774,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, - sizeof(cmd), &cmd); + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1766,7 +1809,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 39fdf5224..1a8f69a41 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -258,8 +258,7 @@ enum iwl_mvm_agg_state { * This is basically (last acked packet++). * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 
- * @reduced_tpc: Reduced tx power. Holds the data between the - * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). + * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. * @txq_id: Tx queue used by the BA session * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or @@ -273,7 +272,7 @@ struct iwl_mvm_tid_data { u16 next_reclaimed; /* The rest is Tx AGG related */ u32 rate_n_flags; - u8 reduced_tpc; + bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; @@ -293,6 +292,16 @@ struct iwl_mvm_key_pn { } ____cacheline_aligned_in_smp q[]; }; +/** + * struct iwl_mvm_rxq_dup_data - per station per rx queue data + * @last_seq: last sequence per tid for duplicate packet detection + * @last_sub_frame: last subframe packet + */ +struct iwl_mvm_rxq_dup_data { + __le16 last_seq[IWL_MAX_TID_COUNT + 1]; + u8 last_sub_frame[IWL_MAX_TID_COUNT + 1]; +} ____cacheline_aligned_in_smp; + /** * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) @@ -311,6 +320,7 @@ struct iwl_mvm_key_pn { * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? + * @tlc_amsdu: true if A-MSDU is allowed * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. This is useful in case the queue @@ -318,6 +328,7 @@ struct iwl_mvm_key_pn { * we are sending frames from an AMPDU queue and there was a hole in * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures + * @dup_data: per queue duplicate packet detection data * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. 
This structure is placed in that @@ -337,14 +348,15 @@ struct iwl_mvm_sta { struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; - struct iwl_mvm_key_pn __rcu *ptk_pn[4]; + struct iwl_mvm_rxq_dup_data *dup_data; /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; bool disable_tx; + bool tlc_amsdu; u8 agg_tids; u8 sleep_tx_count; }; @@ -401,11 +413,12 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start); + int tid, u16 ssn, bool start, u8 buf_size); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size); + struct ieee80211_sta *sta, u16 tid, u8 buf_size, + bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 924dd6a41..2c12789e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -371,20 +371,13 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, iwl_mvm_te_check_trigger(mvm, notif, te_data); - if (!le32_to_cpu(notif->status)) { - IWL_DEBUG_TE(mvm, - "ERROR: Aux ROC Time Event %s notification failure\n", - (le32_to_cpu(notif->action) & - TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end"); - return -EINVAL; - } - IWL_DEBUG_TE(mvm, - "Aux ROC time event notification - UID = 0x%x action %d\n", + "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n", le32_to_cpu(notif->unique_id), - le32_to_cpu(notif->action)); + le32_to_cpu(notif->action), le32_to_cpu(notif->status)); - if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { + if (!le32_to_cpu(notif->status) || + le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { /* End TE, notify mac80211 */ ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_roc_finished(mvm); /* flush aux queue */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index 99d9a35ad..3d2e8b615 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -115,7 +115,7 @@ * needed by the driver. */ -#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500 +#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600 #define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400 /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index fb76004ee..f1f28255a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -64,6 +65,8 @@ * *****************************************************************************/ +#include <linux/sort.h> + #include "mvm.h" #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ @@ -79,8 +82,10 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) IWL_ERR(mvm, "Enter CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, true); - tt->throttle = false; - tt->dynamic_smps = false; + if (!iwl_mvm_is_tt_in_fw(mvm)) { + tt->throttle = false; + tt->dynamic_smps = false; + } /* Don't schedule an exit work if we're in test mode, since * the temperature will not change unless we manually set it @@ -116,18 +121,21 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_dts_measurement_notif *notif; + struct iwl_dts_measurement_notif_v1 *notif_v1; int len = iwl_rx_packet_payload_len(pkt); int temp; - if (WARN_ON_ONCE(len < sizeof(*notif))) { + /* we can use notif_v1 only, because v2 only adds an additional + * parameter, which is not used in this function. + */ + if (WARN_ON_ONCE(len < sizeof(*notif_v1))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); return -EINVAL; } - notif = (void *)pkt->data; + notif_v1 = (void *)pkt->data; - temp = le32_to_cpu(notif->temp); + temp = le32_to_cpu(notif_v1->temp); /* shouldn't be negative, but since it's s32, make sure it isn't */ if (WARN_ON_ONCE(temp < 0)) @@ -158,17 +166,78 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_dts_measurement_notif_v2 *notif_v2; + int len = iwl_rx_packet_payload_len(pkt); int temp; + u32 ths_crossed; /* the notification is handled synchronously in ctkill, so skip here */ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; temp = iwl_mvm_temp_notif_parse(mvm, pkt); - if (temp < 0) + + if (!iwl_mvm_is_tt_in_fw(mvm)) { + if (temp >= 0) + iwl_mvm_tt_temp_changed(mvm, temp); return; + } + + if (WARN_ON_ONCE(len < sizeof(*notif_v2))) { + IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); + return; + } + + notif_v2 = (void *)pkt->data; + ths_crossed = le32_to_cpu(notif_v2->threshold_idx); - iwl_mvm_tt_temp_changed(mvm, temp); + /* 0xFF in ths_crossed means the notification is not related + * to a trip, so we can ignore it here. + */ + if (ths_crossed == 0xFF) + return; + + IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n", + temp, ths_crossed); + +#ifdef CONFIG_THERMAL + if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) + return; + + /* + * We are now handling a temperature notification from the firmware + * in ASYNC and hold the mutex. thermal_notify_framework will call + * us back through get_temp() which ought to send a SYNC command to + * the firmware and hence take the mutex. + * Avoid the deadlock by unlocking the mutex here.
+ */ + if (mvm->tz_device.tzone) { + struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; + + mutex_unlock(&mvm->mutex); + thermal_notify_framework(tz_dev->tzone, + tz_dev->fw_trips_index[ths_crossed]); + mutex_lock(&mvm->mutex); + } +#endif /* CONFIG_THERMAL */ +} + +void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct ct_kill_notif *notif; + int len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON_ONCE(len != sizeof(*notif))) { + IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n"); + return; + } + + notif = (struct ct_kill_notif *)pkt->data; + IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n", + notif->temperature); + + iwl_mvm_enter_ctkill(mvm); } static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) @@ -194,12 +263,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); } -int iwl_mvm_get_temp(struct iwl_mvm *mvm) +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) { struct iwl_notification_wait wait_temp_notif; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; - int ret, temp; + int ret; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; @@ -208,7 +277,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), - iwl_mvm_temp_notif_wait, &temp); + iwl_mvm_temp_notif_wait, temp); ret = iwl_mvm_get_temp_cmd(mvm); if (ret) { @@ -219,12 +288,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); - if (ret) { + if (ret) IWL_ERR(mvm, "Getting the temperature timed out\n"); - return ret; - } - return temp; + return ret; } static void check_exit_ctkill(struct work_struct *work) @@ -233,10 +300,17 @@ static void check_exit_ctkill(struct work_struct *work) struct iwl_mvm *mvm; u32 duration; s32 temp; + int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); + if (iwl_mvm_is_tt_in_fw(mvm)) { + iwl_mvm_exit_ctkill(mvm); + + return; + } + duration = tt->params.ct_kill_duration; mutex_lock(&mvm->mutex); @@ -250,13 +324,13 @@ static void check_exit_ctkill(struct work_struct *work) goto reschedule; } - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); __iwl_mvm_mac_stop(mvm); - if (temp < 0) + if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); @@ -436,7 +510,378 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = { .support_tx_backoff = true, }; -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) +/* budget in mWatt */ +static const u32 iwl_mvm_cdev_budgets[] = { + 2000, /* cooling state 0 */ + 1800, /* cooling state 1 */ + 1600, /* cooling state 2 */ + 1400, /* cooling state 3 */ + 1200, /* cooling state 4 */ + 1000, /* cooling state 5 */ + 900, /* cooling state 6 */ + 800, /* cooling state 7 */ + 700, /* cooling state 8 */ + 650, /* cooling state 9 */ + 600, /* cooling state 10 */ + 550, /* cooling state 11 */ + 500, /* cooling state 12 */ + 450, /* cooling state 13 */ + 400, /* cooling state 14 */ + 350, /* cooling state 15 */ + 300, /* cooling state 16 */ + 250, /* cooling state 17 */ + 200, /* cooling state 18 */ + 150, /* cooling state 19 */ +}; + +int 
iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) +{ + struct iwl_mvm_ctdp_cmd cmd = { + .operation = cpu_to_le32(op), + .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), + .window_size = 0, + }; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, + CTDP_CONFIG_CMD), + sizeof(cmd), &cmd, &status); + + if (ret) { + IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); + return ret; + } + + switch (op) { + case CTDP_CMD_OPERATION_START: +#ifdef CONFIG_THERMAL + mvm->cooling_dev.cur_state = state; +#endif /* CONFIG_THERMAL */ + break; + case CTDP_CMD_OPERATION_REPORT: + IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status); + /* when the function is called with CTDP_CMD_OPERATION_REPORT + * option the function should return the average budget value + * that is received from the FW. + * The budget can't be less than or equal to 0, so it's possible + * to distinguish between error values and budgets. + */ + return status; + case CTDP_CMD_OPERATION_STOP: + IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n"); + break; + } + + return 0; +} + +#ifdef CONFIG_THERMAL +static int compare_temps(const void *a, const void *b) +{ + return ((s16)le16_to_cpu(*(__le16 *)a) - + (s16)le16_to_cpu(*(__le16 *)b)); +} + +int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) +{ + struct temp_report_ths_cmd cmd = {0}; + int ret, i, j, idx = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!mvm->tz_device.tzone) + return -EINVAL; + + /* The driver holds an array of temperature trips that are unsorted + * and uncompressed, the FW should get it compressed and sorted + */ + + /* compress temp_trips to cmd array, remove uninitialized values */ + for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { + if (mvm->tz_device.temp_trips[i] != S16_MIN) { + cmd.thresholds[idx++] = + cpu_to_le16(mvm->tz_device.temp_trips[i]); + } + } + cmd.num_temps = cpu_to_le32(idx); + + if (!idx) + goto send; + + /* sort cmd array */ + sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL); + + /* we should save the indexes of trips because we sort + * and compress the original array + */ + for (i = 0; i < idx; i++) { + for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { + if (le16_to_cpu(cmd.thresholds[i]) == + mvm->tz_device.temp_trips[j]) + mvm->tz_device.fw_trips_index[i] = j; + } + } + +send: + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, + TEMP_REPORTING_THRESHOLDS_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n", + ret); + + return ret; +} + +static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, + int *temperature) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + int ret; + int temp; + + mutex_lock(&mvm->mutex); + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { + ret = -EIO; + goto out; + } + + ret = iwl_mvm_get_temp(mvm, &temp); + if (ret) + goto out; + + *temperature = temp * 1000; + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device, + int trip, int *temp) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) + return -EINVAL; + + *temp = mvm->tz_device.temp_trips[trip] * 1000; + + return 0; +} + +static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device, + int trip, enum thermal_trip_type *type) +{ + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) + return -EINVAL; + + *type = THERMAL_TRIP_PASSIVE; + + 
return 0; +} + +static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, + int trip, int temp) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm_thermal_device *tzone; + int i, ret; + s16 temperature; + + mutex_lock(&mvm->mutex); + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { + ret = -EIO; + goto out; + } + + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) { + ret = -EINVAL; + goto out; + } + + if ((temp / 1000) > S16_MAX) { + ret = -EINVAL; + goto out; + } + + temperature = (s16)(temp / 1000); + tzone = &mvm->tz_device; + + if (!tzone) { + ret = -EIO; + goto out; + } + + /* no updates */ + if (tzone->temp_trips[trip] == temperature) { + ret = 0; + goto out; + } + + /* already existing temperature */ + for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { + if (tzone->temp_trips[i] == temperature) { + ret = -EINVAL; + goto out; + } + } + + tzone->temp_trips[trip] = temperature; + + ret = iwl_mvm_send_temp_report_ths_cmd(mvm); +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static struct thermal_zone_device_ops tzone_ops = { + .get_temp = iwl_mvm_tzone_get_temp, + .get_trip_temp = iwl_mvm_tzone_get_trip_temp, + .get_trip_type = iwl_mvm_tzone_get_trip_type, + .set_trip_temp = iwl_mvm_tzone_set_trip_temp, +}; + +/* make all trips writable */ +#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1) + +static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) +{ + int i; + char name[] = "iwlwifi"; + + if (!iwl_mvm_is_tt_in_fw(mvm)) { + mvm->tz_device.tzone = NULL; + + return; + } + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + mvm->tz_device.tzone = thermal_zone_device_register(name, + IWL_MAX_DTS_TRIPS, + IWL_WRITABLE_TRIPS_MSK, + mvm, &tzone_ops, + NULL, 0, 0); + if (IS_ERR(mvm->tz_device.tzone)) { + IWL_DEBUG_TEMP(mvm, + "Failed to register to thermal zone (err = %ld)\n", + PTR_ERR(mvm->tz_device.tzone)); + mvm->tz_device.tzone = NULL; + return; + } + + /* 0 is a valid temperature, + * so initialize the array with S16_MIN, which is an invalid temperature + */ + for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) + mvm->tz_device.temp_trips[i] = S16_MIN; +} + +static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1; + + return 0; +} + +static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); + + if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) + return -EBUSY; + + *state = mvm->cooling_dev.cur_state; + + return 0; +} + +static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long new_state) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); + int ret; + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) + return -EIO; + + if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) + return -EBUSY; + + mutex_lock(&mvm->mutex); + + if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { + ret = -EINVAL; + goto unlock; + } + + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + new_state); + +unlock: + mutex_unlock(&mvm->mutex); + return ret; +} + +static struct thermal_cooling_device_ops tcooling_ops = { + .get_max_state = iwl_mvm_tcool_get_max_state, + .get_cur_state = iwl_mvm_tcool_get_cur_state, + .set_cur_state = iwl_mvm_tcool_set_cur_state, +}; + +static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) +{ + char name[] = "iwlwifi"; + + if 
(!iwl_mvm_is_ctdp_supported(mvm)) + return; + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + mvm->cooling_dev.cdev = + thermal_cooling_device_register(name, + mvm, + &tcooling_ops); + + if (IS_ERR(mvm->cooling_dev.cdev)) { + IWL_DEBUG_TEMP(mvm, + "Failed to register to cooling device (err = %ld)\n", + PTR_ERR(mvm->cooling_dev.cdev)); + mvm->cooling_dev.cdev = NULL; + return; + } +} + +static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) +{ + if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone) + return; + + IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); + thermal_zone_device_unregister(mvm->tz_device.tzone); + mvm->tz_device.tzone = NULL; +} + +static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) +{ + if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev) + return; + + IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); + thermal_cooling_device_unregister(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; +} +#endif /* CONFIG_THERMAL */ + +void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; @@ -451,10 +896,20 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) tt->dynamic_smps = false; tt->min_backoff = min_backoff; INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); + +#ifdef CONFIG_THERMAL + iwl_mvm_cooling_device_register(mvm); + iwl_mvm_thermal_zone_register(mvm); +#endif } -void iwl_mvm_tt_exit(struct iwl_mvm *mvm) +void iwl_mvm_thermal_exit(struct iwl_mvm *mvm) { cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); + +#ifdef CONFIG_THERMAL + iwl_mvm_cooling_device_unregister(mvm); + iwl_mvm_thermal_zone_unregister(mvm); +#endif } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a040edc55..34731e29c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -65,6 +66,7 @@ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <linux/tcp.h> +#include <net/ip.h> #include "iwl-trans.h" #include "iwl-eeprom-parse.h" @@ -103,6 +105,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); @@ -182,7 +185,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted */ - tx_cmd->len = cpu_to_le16((u16)skb->len); + tx_cmd->len = cpu_to_le16((u16)skb->len + + (uintptr_t)skb_info->driver_data[0]); tx_cmd->next_frame_len = 0; tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; @@ -299,6 +303,8 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + pn = atomic64_inc_return(&keyconf->tx_pn); + ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; @@ -322,10 +328,11 @@ */ static struct iwl_device_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, - int hdrlen, struct ieee80211_sta *sta, u8 sta_id) + struct ieee80211_tx_info *info, int hdrlen, + struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; @@ -345,10 +352,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); - memset(&info->status, 0, sizeof(info->status)); - memset(info->driver_data, 0, sizeof(info->driver_data)); + memset(&skb_info->status, 0, sizeof(skb_info->status)); + memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); - info->driver_data[1] = dev_cmd; + skb_info->driver_data[1] = dev_cmd; return dev_cmd; } @@ -356,20 +363,26 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info info; struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) + memcpy(&info, skb->cb, sizeof(info)); + + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; - if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && - (!info->control.vif || - info->hw_queue != info->control.vif->cab_queue))) + if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && + (!info.control.vif || + info.hw_queue != info.control.vif->cab_queue))) return -1; + /* This holds the amsdu headers length */ + skb_info->driver_data[0] = (void *)(uintptr_t)0; + /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC 
packets that can be used * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel @@ -377,7 +390,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * and hence needs to be sent on the aux queue */ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - info->control.vif->type == NL80211_IFTYPE_STATION) + info.control.vif->type == NL80211_IFTYPE_STATION) IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; /* @@ -390,14 +403,14 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * AUX station. */ sta_id = mvm->aux_sta.sta_id; - if (info->control.vif) { + if (info.control.vif) { struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); + iwl_mvm_vif_from_mac80211(info.control.vif); - if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info->control.vif->type == NL80211_IFTYPE_AP) + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info.control.vif->type == NL80211_IFTYPE_AP) sta_id = mvmvif->bcast_sta.sta_id; - else if (info->control.vif->type == NL80211_IFTYPE_STATION && + else if (info.control.vif->type == NL80211_IFTYPE_STATION && is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); @@ -406,19 +419,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue); - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; - /* From now on, we cannot access info->control */ tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } @@ -435,42 +447,208 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return 0; } -static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso, +#ifdef CONFIG_INET +static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int mss = skb_shinfo(skb)->gso_size; struct sk_buff *tmp, *next; - char cb[sizeof(skb_gso->cb)]; + char cb[sizeof(skb->cb)]; + unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; + bool ipv4 = (skb->protocol == htons(ETH_P_IP)); + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; + u16 amsdu_add, snap_ip_tcp, pad, i = 0; + unsigned int dbg_max_amsdu_len; + u8 *qc, tid, txf; + + snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + + tcp_hdrlen(skb); + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + + if (!sta->max_amsdu_len || + !ieee80211_is_data_qos(hdr->frame_control) || + !mvmsta->tlc_amsdu) { + num_subframes = 1; + pad = 0; + goto segment; + } + + /* + * No need to lock amsdu_in_ampdu_allowed since it can't be modified + * during an BA session. 
+ */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) { + num_subframes = 1; + pad = 0; + goto segment; + } + + max_amsdu_len = sta->max_amsdu_len; + dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + + /* the Tx FIFO to which this A-MSDU will be routed */ + txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + max_amsdu_len = min_t(unsigned int, max_amsdu_len, + mvm->shared_mem_cfg.txfifo_size[txf] - 256); + + if (dbg_max_amsdu_len) + max_amsdu_len = min_t(unsigned int, max_amsdu_len, + dbg_max_amsdu_len); - memcpy(cb, skb_gso->cb, sizeof(cb)); - next = skb_gso_segment(skb_gso, 0); - if (IS_ERR(next)) + /* + * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not + * supported. This is a spec requirement (IEEE 802.11-2015 + * section 8.7.3 NOTE 3). + */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + !sta->vht_cap.vht_supported) + max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); + + /* Sub frame header + SNAP + IP header + TCP header + MSS */ + subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; + pad = (4 - subf_len) & 0x3; + + /* + * If we have N subframes in the A-MSDU, then the A-MSDU's size is + * N * subf_len + (N - 1) * pad. + */ + num_subframes = (max_amsdu_len + pad) / (subf_len + pad); + if (num_subframes > 1) + *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - + tcp_hdrlen(skb) + skb->data_len; + + /* + * Make sure we have enough TBs for the A-MSDU: + * 2 for each subframe + * 1 more for each fragment + * 1 more for the potential data in the header + */ + num_subframes = + min_t(unsigned int, num_subframes, + (mvm->trans->max_skb_frags - 1 - + skb_shinfo(skb)->nr_frags) / 2); + + /* This skb fits in one single A-MSDU */ + if (num_subframes * mss >= tcp_payload_len) { + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); + + /* + * Compute the length of all the data added for the A-MSDU. + * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n -1 subframes and + * ETH header for n subframes. Note that the original skb + * already had one set of SNAP / IP / TCP headers. + */ + num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); + amsdu_add = num_subframes * sizeof(struct ethhdr) + + (num_subframes - 1) * (snap_ip_tcp + pad); + /* This holds the amsdu headers length */ + skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add; + + __skb_queue_tail(mpdus_skb, skb); + return 0; + } + + /* + * Trick the segmentation function to make it + * create SKBs that can fit into one A-MSDU. + */ +segment: + skb_shinfo(skb)->gso_size = num_subframes * mss; + memcpy(cb, skb->cb, sizeof(cb)); + + next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG); + skb_shinfo(skb)->gso_size = mss; + if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) - consume_skb(skb_gso); + consume_skb(skb); while (next) { tmp = next; next = tmp->next; + memcpy(tmp->cb, cb, sizeof(tmp->cb)); + /* + * Compute the length of all the data added for the A-MSDU. + * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n -1 subframes and + * ETH header for n subframes. 
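The two sizing comments above pin down the whole computation, and it is easy to check with concrete numbers. A standalone sketch (the figures are hypothetical; only the formulas come from the code above):

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;		/* typical TCP MSS */
	unsigned int eth_hdr = 14;		/* sizeof(struct ethhdr) */
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP, no options */
	unsigned int max_amsdu_len = 7935;	/* example HT A-MSDU limit */

	/* subframe header + SNAP + IP header + TCP header + MSS */
	unsigned int subf_len = eth_hdr + snap_ip_tcp + mss;
	/* pad each subframe to a 4-byte boundary */
	unsigned int pad = (4 - subf_len) & 0x3;
	/* N subframes occupy N * subf_len + (N - 1) * pad bytes */
	unsigned int n = (max_amsdu_len + pad) / (subf_len + pad);

	printf("subf_len=%u pad=%u n=%u amsdu=%u\n",
	       subf_len, pad, n, n * subf_len + (n - 1) * pad);
	/* prints: subf_len=1522 pad=2 n=5 amsdu=7618 */
	return 0;
}

Five 1522-byte subframes and four 2-byte pads total 7618 bytes, just under the 7935-byte limit; a sixth subframe would overshoot it.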
+ */ + tcp_payload_len = skb_tail_pointer(tmp) - + skb_transport_header(tmp) - + tcp_hdrlen(tmp) + tmp->data_len; + + if (ipv4) + ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); + + if (tcp_payload_len > mss) { + struct ieee80211_tx_info *skb_info = + IEEE80211_SKB_CB(tmp); + + num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); + amsdu_add = num_subframes * sizeof(struct ethhdr) + + (num_subframes - 1) * (snap_ip_tcp + pad); + skb_info->driver_data[0] = + (void *)(uintptr_t)amsdu_add; + skb_shinfo(tmp)->gso_size = mss; + } else { + qc = ieee80211_get_qos_ctl((void *)tmp->data); + + if (ipv4) + ip_send_check(ip_hdr(tmp)); + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + skb_shinfo(tmp)->gso_size = 0; + } tmp->prev = NULL; tmp->next = NULL; __skb_queue_tail(mpdus_skb, tmp); + i++; } return 0; } +#else /* CONFIG_INET */ +static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff_head *mpdus_skb) +{ + /* Impossible to get TSO without CONFIG_INET */ + WARN_ON(1); + + return -1; +} +#endif /* * Sets the fields in the Tx cmd that are crypto related */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_sta *mvmsta; struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; @@ -491,7 +669,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) return -1; - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, + sta, mvmsta->sta_id); if (!dev_cmd) goto drop; @@ -567,6 +746,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; @@ -577,18 +758,23 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) return -1; + memcpy(&info, skb->cb, sizeof(info)); + + /* This holds the amsdu headers length */ + skb_info->driver_data[0] = (void *)(uintptr_t)0; + if (!skb_is_gso(skb)) - return iwl_mvm_tx_mpdu(mvm, skb, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) - return iwl_mvm_tx_mpdu(mvm, skb, sta); + return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); __skb_queue_head_init(&mpdus_skbs); - ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs); + ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; @@ -596,9 +782,9 @@ return ret; while (!skb_queue_empty(&mpdus_skbs)) { - struct sk_buff *skb = __skb_dequeue(&mpdus_skbs); + skb = __skb_dequeue(&mpdus_skbs); - ret = iwl_mvm_tx_mpdu(mvm, skb, sta); + ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { __skb_queue_purge(&mpdus_skbs); return ret; @@ -745,6 +931,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } +static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, + u32 status) +{ 
+ struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_tx_status *status_trig; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); + status_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { + /* don't collect on status 0 */ + if (!status_trig->statuses[i].status) + break; + + if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) + continue; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); + break; + } +} + static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -760,6 +977,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct sk_buff_head skbs; u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; + bool is_ndp = false; __skb_queue_head_init(&skbs); @@ -793,6 +1011,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + iwl_mvm_tx_status_check_trigger(mvm, status); + info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), info); @@ -811,6 +1031,20 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(hdr->seq_ctrl); } + if (unlikely(!seq_ctl)) { + struct ieee80211_hdr *hdr = (void *)skb->data; + + /* + * If it is an NDP, we can't update next_reclaimed since + * its sequence control is 0. Note that for that same + * reason, NDPs are never sent to A-MPDU'able queues + * so that we can never have more than one freed frame + * for a single Tx response (see WARN_ON below). + */ + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + is_ndp = true; + } + /* * TODO: this is not accurate if we are freeing more than one * packet. 
@@ -874,9 +1108,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); - tid_data->next_reclaimed = next_reclaimed; - IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", - next_reclaimed); + if (!is_ndp) { + tid_data->next_reclaimed = next_reclaimed; + IWL_DEBUG_TX_REPLY(mvm, + "Next reclaimed packet:%d\n", + next_reclaimed); + } else { + IWL_DEBUG_TX_REPLY(mvm, + "NDP - don't update next_reclaimed\n"); + } + iwl_mvm_check_ratid_empty(mvm, sta, tid); if (mvmsta->sleep_tx_count) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 3a989f5c2..53cdc5760 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -376,8 +376,8 @@ struct iwl_error_event_table_v1 { struct iwl_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ + u32 trm_hw_status0; /* TRM HW status */ + u32 trm_hw_status1; /* TRM HW status */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ @@ -389,7 +389,7 @@ struct iwl_error_event_table { u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ + u32 fw_rev_type; /* firmware revision type */ u32 major; /* uCode version major */ u32 minor; /* uCode version minor */ u32 hw_ver; /* HW Silicon version */ @@ -408,7 +408,7 @@ struct iwl_error_event_table { * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 last_cmd_id; /* last HCMD id handled by the firmware */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ @@ -419,7 +419,7 @@ struct iwl_error_event_table { u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */; +} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; /* * UMAC error struct - relevant starting from family 8000 chip. 
@@ -529,9 +529,9 @@ static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, 0, + table.blink2, table.ilink1, table.ilink2, + table.bcon_time, table.gp1, table.gp2, + table.gp3, table.ucode_ver, 0, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); @@ -615,14 +615,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, + table.blink2, table.ilink1, table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.major, + table.gp2, table.fw_rev_type, table.major, table.minor, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); + IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); @@ -634,7 +634,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); @@ -645,7 +645,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); @@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool value) + bool prev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; lockdep_assert_held(&mvm->mutex); - if (mvmvif->low_latency == value) + if (iwl_mvm_vif_low_latency(mvmvif) == prev) return 0; - mvmvif->low_latency = value; - res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 00335ea6b..79d7cd7d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -66,6 +67,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> +#include <linux/pm_runtime.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/acpi.h> @@ -477,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, @@ -627,6 +639,33 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_drv; + /* if RTPM is in use, enable it in our device */ + if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + /* We explicitly set the device to active here to + * clear contingent errors. + */ + pm_runtime_set_active(&pdev->dev); + + pm_runtime_set_autosuspend_delay(&pdev->dev, + iwlwifi_mod_params.d0i3_entry_delay); + pm_runtime_use_autosuspend(&pdev->dev); + + /* We are not supposed to call pm_runtime_allow() by + * ourselves, but let userspace enable runtime PM via + * sysfs. However, since we don't enable this from + * userspace yet, we need to allow/forbid() ourselves. + */ + pm_runtime_allow(&pdev->dev); + } + + /* The PCI device starts with a reference taken and we are + * supposed to release it here. But to simplify the + * interaction with the opmode, we don't do it now, but let + * the opmode release it when it's ready. To account for this + * reference, we start with ref_count set to 1. + */ + trans_pcie->ref_count = 1; + return 0; out_free_drv: @@ -641,7 +680,17 @@ static void iwl_pci_remove(struct pci_dev *pdev) struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* if RTPM was in use, restore it to the state before probe */ + if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + /* We should not call forbid here, but we do for now. + * Check the comment to pm_runtime_allow() in + * iwl_pci_probe(). 
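Stripped of the iwlwifi specifics, the runtime-PM bring-up done in iwl_pci_probe() above is the stock autosuspend recipe for a PCI driver. A condensed sketch of that shape (the delay constant is illustrative):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

#define EXAMPLE_AUTOSUSPEND_DELAY_MS 1000	/* hypothetical idle delay */

static void example_enable_rtpm(struct pci_dev *pdev)
{
	/* start runtime PM accounting from a known-active state */
	pm_runtime_set_active(&pdev->dev);

	/* suspend automatically once the device has been idle a while */
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 EXAMPLE_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	/* normally userspace opts in by writing 'auto' to the device's
	 * power/control attribute; pm_runtime_allow() does the same
	 * thing from the driver, as the comment above explains */
	pm_runtime_allow(&pdev->dev);
}

The matching teardown is the pm_runtime_forbid() call right after this comment, which returns the device to its pre-probe state.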
+ */ + pm_runtime_forbid(trans->dev); + } + iwl_drv_stop(trans_pcie->drv); + iwl_trans_pcie_free(trans); } @@ -693,15 +742,173 @@ static int iwl_pci_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + if (test_bit(STATUS_FW_ERROR, &trans->status)) + return 0; + + set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + /* config the fw */ + ret = iwl_op_mode_enter_d0i3(trans->op_mode); + if (ret == 1) { + IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + return -EBUSY; + } + if (ret) + goto err; + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout entering D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + return 0; +err: + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + /* sometimes a D0i3 entry is not followed through */ + if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) + return 0; + + /* config the fw */ + ret = iwl_op_mode_exit_d0i3(trans->op_mode); + if (ret) + goto err; + + /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + !test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + return 0; +err: + clear_bit(STATUS_TRANS_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + +#ifdef CONFIG_IWLWIFI_PCIE_RTPM +static int iwl_pci_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + int ret; + + IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); + + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + ret = iwl_pci_fw_enter_d0i3(trans); + if (ret < 0) + return ret; + } + + trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; + + iwl_trans_d3_suspend(trans, false, false); + + return 0; +} + +static int iwl_pci_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + enum iwl_d3_status d3_status; + + IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); + + iwl_trans_d3_resume(trans, &d3_status, false, false); + + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return iwl_pci_fw_exit_d0i3(trans); + + return 0; +} + +static int iwl_pci_system_prepare(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + + IWL_DEBUG_RPM(trans, "preparing for system suspend\n"); + + /* This is called before entering system suspend and before + * the runtime resume is called. Set the suspending flag to + * prevent the wakelock from being taken. + */ + trans->suspending = true; + + /* Wake the device up from runtime suspend before going to + * platform suspend. This is needed because we don't know + * whether wowlan any is set and, if it's not, mac80211 will + * disconnect (in which case, we can't be in D0i3). 
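iwl_pci_fw_enter_d0i3() and iwl_pci_fw_exit_d0i3() above are both instances of a flag-plus-waitqueue handshake: publish a transition bit, ask another context to do the work, then sleep until that context flips the completion bit or a timeout hits. A generic kernel-style sketch of the enter side (names and bit numbers invented, not the iwlwifi API):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

#define EXAMPLE_GOING_IDLE	0	/* transition in progress */
#define EXAMPLE_IDLE		1	/* transition complete */
#define EXAMPLE_TIMEOUT_MS	2000

struct example_trans {
	wait_queue_head_t waitq;
	unsigned long status;
};

static int example_enter_idle(struct example_trans *t)
{
	set_bit(EXAMPLE_GOING_IDLE, &t->status);

	/* ... kick off the asynchronous idle transition here ... */

	/* the completion path sets EXAMPLE_IDLE and wakes t->waitq;
	 * wait_event_timeout() returns 0 on timeout */
	if (!wait_event_timeout(t->waitq,
				test_bit(EXAMPLE_IDLE, &t->status),
				msecs_to_jiffies(EXAMPLE_TIMEOUT_MS))) {
		clear_bit(EXAMPLE_GOING_IDLE, &t->status);
		return -ETIMEDOUT;
	}

	clear_bit(EXAMPLE_GOING_IDLE, &t->status);
	return 0;
}

The real code additionally reports a firmware error on timeout, since a missed handshake there means the firmware stopped responding.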
+ */ + pm_runtime_resume(device); + + return 0; +} + +static void iwl_pci_system_complete(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + + IWL_DEBUG_RPM(trans, "completing system suspend\n"); + + /* This is called as a counterpart to the prepare op. It is + * called either when suspending fails or when suspend + * completed successfully. Now there's no risk of grabbing + * the wakelock anymore, so we can release the suspending + * flag. + */ + trans->suspending = false; +} +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + +static const struct dev_pm_ops iwl_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, + iwl_pci_resume) +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, + iwl_pci_runtime_resume, + NULL) + .prepare = iwl_pci_system_prepare, + .complete = iwl_pci_system_complete, +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ +}; #define IWL_PM_OPS (&iwl_dev_pm_ops) -#else +#else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL -#endif +#endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 73c95594e..dadafbdef 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -56,17 +57,23 @@ #define RX_NUM_QUEUES 1 #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 +#define RX_PENDING_WATERMARK 16 struct iwl_host_cmd; /*This file includes the declaration that are internal to the * trans_pcie layer */ +/** + * struct iwl_rx_mem_buffer + * @page_dma: bus address of rxb page + * @page: driver's pointer to the rxb page + * @vid: index of this rxb in the global table + */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; + u16 vid; struct list_head list; }; @@ -90,8 +97,12 @@ struct isr_statistics { /** * struct iwl_rxq - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @id: queue index + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). + * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) + * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -103,32 +114,34 @@ struct isr_statistics { * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue + * @queue: actual rx queue. Not used for multi-rx queue. 
* * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { - __le32 *bd; + int id; + void *bd; dma_addr_t bd_dma; + __le32 *used_bd; + dma_addr_t used_bd_dma; u32 read; u32 write; u32 free_count; u32 used_count; u32 write_actual; + u32 queue_size; struct list_head rx_free; struct list_head rx_used; bool need_update; struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct napi_struct napi; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; /** * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator * @req_pending: number of requests the allcator had not processed yet * @req_ready: number of requests honored and ready for claiming * @rbd_allocated: RBDs with pages allocated and ready to be handled to @@ -140,7 +153,6 @@ struct iwl_rxq { * @rx_alloc: work struct for background calls */ struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; atomic_t req_pending; atomic_t req_ready; struct list_head rbd_allocated; @@ -280,6 +292,7 @@ struct iwl_txq { bool ampdu; bool block; unsigned long wd_timeout; + struct sk_buff_head overflow_q; }; static inline dma_addr_t @@ -297,6 +310,8 @@ struct iwl_tso_hdr_page { /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data + * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues + * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area @@ -321,15 +336,24 @@ struct iwl_tso_hdr_page { * @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor * @fw_mon_size: size of the buffer for the firmware monitor + * @msix_entries: array of MSI-X entries + * @msix_enabled: true if managed to enable MSI-X + * @allocated_vector: the number of interrupt vector allocated by the OS + * @default_irq_num: default irq for non rx interrupt + * @fh_init_mask: initial unmasked fh causes + * @hw_init_mask: initial unmasked hw causes + * @fh_mask: current unmasked fh causes + * @hw_mask: current unmasked hw causes */ struct iwl_trans_pcie { - struct iwl_rxq rxq; + struct iwl_rxq *rxq; + struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; + struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; struct net_device napi_dev; - struct napi_struct napi; struct __percpu iwl_tso_hdr_page *tso_hdr_page; @@ -359,6 +383,7 @@ struct iwl_trans_pcie { bool ucode_write_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t wait_command_queue; + wait_queue_head_t d0i3_waitq; u8 cmd_queue; u8 cmd_fifo; @@ -385,6 +410,15 @@ struct iwl_trans_pcie { dma_addr_t fw_mon_phys; struct page *fw_mon_page; u32 fw_mon_size; + + struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; + bool msix_enabled; + u32 allocated_vector; + u32 default_irq_num; + u32 fh_init_mask; + u32 hw_init_mask; + u32 fh_mask; + u32 hw_mask; }; static inline struct iwl_trans_pcie * @@ -413,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); +irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); +irqreturn_t 
iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); @@ -468,15 +505,24 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans); ******************************************************/ static inline void iwl_disable_interrupts(struct iwl_trans *trans) { - clear_bit(STATUS_INT_ENABLED, &trans->status); - - /* disable interrupts from uCode/NIC to host */ - iwl_write32(trans, CSR_INT_MASK, 0x00000000); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* acknowledge/clear/reset any interrupts still pending - * from uCode or flow handler (Rx/Tx DMA) */ - iwl_write32(trans, CSR_INT, 0xffffffff); - iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + clear_bit(STATUS_INT_ENABLED, &trans->status); + if (!trans_pcie->msix_enabled) { + /* disable interrupts from uCode/NIC to host */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(trans, CSR_INT, 0xffffffff); + iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + } else { + /* disable all the interrupt we might use */ + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + } IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } @@ -486,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans) IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); set_bit(STATUS_INT_ENABLED, &trans->status); - trans_pcie->inta_mask = CSR_INI_SET_MASK; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INI_SET_MASK; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + /* + * fh/hw_mask keeps all the unmasked causes. + * Unlike msi, in msix cause is enabled when it is unset. 
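A point worth calling out before the MSI-X helpers that follow: the *_INT_MASK_AD registers use inverted semantics, where a set bit masks (disables) a cause, so enabling a set of causes means writing their complement and remembering what is currently unmasked. A tiny userspace model of that convention (register and bit names invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t mask_ad;	/* models a CSR_MSIX_*_INT_MASK_AD register */

/* enable exactly the causes in 'causes': write their complement */
static void enable_causes(uint32_t causes)
{
	mask_ad = ~causes;
}

int main(void)
{
	uint32_t rf_kill = 1u << 7;	/* hypothetical cause bit */

	enable_causes(rf_kill);
	/* bit 7 is now clear (enabled); every other cause is masked */
	printf("mask_ad=0x%08x rf_kill enabled=%d\n",
	       (unsigned)mask_ad, !(mask_ad & rf_kill));
	return 0;
}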
+ */ + trans_pcie->hw_mask = trans_pcie->hw_init_mask; + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + ~trans_pcie->fh_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + ~trans_pcie->hw_mask); + } +} + +static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); + trans_pcie->hw_mask = msk; +} + +static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); + trans_pcie->fh_mask = msk; } static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) @@ -495,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); - trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + iwl_enable_fh_int_msk_msix(trans, + MSIX_FH_INT_CAUSES_D2S_CH0_NUM); + } } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) @@ -504,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); - trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_enable_hw_int_msk_msix(trans, + MSIX_HW_INT_CAUSES_REG_RF_KILL); + } } static inline void iwl_wake_queue(struct iwl_trans *trans, @@ -588,4 +677,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) } #endif +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 152cf9ad9..4be3c35af 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -140,8 +141,8 @@ */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { - /* Make sure RX_QUEUE_SIZE is a power of 2 */ - BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); + /* Make sure rx queue size is a power of 2 */ + WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); /* * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity @@ -149,7 +150,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq) * The following is equivalent to modulo by RX_QUEUE_SIZE and is well * defined for negative dividends. 
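The masked subtraction described in the comment below works for any power-of-two ring size, including when the unsigned indices wrap. A standalone check:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 512u		/* must be a power of 2 */

/* free slots between consumer index 'read' and producer index 'write';
 * one slot is always kept empty to disambiguate full from empty */
static uint32_t rxq_space(uint32_t read, uint32_t write)
{
	return (read - write - 1) & (QUEUE_SIZE - 1);
}

int main(void)
{
	printf("%u\n", rxq_space(2, 510));	/* wrapped case: prints 3 */
	printf("%u\n", rxq_space(0, 0));	/* empty ring: prints 511 */
	return 0;
}

The subtraction is done on unsigned values, so the "negative" intermediate result wraps modulo 2^32 and the final mask still yields the right answer.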
*/ - return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); + return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); } /* @@ -160,6 +161,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) return cpu_to_le32((u32)(dma_addr >> 8)); } +static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) +{ + iwl_write_prph(trans, ofs, val & 0xffffffff); + iwl_write_prph(trans, ofs + 4, val >> 32); +} + /* * iwl_pcie_rx_stop - stops the Rx DMA */ @@ -173,10 +180,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans) /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ -static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; u32 reg; lockdep_assert_held(&rxq->lock); @@ -201,41 +207,84 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) } rxq->write_actual = round_down(rxq->write, 8); - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + if (trans->cfg->mq_rx_supported) + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), + rxq->write_actual); + else + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + int i; - spin_lock(&rxq->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (!rxq->need_update) + continue; + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + rxq->need_update = false; + spin_unlock(&rxq->lock); + } +} + +/* + * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx + */ +static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - no need to try to add buffers... + * This can happen when we stop the device and still have an interrupt + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. + */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; - if (!rxq->need_update) - goto exit_unlock; + spin_lock(&rxq->lock); + while (rxq->free_count) { + __le64 *bd = (__le64 *)rxq->bd; - iwl_pcie_rxq_inc_wr_ptr(trans); - rxq->need_update = false; + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); - exit_unlock: + /* 12 first bits are expected to be empty */ + WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); + /* Point to Rx buffer via next RBD in circular buffer */ + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; + rxq->free_count--; + } spin_unlock(&rxq->lock); + + /* + * If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. 
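The multi-queue restock above packs two things into each 64-bit descriptor: the page's DMA address and the buffer's index (vid). That works because receive pages are at least 4 KiB aligned, so the low 12 bits of the address are guaranteed zero, which is exactly what the WARN_ON checks. A minimal model of the packing (the driver relies on the vid fitting by construction rather than masking it):

#include <stdint.h>
#include <stdio.h>

/* pack a buffer id into the low 12 bits of a 4 KiB-aligned address */
static uint64_t pack_rbd(uint64_t page_dma, uint16_t vid)
{
	return page_dma | (vid & 0xfffu);
}

int main(void)
{
	uint64_t bd = pack_rbd(0x12345000u, 42);

	/* the hardware echoes the descriptor back in the 'used' ring,
	 * and the driver recovers the vid to look up the rxb */
	printf("dma=0x%llx vid=%u\n",
	       (unsigned long long)(bd & ~0xfffull),
	       (unsigned)(bd & 0xfffu));	/* dma=0x12345000 vid=42 */
	return 0;
}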
+ */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + spin_unlock(&rxq->lock); + } } /* - * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. + * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx */ -static void iwl_pcie_rxq_restock(struct iwl_trans *trans) +static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; /* @@ -251,6 +300,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) spin_lock(&rxq->lock); while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { + __le32 *bd = (__le32 *)rxq->bd; /* The overwritten rxb must be a used one */ rxb = rxq->queue[rxq->write]; BUG_ON(rxb && rxb->page); @@ -261,7 +311,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) list_del(&rxb->list); /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); + bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); rxq->queue[rxq->write] = rxb; rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; rxq->free_count--; @@ -272,11 +322,31 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) * Increment device's write pointer in multiples of 8. */ if (rxq->write_actual != (rxq->write & ~0x7)) { spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); spin_unlock(&rxq->lock); } } +/* + * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static +void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) +{ + if (trans->cfg->mq_rx_supported) + iwl_pcie_rxq_mq_restock(trans, rxq); + else + iwl_pcie_rxq_sq_restock(trans, rxq); +} + /* * iwl_pcie_rx_alloc_page - allocates and returns a page. * @@ -285,13 +355,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct page *page; gfp_t gfp_mask = priority; - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - if (trans_pcie->rx_page_order > 0) gfp_mask |= __GFP_COMP; @@ -301,16 +367,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, if (net_ratelimit()) IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. + /* + * Issue an error if we don't have enough pre-allocated + * buffers. 
 */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) + if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", - rxq->free_count); + "Failed to alloc_pages\n"); return NULL; } return page; } @@ -325,10 +388,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; @@ -372,10 +435,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) __free_pages(page, trans_pcie->rx_page_order); return; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); spin_lock(&rxq->lock); @@ -386,40 +445,23 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } } -static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) +static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; int i; - lockdep_assert_held(&rxq->lock); - - for (i = 0; i < RX_QUEUE_SIZE; i++) { - if (!rxq->pool[i].page) + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!trans_pcie->rx_pool[i].page) continue; - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, + dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); - __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); - rxq->pool[i].page = NULL; + __free_pages(trans_pcie->rx_pool[i].page, + trans_pcie->rx_page_order); + trans_pcie->rx_pool[i].page = NULL; } } -/* - * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free - * - * When moving to rx_free an page is allocated for the slot. - * - * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization - */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) -{ - iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); - - iwl_pcie_rxq_restock(trans); -} - /* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * @@ -444,6 +486,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) while (pending) { int i; struct list_head local_allocated; + gfp_t gfp_mask = GFP_KERNEL; + + /* Do not post a warning if there are only a few requests */ + if (pending < RX_PENDING_WATERMARK) + gfp_mask |= __GFP_NOWARN; INIT_LIST_HEAD(&local_allocated); @@ -463,7 +510,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) BUG_ON(rxb->page); /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + page = iwl_pcie_rx_alloc_page(trans, gfp_mask); if (!page) continue; rxb->page = page; @@ -477,10 +524,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) __free_pages(page, trans_pcie->rx_page_order); continue; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! 
*/ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); /* move the allocated entry to the out list */ list_move(&rxb->list, &local_allocated); @@ -512,40 +555,46 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) } /* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages + * iwl_pcie_rx_allocator_get - returns the pre-allocated pages * * Called by the queue when it has posted an allocation request and * has freed 8 RBDs in order to restock itself. + * This function directly moves the allocated RBs to the queue's ownership + * and updates the relevant counters. */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) +static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; + lockdep_assert_held(&rxq->lock); + /* * atomic_dec_if_positive returns req_ready - 1 for any scenario. * If req_ready is 0 atomic_dec_if_positive will return -1 and this - * function will return -ENOMEM, as there are no ready requests. + * function will return early, as there are no ready requests. * atomic_dec_if_positive will perform the *actual* decrement only if * req_ready > 0, i.e. - there are ready requests and the function * hands one request to the caller. */ if (atomic_dec_if_positive(&rba->req_ready) < 0) - return -ENOMEM; + return; spin_lock(&rba->lock); for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); + struct iwl_rx_mem_buffer *rxb = + list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + + list_move(&rxb->list, &rxq->rx_free); } spin_unlock(&rba->lock); - return 0; + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + rxq->free_count += RX_CLAIM_REQ_ALLOC; } static void iwl_pcie_rx_allocator_work(struct work_struct *data) @@ -561,38 +610,83 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data) static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; + int i; + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : + sizeof(__le32); - memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); + if (WARN_ON(trans_pcie->rxq)) + return -EINVAL; + + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + GFP_KERNEL); + if (!trans_pcie->rxq) + return -ENOMEM; - spin_lock_init(&rxq->lock); spin_lock_init(&rba->lock); - if (WARN_ON(rxq->bd || rxq->rb_stts)) - return -EINVAL; + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err_bd; + spin_lock_init(&rxq->lock); + if (trans->cfg->mq_rx_supported) + rxq->queue_size = MQ_RX_TABLE_SIZE; + else + rxq->queue_size = RX_QUEUE_SIZE; - /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb_stts; + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_zalloc_coherent(dev, + free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err; + + if (trans->cfg->mq_rx_supported) { + rxq->used_bd = dma_zalloc_coherent(dev, + sizeof(__le32) * + rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); + if (!rxq->used_bd) + goto err; + } + /* Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, + GFP_KERNEL); + if (!rxq->rb_stts) + goto err; + } return 0; -err_rb_stts: - dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; -err_bd: +err: + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(dev, free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + + if (rxq->used_bd) + dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + } + kfree(trans_pcie->rxq); + return -ENOMEM; } @@ -659,65 +753,112 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } -static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 rb_size, enabled = 0; int i; - lockdep_assert_held(&rxq->lock); - - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - rxq->free_count = 0; - rxq->used_count = 0; + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_4K: + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = RFH_RXF_DMA_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = RFH_RXF_DMA_RB_SIZE_12K; + break; + default: + WARN_ON(1); + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + } - for (i = 0; i < RX_QUEUE_SIZE; i++) - list_add(&rxq->pool[i].list, &rxq->rx_used); -} + /* Stop Rx DMA */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + /* disable free and used rx queue operation */ + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); + + for (i = 0; i < trans->num_rx_queues; i++) { + /* Tell device where to find RBD free table in DRAM */ + 
iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), + (u64)(trans_pcie->rxq[i].bd_dma)); + /* Tell device where to find RBD used table in DRAM */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), + (u64)(trans_pcie->rxq[i].used_bd_dma)); + /* Tell device where in DRAM to update its Rx status */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), + trans_pcie->rxq[i].rb_stts_dma); + /* Reset device index tables */ + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); + iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); + iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); + + enabled |= BIT(i) | BIT(i + 16); + } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; + /* restock default queue */ + iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); - lockdep_assert_held(&rba->lock); + /* + * Enable Rx DMA + * Single frame mode + * Rx buffer size 4k or 8k or 12k + * Min RB size 4 or 8 + * Drop frames that exceed RB size + * 512 RBDs + */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, + RFH_DMA_EN_ENABLE_VAL | + rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | + RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_DROP_TOO_LARGE_MASK | + RFH_RXF_DMA_RBDCB_SIZE_512); - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); + /* + * Activate DMA snooping. + * Set RX DMA chunk size to 64B + * Default queue is 0 + */ + iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | + (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | + RFH_GEN_CFG_SERVICE_DMA_SNOOP); + /* Enable the relevant rx queues */ + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; + lockdep_assert_held(&rxq->lock); - lockdep_assert_held(&rba->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + rxq->used_count = 0; +} - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +{ + WARN_ON(1); + return 0; } int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i, err; + int i, err, queue_size, allocator_pool_size, num_alloc; - if (!rxq->bd) { + if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); if (err) return err; } + def_rxq = trans_pcie->rxq; if (!rba->alloc_wq) rba->alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); @@ -726,34 +867,69 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); 
spin_unlock(&rba->lock); - spin_lock(&rxq->lock); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rxq_free_rbs(trans); - iwl_pcie_rx_init_rxb_lists(rxq); + iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; + def_rxq->queue[i] = NULL; - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_unlock(&rxq->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - iwl_pcie_rx_replenish(trans); + rxq->id = i; - iwl_pcie_rx_hw_init(trans, rxq); + spin_lock(&rxq->lock); + /* + * Set read write pointer to reflect that we have processed + * and used all buffers, but have not restocked the Rx queue + * with fresh buffers + */ + rxq->read = 0; + rxq->write = 0; + rxq->write_actual = 0; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); - spin_unlock(&rxq->lock); + iwl_pcie_rx_init_rxb_lists(rxq); + + if (!rxq->napi.poll) + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + iwl_pcie_dummy_napi_poll, 64); + + spin_unlock(&rxq->lock); + } + + /* move the pool to the default queue and allocator ownerships */ + queue_size = trans->cfg->mq_rx_supported ? + MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; + allocator_pool_size = trans->num_rx_queues * + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); + num_alloc = queue_size + allocator_pool_size; + for (i = 0; i < num_alloc; i++) { + struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; + + if (i < allocator_pool_size) + list_add(&rxb->list, &rba->rbd_empty); + else + list_add(&rxb->list, &def_rxq->rx_used); + trans_pcie->global_table[i] = rxb; + rxb->vid = (u16)i; + } + + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + if (trans->cfg->mq_rx_supported) { + iwl_pcie_rx_mq_hw_init(trans); + } else { + iwl_pcie_rxq_sq_restock(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, def_rxq); + } + + spin_lock(&def_rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); + spin_unlock(&def_rxq->lock); return 0; } @@ -761,12 +937,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : + sizeof(__le32); + int i; - /*if rxq->bd is NULL, it means that nothing has been allocated, - * exit now */ - if (!rxq->bd) { + /* + * if rxq is NULL, it means that nothing has been allocated, + * exit now + */ + if (!trans_pcie->rxq) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } @@ -777,27 +957,37 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rba->alloc_wq = NULL; } - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); - - spin_lock(&rxq->lock); - iwl_pcie_rxq_free_rbs(trans); - spin_unlock(&rxq->lock); - - dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); - rxq->rb_stts_dma = 0; - rxq->rb_stts = NULL; + iwl_pcie_free_rbs_pool(trans); + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(trans->dev, + free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + else + IWL_DEBUG_INFO(trans, + "Free rxq->rb_stts which is NULL\n"); + + if (rxq->used_bd) + dma_free_coherent(trans->dev, + sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + + if (rxq->napi.poll) + netif_napi_del(&rxq->napi); + } + kfree(trans_pcie->rxq); } /* @@ -841,11 +1031,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, + struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; @@ -911,7 +1101,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); + if (rxq->id == 0) + iwl_op_mode_rx(trans->op_mode, &rxq->napi, + &rxcb); + else + iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, + &rxcb, rxq->id); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -972,11 +1167,11 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ -static void iwl_pcie_rx_handle(struct iwl_trans *trans) +static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i, j, count = 0; + struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; + u32 r, i, count = 0; bool emergency = false; restart: @@ -986,80 +1181,73 @@ restart: r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; i = rxq->read; + /* W/A 9000 device step A0 wrap-around bug */ + r &= (rxq->queue_size - 1); + /* Rx interrupt, but nothing sent from uCode */ if (i == r) - IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { struct iwl_rx_mem_buffer *rxb; - if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + if 
(unlikely(rxq->used_count == rxq->queue_size / 2)) emergency = true; - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; + if (trans->cfg->mq_rx_supported) { + /* + * used_bd entries are 32 bits wide, but only their + * low 12 bits carry the vid + */ + u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; + + if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table), + "Invalid rxb index from HW %u\n", (u32)vid)) + goto out; + rxb = trans_pcie->global_table[vid]; + } else { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + } - IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); - iwl_pcie_rx_handle_rb(trans, rxb, emergency); + IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); - i = (i + 1) & RX_QUEUE_MASK; + i = (i + 1) & (rxq->queue_size - 1); - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + /* + * If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator. + * If not ready - will try to reclaim next time. + * There is no need to reschedule work - allocator exits only + * on success + */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) + iwl_pcie_rx_allocator_get(trans, rxq); + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && - !emergency) { - /* Add the remaining 6 empty RBDs - * for allocator use - */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, - &rba->rbd_empty); - spin_unlock(&rba->lock); - } - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } - } - } - if (emergency) { + /* Add the remaining empty RBDs for allocator use */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + } else if (emergency) { count++; if (count == 8) { count = 0; - if (rxq->used_count < RX_QUEUE_SIZE / 3) + if (rxq->used_count < rxq->queue_size / 3) emergency = false; + + rxq->read = i; spin_unlock(&rxq->lock); - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); - spin_lock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); + iwl_pcie_rxq_restock(trans, rxq); + goto restart; } } - /* handle restock for three cases, can be all of them at once: * - we just pulled buffers from the allocator * - we have 8+ unstolen pages accumulated * - we are in emergency and allocated buffers */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); - goto restart; - } } - +out: /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); @@ -1077,10 +1265,60 @@ restart: * will be restocked by the next call of iwl_pcie_rxq_restock. 
*/ if (unlikely(emergency && count)) - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); + + if (rxq->napi.poll) + napi_gro_flush(&rxq->napi, false); + + iwl_pcie_rxq_restock(trans, rxq); +} + +static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) +{ + u8 queue = entry->entry; + struct msix_entry *entries = entry - queue; + + return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); +} - if (trans_pcie->napi.poll) - napi_gro_flush(&trans_pcie->napi, false); +static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, + struct msix_entry *entry) +{ + /* + * Before sending the interrupt the HW disables it to prevent + * a nested interrupt. This is done by writing 1 to the corresponding + * bit in the mask register. After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as + * a write-1-to-clear (W1C) register, meaning that the bit is + * cleared by writing 1 to it. + */ + iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); +} + +/* + * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw + * This interrupt handler should be used with RSS queues only. + */ +irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + + if (WARN_ON(entry->entry >= trans->num_rx_queues)) + return IRQ_NONE; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + local_bh_disable(); + iwl_pcie_rx_handle(trans, entry->entry); + local_bh_enable(); + + iwl_pcie_clear_irq(trans, entry); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; } /* @@ -1413,7 +1651,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->rx++; local_bh_disable(); - iwl_pcie_rx_handle(trans); + iwl_pcie_rx_handle(trans, 0); local_bh_enable(); } @@ -1556,3 +1794,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data) return IRQ_WAKE_THREAD; } + +irqreturn_t iwl_pcie_msix_isr(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + u32 inta_fh, inta_hw; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + spin_lock(&trans_pcie->irq_lock); + inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD); + inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + /* + * Clear the cause registers to avoid handling the same cause twice. 
+ */ + iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); + iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); + spin_unlock(&trans_pcie->irq_lock); + + if (unlikely(!(inta_fh | inta_hw))) { + IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_NONE; + } + + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", + inta_fh, + iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { + IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); + isr_stats->tx++; + /* + * Wake up uCode load routine, + * now that load is complete + */ + trans_pcie->ucode_write_complete = true; + wake_up(&trans_pcie->ucode_write_waitq); + } + + /* Error detected by uCode */ + if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { + IWL_ERR(trans, + "Microcode SW error detected. Restarting 0x%X.\n", + inta_fh); + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + } + + /* After checking FH register check HW register */ + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, + "ISR inta_hw 0x%08x, enabled 0x%08x\n", + inta_hw, + iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + } + + /* uCode wakes up after power-down sleep */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); + + isr_stats->wakeup++; + } + + /* Chip got too hot and stopped itself */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { + IWL_ERR(trans, "Microcode CT kill error detected.\n"); + isr_stats->ctkill++; + } + + /* HW RF KILL switch toggled */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { + bool hw_rfkill; + + hw_rfkill = iwl_is_rfkill_set(trans); + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", + hw_rfkill ? "disable radio" : "enable radio"); + + isr_stats->rfkill++; + + mutex_lock(&trans_pcie->mutex); + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); + if (hw_rfkill) { + set_bit(STATUS_RFKILL, &trans->status); + if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status)) + IWL_DEBUG_RF_KILL(trans, + "Rfkill while SYNC HCMD in flight\n"); + wake_up(&trans_pcie->wait_command_queue); + } else { + clear_bit(STATUS_RFKILL, &trans->status); + } + } + + if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { + IWL_ERR(trans, + "Hardware error detected. 
Restarting.\n"); + + isr_stats->hw++; + iwl_pcie_irq_handle_error(trans); + } + + iwl_pcie_clear_irq(trans, entry); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1198caac3..b2b79354d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -72,6 +72,7 @@ #include #include #include +#include #include "iwl-drv.h" #include "iwl-trans.h" @@ -615,38 +616,38 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; int ret; trans_pcie->ucode_write_complete = false; - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + if (!iwl_trans_grab_nic_access(trans, &flags)) + return -EIO; + + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + + iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), + dst_addr); - iwl_write_direct32(trans, - FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), - dst_addr); + iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); - iwl_write_direct32(trans, - FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), - phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); + iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), + (iwl_get_dma_hi_addr(phy_addr) + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); - iwl_write_direct32(trans, - FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), - (iwl_get_dma_hi_addr(phy_addr) - << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); + iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | - FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | - FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); + iwl_trans_release_nic_access(trans, &flags); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); @@ -1122,6 +1123,20 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } +static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->msix_enabled) { + int i; + + for (i = 0; i < trans_pcie->allocated_vector; i++) + synchronize_irq(trans_pcie->msix_entries[i].vector); + } else { + synchronize_irq(trans_pcie->pci_dev->irq); + } +} + static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { @@ -1148,7 +1163,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, iwl_disable_interrupts(trans); /* Make sure it finished running */ - 
synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); @@ -1248,11 +1263,10 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) _iwl_trans_pcie_stop_device(trans, true); } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); @@ -1269,14 +1283,14 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_pcie_disable_ict(trans); - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) { + if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try @@ -1290,7 +1304,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { u32 val; int ret; @@ -1325,7 +1339,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { @@ -1348,6 +1362,153 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } +struct iwl_causes_list { + u32 cause_num; + u32 mask_reg; + u8 addr; +}; + +static struct iwl_causes_list causes_list[] = { + {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, + {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, + {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, + {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, + {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, + {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, + {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, + {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, + {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, + {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, + {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, +}; + +static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +{ + u32 val, max_rx_vector, i; + struct iwl_trans *trans = trans_pcie->trans; + + max_rx_vector = trans_pcie->allocated_vector - 1; + + if (!trans_pcie->msix_enabled) + return; + + iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + + /* + * Each cause from the list above and the RX causes is represented as + * a byte in the IVAR table. We access the first (N - 1) bytes and map + * them to the (N - 1) vectors so these vectors will be used as rx + * vectors. Then access all non rx causes and map them to the + * default queue (N'th queue). 
+ */ + for (i = 0; i < max_rx_vector; i++) { + iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i)); + iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD, + BIT(MSIX_FH_INT_CAUSES_Q(i))); + } + + for (i = 0; i < ARRAY_SIZE(causes_list); i++) { + val = trans_pcie->default_irq_num | + MSIX_NON_AUTO_CLEAR_CAUSE; + iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); + iwl_clear_bit(trans, causes_list[i].mask_reg, + causes_list[i].cause_num); + } + trans_pcie->fh_init_mask = + ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + trans_pcie->hw_init_mask = + ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); + trans_pcie->hw_mask = trans_pcie->hw_init_mask; +} + +static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, + struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 pci_cmd; + int max_vector; + int ret, i; + + if (trans->cfg->mq_rx_supported) { + max_vector = min_t(u32, (num_possible_cpus() + 1), + IWL_MAX_RX_HW_QUEUES); + for (i = 0; i < max_vector; i++) + trans_pcie->msix_entries[i].entry = i; + + ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries, + MSIX_MIN_INTERRUPT_VECTORS, + max_vector); + if (ret > 1) { + IWL_DEBUG_INFO(trans, + "Enable MSI-X allocate %d interrupt vector\n", + ret); + trans_pcie->allocated_vector = ret; + trans_pcie->default_irq_num = + trans_pcie->allocated_vector - 1; + trans_pcie->trans->num_rx_queues = + trans_pcie->allocated_vector - 1; + trans_pcie->msix_enabled = true; + + return; + } + IWL_DEBUG_INFO(trans, + "ret = %d %s move to msi mode\n", ret, + (ret == 1) ? + "can't allocate more than 1 interrupt vector" : + "failed to enable msi-x mode"); + pci_disable_msix(pdev); + } + + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + } + } +} + +static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, + struct iwl_trans_pcie *trans_pcie) +{ + int i, last_vector; + + last_vector = trans_pcie->trans->num_rx_queues; + + for (i = 0; i < trans_pcie->allocated_vector; i++) { + int ret; + + ret = request_threaded_irq(trans_pcie->msix_entries[i].vector, + iwl_pcie_msix_isr, + (i == last_vector) ? + iwl_pcie_irq_msix_handler : + iwl_pcie_irq_rx_msix_handler, + IRQF_SHARED, + DRV_NAME, + &trans_pcie->msix_entries[i]); + if (ret) { + int j; + + IWL_ERR(trans_pcie->trans, + "Error allocating IRQ %d\n", i); + for (j = 0; j < i; j++) + free_irq(trans_pcie->msix_entries[j].vector, + &trans_pcie->msix_entries[j]); + pci_disable_msix(pdev); + return ret; + } + } + + return 0; +} + static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1369,6 +1530,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) iwl_pcie_apm_init(trans); + iwl_pcie_init_msix(trans_pcie); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); @@ -1383,6 +1545,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); + /* Make sure we sync here, because we'll need full access later */ + if (low_power) + pm_runtime_resume(trans->dev); + return 0; } @@ -1419,7 +1585,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) mutex_unlock(&trans_pcie->mutex); - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1452,12 +1618,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) -{ - WARN_ON(1); - return 0; -} - static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { @@ -1486,19 +1646,13 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; - /* init ref_count to 1 (should be cleared when ucode is loaded) */ - trans_pcie->ref_count = 1; - /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. */ - if (!trans_pcie->napi.poll) { + if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); - netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, - iwl_pcie_dummy_napi_poll, 64); - } } void iwl_trans_pcie_free(struct iwl_trans *trans) @@ -1506,22 +1660,29 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); - free_irq(trans_pcie->pci_dev->irq, trans); - iwl_pcie_free_ict(trans); + if (trans_pcie->msix_enabled) { + for (i = 0; i < trans_pcie->allocated_vector; i++) + free_irq(trans_pcie->msix_entries[i].vector, + &trans_pcie->msix_entries[i]); + + pci_disable_msix(trans_pcie->pci_dev); + trans_pcie->msix_enabled = false; + } else { + free_irq(trans_pcie->pci_dev->irq, trans); + + iwl_pcie_free_ict(trans); - pci_disable_msi(trans_pcie->pci_dev); + pci_disable_msi(trans_pcie->pci_dev); + } iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - if (trans_pcie->napi.poll) - netif_napi_del(&trans_pcie->napi); - iwl_pcie_free_fw_monitor(trans); for_each_possible_cpu(i) { @@ -1861,6 +2022,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->ref_lock, flags); IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); trans_pcie->ref_count++; + pm_runtime_get(&trans_pcie->pci_dev->dev); spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -1879,6 +2041,10 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans) return; } trans_pcie->ref_count--; + + pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); + pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -2031,29 +2197,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - char 
buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", - rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", - rxq->write); - pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", - rxq->write_actual); - pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", - rxq->need_update); - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); + char *buf; + int pos = 0, i, ret; + size_t bufsz; + + bufsz = sizeof(char) * 121 * trans->num_rx_queues; + + if (!trans_pcie->rxq) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", + i); + pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", + rxq->write_actual); + pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", + rxq->need_update); + pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & + 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: Not Allocated\n"); + } } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; } static ssize_t iwl_dbgfs_interrupt_read(struct file *file, @@ -2218,7 +2403,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_len = PAGE_SIZE << trans_pcie->rx_page_order; - struct iwl_rxq *rxq = &trans_pcie->rxq; + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; spin_lock(&rxq->lock); @@ -2413,7 +2599,8 @@ static struct iwl_trans_dump_data u32 len, num_rbs; u32 monitor_len; int i, ptr; - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && + !trans->cfg->mq_rx_supported; /* transport dump header */ len = sizeof(*dump_data); @@ -2468,11 +2655,12 @@ static struct iwl_trans_dump_data len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); if (dump_rbs) { + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(ACCESS_ONCE( - trans_pcie->rxq.rb_stts->closed_rb_num)) + num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; - num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); @@ -2523,6 +2711,22 @@ static struct iwl_trans_dump_data return dump_data; } +#ifdef CONFIG_PM_SLEEP +static int iwl_trans_pcie_suspend(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == 
IWL_PLAT_PM_MODE_D0I3) + return iwl_pci_fw_enter_d0i3(trans); + + return 0; +} + +static void iwl_trans_pcie_resume(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) + iwl_pci_fw_exit_d0i3(trans); +} +#endif /* CONFIG_PM_SLEEP */ + static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, .op_mode_leave = iwl_trans_pcie_op_mode_leave, @@ -2533,6 +2737,11 @@ static const struct iwl_trans_ops trans_ops_pcie = { .d3_suspend = iwl_trans_pcie_d3_suspend, .d3_resume = iwl_trans_pcie_d3_resume, +#ifdef CONFIG_PM_SLEEP + .suspend = iwl_trans_pcie_suspend, + .resume = iwl_trans_pcie_resume, +#endif /* CONFIG_PM_SLEEP */ + .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, @@ -2570,8 +2779,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - u16 pci_cmd; - int ret; + int ret, addr_size; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2609,11 +2817,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } + if (cfg->mq_rx_supported) + addr_size = 64; + else + addr_size = 36; + pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (!ret) - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(addr_size)); if (ret) { ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) @@ -2647,17 +2861,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - ret = pci_enable_msi(pdev); - if (ret) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); - } - } - trans->hw_rev = iwl_read32(trans, CSR_HW_REV); /* * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have @@ -2709,6 +2912,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } + iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); @@ -2716,19 +2920,31 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - ret = iwl_pcie_alloc_ict(trans); - if (ret) - goto out_pci_disable_msi; + init_waitqueue_head(&trans_pcie->d0i3_waitq); - ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, - iwl_pcie_irq_handler, - IRQF_SHARED, DRV_NAME, trans); - if (ret) { - IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); - goto out_free_ict; - } + if (trans_pcie->msix_enabled) { + if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) + goto out_pci_release_regions; + } else { + ret = iwl_pcie_alloc_ict(trans); + if (ret) + goto out_pci_disable_msi; - trans_pcie->inta_mask = CSR_INI_SET_MASK; + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); + if (ret) { + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); + goto out_free_ict; + } + trans_pcie->inta_mask = CSR_INI_SET_MASK; + } + +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + trans->runtime_pm_mode = 
IWL_PLAT_PM_MODE_D0I3; +#else + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ return trans; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 5262028b5..16ad820ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,7 +1,8 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -33,7 +34,6 @@ #include #include #include -#include #include "iwl-debug.h" #include "iwl-csr.h" @@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return ret; spin_lock_init(&txq->lock); + __skb_queue_head_init(&txq->overflow_q); /* * Tell nic where to find circular buffer of Tx Frame Descriptors for @@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); } txq->active = false; + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ @@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); - if (iwl_queue_space(&txq->q) > txq->q.low_mark) - iwl_wake_queue(trans, txq); + if (iwl_queue_space(&txq->q) > txq->q.low_mark && + test_bit(txq_id, trans_pcie->queue_stopped)) { + struct sk_buff_head overflow_skbs; + + __skb_queue_head_init(&overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + + /* + * This is tricky: we are in the reclaim path, which is non + * re-entrant, so no one will try to access the txq data + * from that path. We stopped tx, so no tx can run + * concurrently either. Bottom line: we can unlock and + * re-lock later. + */ + spin_unlock_bh(&txq->lock); + + while (!skb_queue_empty(&overflow_skbs)) { + struct sk_buff *skb = __skb_dequeue(&overflow_skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; + struct iwl_device_cmd *dev_cmd = + info->driver_data[dev_cmd_idx]; + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_queue_space will be small again + * and we won't wake mac80211's queue. 
+ */ + iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id); + } + spin_lock_bh(&txq->lock); + + if (iwl_queue_space(&txq->q) > txq->q.low_mark) + iwl_wake_queue(trans, txq); + } if (q->read_ptr == q->write_ptr) { IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); @@ -1686,6 +1727,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, wake_up(&trans_pcie->wait_command_queue); } + if (meta->flags & CMD_MAKE_TRANS_IDLE) { + IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + set_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + + if (meta->flags & CMD_WAKE_UP_TRANS) { + IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + clear_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + meta->flags = 0; spin_unlock_bh(&txq->lock); @@ -2161,6 +2216,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, csum = skb_checksum(skb, offs, skb->len - offs, 0); *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); + + skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb_is_nonlinear(skb) && @@ -2177,6 +2234,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (iwl_queue_space(q) < q->high_mark) { + iwl_stop_queue(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_queue_space(q) < 3)) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] = + dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + + spin_unlock(&txq->lock); + return 0; + } + } + /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirements to help the SCD to parse * the BA. @@ -2281,12 +2354,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. */ - if (iwl_queue_space(q) < q->high_mark) { - if (wait_write_ptr) - iwl_pcie_txq_inc_wr_ptr(trans, txq); - else - iwl_stop_queue(trans, txq); - } spin_unlock(&txq->lock); return 0; out_err: -- cgit v1.2.3-54-g00ecf
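
The patch's iwl_rxq_space() generalizes classic power-of-two ring arithmetic from the fixed RX_QUEUE_SIZE to a per-queue size: (read - write - 1) & (size - 1) is only correct when the size is a power of two (512 entries for the multi-queue RFH), and the -1 deliberately keeps one slot unused so that read == write always means an empty ring rather than a full one. Below is a minimal user-space sketch of that arithmetic, including the round-down-to-8 device write pointer; struct rxq and rxq_space() are hypothetical stand-ins for the driver's structures, not the driver API.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for struct iwl_rxq: just the ring indices. */
struct rxq {
	unsigned int read;       /* next index the driver will process */
	unsigned int write;      /* next index the driver will fill */
	unsigned int queue_size; /* must be a power of two */
};

/* Mirrors iwl_rxq_space(): one slot stays unused so that
 * read == write unambiguously means "empty", never "full". */
static unsigned int rxq_space(const struct rxq *q)
{
	return (q->read - q->write - 1) & (q->queue_size - 1);
}

int main(void)
{
	struct rxq q = { .read = 0, .write = 0, .queue_size = 512 };

	assert(rxq_space(&q) == 511); /* empty ring, one slot reserved */

	q.write = 511; /* writer wrapped around and caught up with the reader */
	assert(rxq_space(&q) == 0); /* full */

	q.read = 5;
	q.write = 2;
	assert(rxq_space(&q) == 2);

	/* The device write pointer only advances in multiples of 8,
	 * as in rxq->write_actual = round_down(rxq->write, 8). */
	unsigned int write_actual = q.write & ~0x7u;

	printf("space=%u write_actual=%u\n", rxq_space(&q), write_actual);
	return 0;
}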
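
iwl_pcie_write_prph_64(), added near the top of the rx.c changes, programs a 64-bit DMA address through the 32-bit peripheral-register interface by splitting it into two writes, low word at ofs and high word at ofs + 4. A hedged sketch of the same split against a fake register file follows; write_prph() and regs[] are invented for the example and do not model the real CSR space.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fake 32-bit register file; the driver would go through iwl_write_prph(). */
static uint32_t regs[16];

static void write_prph(unsigned int ofs, uint32_t val)
{
	regs[ofs / 4] = val;
}

/* Same split as iwl_pcie_write_prph_64(): low word first, high word at +4. */
static void write_prph_64(unsigned int ofs, uint64_t val)
{
	write_prph(ofs, (uint32_t)(val & 0xffffffff));
	write_prph(ofs + 4, (uint32_t)(val >> 32));
}

int main(void)
{
	uint64_t dma_addr = 0x0000001234abcd00ULL; /* made-up table address */

	write_prph_64(0, dma_addr);
	printf("LSB=0x%08" PRIx32 " MSB=0x%08" PRIx32 "\n", regs[0], regs[1]);
	return 0;
}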
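
In iwl_pcie_rxq_mq_restock(), each 64-bit free RBD carries the buffer's DMA address with the buffer's vid ORed into the low 12 bits; the WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)) holds because page allocations are at least 4 KiB aligned, so those bits are free. On completion, iwl_pcie_rx_handle() recovers the vid from the low 12 bits of the used_bd entry and indexes global_table with it. A small sketch of the pack/unpack, assuming the same 12-bit layout:

#include <assert.h>
#include <stdint.h>

#define VID_BITS 12
#define VID_MASK ((1u << VID_BITS) - 1)

/* Pack: the page DMA address is at least 4 KiB aligned, so its low
 * 12 bits are guaranteed zero and can carry the buffer id (vid). */
static uint64_t rbd_pack(uint64_t page_dma, uint16_t vid)
{
	assert((page_dma & VID_MASK) == 0); /* mirrors the WARN_ON in the patch */
	return page_dma | vid;
}

/* Unpack on the completion side: only the low 12 bits of the 32-bit
 * used_bd entry identify the buffer in global_table. */
static uint16_t used_bd_to_vid(uint32_t used_bd)
{
	return used_bd & VID_MASK;
}

int main(void)
{
	uint64_t bd = rbd_pack(0x12345000ULL, 0x2a);

	assert(used_bd_to_vid((uint32_t)bd) == 0x2a);
	return 0;
}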
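
iwl_pcie_rx_allocator_get() claims a completed allocation request atomically: atomic_dec_if_positive() decrements req_ready only when it is positive, so a queue either takes a whole batch of RX_CLAIM_REQ_ALLOC buffers or returns without touching any list. The sketch below mimics that handshake in user space with C11 atomics; dec_if_positive() is a hand-rolled equivalent of the kernel helper, and the list bookkeeping is reduced to two counters.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RX_CLAIM_REQ_ALLOC 8

static atomic_int req_ready; /* requests the allocator has completed */

/* Hand-rolled equivalent of the kernel's atomic_dec_if_positive():
 * decrement only if the current value is > 0; return the new value,
 * or -1 if the value was already 0. */
static int dec_if_positive(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return cur - 1;
	}
	return -1;
}

/* Mirrors iwl_pcie_rx_allocator_get(): claim a whole batch or nothing. */
static bool allocator_get(int *free_count, int *used_count)
{
	if (dec_if_positive(&req_ready) < 0)
		return false; /* no completed request to claim */

	/* The driver would move RX_CLAIM_REQ_ALLOC buffers from
	 * rba->rbd_allocated to rxq->rx_free under rba->lock here. */
	*used_count -= RX_CLAIM_REQ_ALLOC;
	*free_count += RX_CLAIM_REQ_ALLOC;
	return true;
}

int main(void)
{
	int free_count = 0, used_count = RX_CLAIM_REQ_ALLOC;

	atomic_store(&req_ready, 1); /* allocator finished one request */
	printf("claimed=%d free=%d used=%d\n",
	       allocator_get(&free_count, &used_count), free_count, used_count);
	printf("claimed=%d\n", allocator_get(&free_count, &used_count));
	return 0;
}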
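
iwl_pcie_clear_irq() re-enables an auto-masked MSI-X vector through CSR_MSIX_AUTOMASK_ST_AD, a write-1-to-clear register: the hardware sets a vector's bit when it fires the interrupt, and the driver clears that bit (re-enabling the vector) by writing 1 to it. A toy model of the W1C semantics, with automask_st as a simulated register rather than a real CSR:

#include <assert.h>
#include <stdint.h>

/* Simulated auto-mask register: the HW sets a bit when it fires the
 * corresponding vector, masking further interrupts from it. */
static uint32_t automask_st;

static void hw_fire_irq(unsigned int vec)
{
	automask_st |= 1u << vec;
}

/* W1C write: every bit set in val is cleared in the register. */
static void reg_write_w1c(uint32_t *reg, uint32_t val)
{
	*reg &= ~val;
}

/* What iwl_pcie_clear_irq() does with CSR_MSIX_AUTOMASK_ST_AD. */
static void clear_irq(unsigned int vec)
{
	reg_write_w1c(&automask_st, 1u << vec);
}

int main(void)
{
	hw_fire_irq(3); /* vector 3 auto-masked by the HW */
	assert(automask_st & (1u << 3));
	clear_irq(3); /* handler done, unmask the vector */
	assert(!(automask_st & (1u << 3)));
	return 0;
}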
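
iwl_trans_pcie_reclaim() drains txq->overflow_q by splicing it onto a local list under the queue lock and retransmitting with the lock dropped, which is safe because the reclaim path is not re-entrant and the queue is stopped. The user-space sketch below reproduces that splice-then-drain pattern with a trivial singly linked queue in place of sk_buff_head; q_splice_init() plays the role of skb_queue_splice_init(), and the printf stands in for the iwl_trans_pcie_tx() call.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	int id;
};

struct queue {
	struct pkt *head, *tail;
};

static void q_push(struct queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

static struct pkt *q_pop(struct queue *q)
{
	struct pkt *p = q->head;

	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}
	return p;
}

/* Move everything from src to dst and leave src empty, like
 * skb_queue_splice_init(). */
static void q_splice_init(struct queue *src, struct queue *dst)
{
	*dst = *src;
	src->head = src->tail = NULL;
}

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct queue overflow_q;

/* Mirrors the reclaim path: splice under the lock, retransmit the
 * backlog with the lock dropped, then retake the lock. */
static void reclaim_overflow(void)
{
	struct queue local = { 0 };
	struct pkt *p;

	pthread_mutex_lock(&txq_lock);
	q_splice_init(&overflow_q, &local);
	pthread_mutex_unlock(&txq_lock);

	while ((p = q_pop(&local))) {
		printf("retransmit pkt %d\n", p->id); /* iwl_trans_pcie_tx() here */
		free(p);
	}

	pthread_mutex_lock(&txq_lock);
	/* the driver would wake the mac80211 queue here if there is room */
	pthread_mutex_unlock(&txq_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			break;
		p->id = i;
		pthread_mutex_lock(&txq_lock);
		q_push(&overflow_q, p);
		pthread_mutex_unlock(&txq_lock);
	}
	reclaim_overflow();
	return 0;
}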