Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi')
74 files changed, 3038 insertions, 2370 deletions
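Most of the mechanical churn in the diff below is a rename from mac80211's enum ieee80211_band to cfg80211's enum nl80211_band (and IEEE80211_NUM_BANDS to NUM_NL80211_BANDS); the two enums use the same values, so call sites only change names. A minimal orientation sketch of the pattern — illustrative only, not taken from the patch:

#include <net/cfg80211.h>

/* Illustrative helper (not part of the patch): nl80211_band mirrors the
 * old ieee80211_band values, so the conversion is a pure rename.
 */
static const char *band_name(enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ:		/* was IEEE80211_BAND_2GHZ */
		return "2.4 GHz";
	case NL80211_BAND_5GHZ:		/* was IEEE80211_BAND_5GHZ */
		return "5 GHz";
	default:			/* NUM_NL80211_BANDS replaces IEEE80211_NUM_BANDS */
		return "other";
	}
}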
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 16c4f3834..b64db47b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. -config IWLWIFI_UAPSD - bool "enable U-APSD by default" - depends on IWLMVM - help - Say Y here to enable U-APSD by default. This may cause - interoperability problems with some APs, manifesting in lower than - expected throughput due to those APs not enabling aggregation - - If unsure, say N. - config IWLWIFI_PCIE_RTPM bool "Enable runtime power management mode for PCIe devices" depends on IWLMVM && PM @@ -144,12 +134,6 @@ config IWLWIFI_DEBUGFS is a low-impact option that allows getting insight into the driver's state at runtime. -config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE - bool "Experimental uCode support" - depends on IWLWIFI_DEBUG - ---help--- - Enable use of experimental ucode for testing and debugging. - config IWLWIFI_DEVICE_TRACING bool "iwlwifi device access tracing" depends on EVENT_TRACING diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 9de277c6c..b79e38734 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx); void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_vif *vif); /* uCode */ @@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear); static inline const struct ieee80211_supported_band *iwl_get_hw_mode( - struct iwl_priv *priv, enum ieee80211_band band) + struct iwl_priv *priv, enum nl80211_band band) { return priv->hw->wiphy->bands[band]; } @@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan); #endif /* rx */ -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_setup_rx_handlers(struct iwl_priv *priv); void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); @@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); int __must_check iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif, enum iwl_scan_type scan_type, - enum ieee80211_band band); + enum nl80211_band band); /* For faster active scanning, scan will move to the next channel if fewer than * PLCP_QUIET_THRESH packets are heard on this channel within diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 74c516152..f6591c83d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ); if (supp_band) { channels = supp_band->channels; @@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, IEEE80211_CHAN_NO_IR ? 
"passive only" : "active/passive"); } - supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ); if (supp_band) { channels = supp_band->channels; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 1a7ead753..8148df61a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -677,7 +677,7 @@ struct iwl_priv { struct iwl_hw_params hw_params; - enum ieee80211_band band; + enum nl80211_band band; u8 valid_contexts; void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, @@ -722,11 +722,11 @@ struct iwl_priv { unsigned long scan_start; unsigned long scan_start_tsf; void *scan_cmd; - enum ieee80211_band scan_band; + enum nl80211_band scan_band; struct cfg80211_scan_request *scan_request; struct ieee80211_vif *scan_vif; enum iwl_scan_type scan_type; - u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 scan_tx_ant[NUM_NL80211_BANDS]; u8 mgmt_tx_ant; /* max number of station keys */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index cc13c0406..f21732ec3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, .data = { &cmd, }, }; - cmd.band = priv->band == IEEE80211_BAND_2GHZ; + cmd.band = priv->band == NL80211_BAND_2GHZ; ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", ctx->active.channel, ch); @@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, hcmd.data[0] = cmd; - cmd->band = priv->band == IEEE80211_BAND_2GHZ; + cmd->band = priv->band == NL80211_BAND_2GHZ; ch = ch_switch->chandef.chan->hw_value; IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", ctx->active.channel, ch); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 179946926..8dda52ae3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv) iwl_tt_handler(priv); } -int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int idx = 0; int band_offset = 0; @@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) return idx; /* Legacy rate format, search for match in table */ } else { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) @@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) int i; u8 ind = ant; - if (priv->band == IEEE80211_BAND_2GHZ && + if (priv->band == NL80211_BAND_2GHZ && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index c63ea7957..8c0719468 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; - if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - 
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) - priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &priv->nvm_data->bands[IEEE80211_BAND_5GHZ]; + if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = + &priv->nvm_data->bands[NL80211_BAND_2GHZ]; + if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = + &priv->nvm_data->bands[NL80211_BAND_5GHZ]; hw->wiphy->hw_version = priv->trans->hw_id; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 856281279..37b32a6f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); /* In mac80211, rates for 5 GHz start at 0 */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate += IWL_FIRST_OFDM_RATE; else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE) rate_flags |= RATE_MCS_CCK_MSK; @@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data) static void iwl_setup_deferred_work(struct iwl_priv *priv) { - priv->workqueue = create_singlethread_workqueue(DRV_NAME); + priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0); INIT_WORK(&priv->restart, iwl_bg_restart); INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); @@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv) INIT_LIST_HEAD(&priv->calib_results); - priv->band = IEEE80211_BAND_2GHZ; + priv->band = NL80211_BAND_2GHZ; priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index ee7505537..b95c2d76d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv, * fill "search" or "active" tx mode table. 
*/ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct iwl_scale_tbl_info *tbl, int *rate_idx) { @@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, /* legacy rate format */ if (!(rate_n_flags & RATE_MCS_HT_MSK)) { if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { switch_to_legacy = 1; scale_index = rs_ht_to_legacy[scale_index]; - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) tbl->lq_type = LQ_A; else tbl->lq_type = LQ_G; @@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, /* Mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { /* supp_rates has no CCK bits in A mode */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate_mask = (u16)(rate_mask & (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); else @@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, table = &lq_sta->lq; tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); - if (priv->band == IEEE80211_BAND_5GHZ) + if (priv->band == NL80211_BAND_5GHZ) rs_index -= IWL_FIRST_OFDM_RATE; mac_flags = info->status.rates[0].flags; mac_index = info->status.rates[0].idx; @@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, * mac80211 HT index is always zero-indexed; we need to move * HT OFDM rates after CCK rates in 2.4 GHz band */ - if (priv->band == IEEE80211_BAND_2GHZ) + if (priv->band == NL80211_BAND_2GHZ) mac_index += IWL_FIRST_OFDM_RATE; } /* Here we actually compare this rate to the latest LQ command */ @@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, /* mask with station rate restriction */ if (is_legacy(tbl->lq_type)) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) /* supp_rates has no CCK bits in A mode */ rate_scale_index_msk = (u16) (rate_mask & (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); @@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, /* Get max rate if user set max rate */ if (lq_sta) { lq_sta->max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && + if ((sband->band == NL80211_BAND_5GHZ) && (lq_sta->max_rate_idx != -1)) lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; if ((lq_sta->max_rate_idx < 0) || @@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, } else { /* Check for invalid rates */ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || - ((sband->band == IEEE80211_BAND_5GHZ) && + ((sband->band == NL80211_BAND_5GHZ) && (rate_idx < IWL_FIRST_OFDM_RATE))) rate_idx = rate_lowest_index(sband, sta); /* On valid 5 GHz rate, adjust index */ - else if (sband->band == IEEE80211_BAND_5GHZ) + else if (sband->band == NL80211_BAND_5GHZ) rate_idx -= IWL_FIRST_OFDM_RATE; info->control.rates[0].flags = 0; } @@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i /* Set last_txrate_idx to lowest rate */ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) + if 
(sband->band == NL80211_BAND_5GHZ) lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; #ifdef CONFIG_MAC80211_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index c5fe44584..50c1e951d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -355,7 +355,7 @@ struct iwl_lq_sta { u8 action_counter; /* # mode-switch actions tried */ u8 is_green; u8 is_dup; - enum ieee80211_band band; + enum nl80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ u32 supp_rates; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 52ab1e012..dfa2041cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -686,7 +686,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx_napi(priv->hw, skb, priv->napi); + ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi); } static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) @@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* rx_status carries information about the packet to mac80211 */ rx_status.mactime = le64_to_cpu(phy_res->timestamp); rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), rx_status.band); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 2d47cb24c..b22855218 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx) { - enum ieee80211_band band = ch->band; + enum nl80211_band band = ch->band; u16 channel = ch->hw_value; if ((le16_to_cpu(ctx->staging.channel) == channel) && @@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, return; ctx->staging.channel = cpu_to_le16(channel); - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; else ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; @@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_vif *vif) { - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { ctx->staging.flags &= ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_CCK_MSK); @@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwlagn_set_rxon_chain(priv, ctx); - if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) + if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ)) ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 81a2ddbe9..d01766f16 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ 
b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, scan_notif->tsf_high, scan_notif->status); IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", - (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", + (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", jiffies_to_msecs(jiffies - priv->scan_start)); /* @@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) } static u16 iwl_get_active_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band, u8 n_probes) + enum nl80211_band band, u8 n_probes) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) return IWL_ACTIVE_DWELL_TIME_52 + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); else @@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) } static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, - enum ieee80211_band band) + enum nl80211_band band) { - u16 passive = (band == IEEE80211_BAND_2GHZ) ? + u16 passive = (band == NL80211_BAND_2GHZ) ? IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; @@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, /* Return valid, unused, channel for a passive scan to reset the RF */ static u8 iwl_get_single_channel_number(struct iwl_priv *priv, - enum ieee80211_band band) + enum nl80211_band band) { struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band]; struct iwl_rxon_context *ctx; @@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv, static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, struct iwl_scan_channel *scan_ch) { const struct ieee80211_supported_band *sband; @@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME); /* Set txpower levels to defaults */ scan_ch->dsp_atten = 110; - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv, static int iwl_get_channels_for_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, u8 is_active, u8 n_probes, struct iwl_scan_channel *scan_ch) { @@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv, * power level: * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) u32 rate_flags = 0; u16 cmd_len = 0; u16 rx_chain = 0; - enum ieee80211_band band; + enum nl80211_band band; u8 n_probes = 0; u8 rx_ant = priv->nvm_data->valid_rx_ant; u8 rate; @@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; switch (priv->scan_band) { - case IEEE80211_BAND_2GHZ: + case NL80211_BAND_2GHZ: scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; chan_mod = le32_to_cpu( priv->contexts[IWL_RXON_CTX_BSS].active.flags & @@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif 
*vif) priv->lib->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; - case IEEE80211_BAND_5GHZ: + case NL80211_BAND_5GHZ: rate = IWL_RATE_6M_PLCP; break; default: @@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; - if (band == IEEE80211_BAND_2GHZ && + if (band == NL80211_BAND_2GHZ && priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ @@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) void iwl_init_scan_params(struct iwl_priv *priv) { u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1; - if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; - if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) - priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; + if (!priv->scan_tx_ant[NL80211_BAND_5GHZ]) + priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[NL80211_BAND_2GHZ]) + priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; } int __must_check iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif, enum iwl_scan_type scan_type, - enum ieee80211_band band) + enum nl80211_band band) { int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index 8e9768a55..de6ec9b7a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, /* Set up the rate scaling to start at selected rate, fall back * all the way down to 1M in IEEE order, and then spin on 1M */ - if (priv->band == IEEE80211_BAND_5GHZ) + if (priv->band == NL80211_BAND_5GHZ) r = IWL_RATE_6M_INDEX; else if (ctx && ctx->vif && ctx->vif->p2p) r = IWL_RATE_6M_INDEX; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 59e2001c3..4b97371c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_TSF_MSK; else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; - else if (info->band == IEEE80211_BAND_2GHZ && + else if (info->band == NL80211_BAND_2GHZ && priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || @@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_idx = rate_lowest_index( &priv->nvm_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_rates[rate_idx].plcp; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c index 2cfac764a..9bdac0306 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c @@ -34,10 +34,6 @@ #define IWL1000_UCODE_API_MAX 5 #define IWL100_UCODE_API_MAX 5 -/* Oldest version we won't warn about */ -#define IWL1000_UCODE_API_OK 5 -#define IWL100_UCODE_API_OK 5 - /* Lowest firmware API version supported */ #define IWL1000_UCODE_API_MIN 1 #define IWL100_UCODE_API_MIN 5 
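From here on the per-family *_UCODE_API_OK defines ("oldest version we won't warn about") are dropped, so the loader in iwl-drv.c (further down) gates firmware on a single min/max range instead of three tiers. A rough sketch of the resulting check, paraphrasing the iwl_req_fw_callback() hunk below — the helper name is hypothetical:

#include <linux/types.h>

/* Sketch only: with the API_OK tier gone, anything inside
 * [api_min, api_max] is accepted; anything else makes the loader
 * retry with the next lower firmware index.
 */
static bool api_version_supported(u32 api_ver, u32 api_min, u32 api_max)
{
	return api_ver >= api_min && api_ver <= api_max;
}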
@@ -56,7 +52,7 @@ static const struct iwl_base_params iwl1000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, .eeprom_size = OTP_LOW_IMAGE_SIZE, - .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .pll_cfg = true, .max_ll_items = OTP_MAX_LL_ITEMS_1000, .shadow_ram_support = false, .led_compensation = 51, @@ -68,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = { static const struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ), }; static const struct iwl_eeprom_params iwl1000_eeprom_params = { @@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { #define IWL_DEVICE_1000 \ .fw_name_pre = IWL1000_FW_PRE, \ .ucode_api_max = IWL1000_UCODE_API_MAX, \ - .ucode_api_ok = IWL1000_UCODE_API_OK, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = { #define IWL_DEVICE_100 \ .fw_name_pre = IWL100_FW_PRE, \ .ucode_api_max = IWL100_UCODE_API_MAX, \ - .ucode_api_ok = IWL100_UCODE_API_OK, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c index f128e40cb..761cdccaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c @@ -36,12 +36,6 @@ #define IWL105_UCODE_API_MAX 6 #define IWL135_UCODE_API_MAX 6 -/* Oldest version we won't warn about */ -#define IWL2030_UCODE_API_OK 6 -#define IWL2000_UCODE_API_OK 6 -#define IWL105_UCODE_API_OK 6 -#define IWL135_UCODE_API_OK 6 - /* Lowest firmware API version supported */ #define IWL2030_UCODE_API_MIN 5 #define IWL2000_UCODE_API_MIN 5 @@ -68,7 +62,6 @@ static const struct iwl_base_params iwl2000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 51, @@ -82,7 +75,6 @@ static const struct iwl_base_params iwl2000_base_params = { static const struct iwl_base_params iwl2030_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 57, @@ -95,7 +87,7 @@ static const struct iwl_base_params iwl2030_base_params = { static const struct iwl_ht_params iwl2000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ), }; static const struct iwl_eeprom_params iwl20x0_eeprom_params = { @@ -114,7 +106,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { #define IWL_DEVICE_2000 \ .fw_name_pre = IWL2000_FW_PRE, \ .ucode_api_max = IWL2000_UCODE_API_MAX, \ - .ucode_api_ok = IWL2000_UCODE_API_OK, \ .ucode_api_min = IWL2000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -142,7 +133,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { #define IWL_DEVICE_2030 \ .fw_name_pre = IWL2030_FW_PRE, \ .ucode_api_max = IWL2030_UCODE_API_MAX, \ - .ucode_api_ok = IWL2030_UCODE_API_OK, \ .ucode_api_min = IWL2030_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_2030, \ 
.max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -163,7 +153,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { #define IWL_DEVICE_105 \ .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ - .ucode_api_ok = IWL105_UCODE_API_OK, \ .ucode_api_min = IWL105_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_105, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -191,7 +180,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { #define IWL_DEVICE_135 \ .fw_name_pre = IWL135_FW_PRE, \ .ucode_api_max = IWL135_UCODE_API_MAX, \ - .ucode_api_ok = IWL135_UCODE_API_OK, \ .ucode_api_min = IWL135_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_135, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c index cae82e1a1..230f324ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c @@ -34,10 +34,6 @@ #define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 -/* Oldest version we won't warn about */ -#define IWL5000_UCODE_API_OK 5 -#define IWL5150_UCODE_API_OK 2 - /* Lowest firmware API version supported */ #define IWL5000_UCODE_API_MIN 1 #define IWL5150_UCODE_API_MIN 1 @@ -57,7 +53,7 @@ static const struct iwl_base_params iwl5000_base_params = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .pll_cfg = true, .led_compensation = 51, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 512, @@ -66,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = { static const struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_eeprom_params iwl5000_eeprom_params = { @@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { #define IWL_DEVICE_5000 \ .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max = IWL5000_UCODE_API_MAX, \ - .ucode_api_ok = IWL5000_UCODE_API_OK, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ @@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, - .ucode_api_ok = IWL5000_UCODE_API_OK, .ucode_api_min = IWL5000_UCODE_API_MIN, .device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = IWLAGN_RTC_INST_SIZE, @@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = { #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ - .ucode_api_ok = IWL5150_UCODE_API_OK, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c index b8b2a0e08..fa86e5e7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c @@ -36,13 +36,6 @@ #define IWL6000G2_UCODE_API_MAX 6 #define IWL6035_UCODE_API_MAX 6 -/* Oldest version we won't warn about */ -#define IWL6000_UCODE_API_OK 4 -#define IWL6000G2_UCODE_API_OK 5 -#define IWL6050_UCODE_API_OK 5 -#define IWL6000G2B_UCODE_API_OK 6 -#define IWL6035_UCODE_API_OK 6 - /* Lowest firmware API version supported */ #define IWL6000_UCODE_API_MIN 4 #define 
IWL6050_UCODE_API_MIN 4 @@ -78,7 +71,6 @@ static const struct iwl_base_params iwl6000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 51, @@ -91,7 +83,6 @@ static const struct iwl_base_params iwl6000_base_params = { static const struct iwl_base_params iwl6050_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, .led_compensation = 51, @@ -104,7 +95,6 @@ static const struct iwl_base_params iwl6050_base_params = { static const struct iwl_base_params iwl6000_g2_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, - .pll_cfg_val = 0, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 57, @@ -117,7 +107,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = { static const struct iwl_ht_params iwl6000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_eeprom_params iwl6000_eeprom_params = { @@ -136,7 +126,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { #define IWL_DEVICE_6005 \ .fw_name_pre = IWL6005_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6005, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -191,7 +180,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { #define IWL_DEVICE_6030 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -228,7 +216,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = { #define IWL_DEVICE_6035 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6035_UCODE_API_MAX, \ - .ucode_api_ok = IWL6035_UCODE_API_OK, \ .ucode_api_min = IWL6035_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -282,7 +269,6 @@ const struct iwl_cfg iwl130_bg_cfg = { #define IWL_DEVICE_6000i \ .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000_UCODE_API_OK, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_6000i, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -370,7 +356,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, - .ucode_api_ok = IWL6000_UCODE_API_OK, .ucode_api_min = IWL6000_UCODE_API_MIN, .device_family = IWL_DEVICE_FAMILY_6000, .max_inst_size = IWL60_RTC_INST_SIZE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index 786a919fa..8d164d896 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -76,16 +76,10 @@ #define IWL7265D_UCODE_API_MAX 21 #define IWL3168_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL7260_UCODE_API_OK 13 -#define IWL7265_UCODE_API_OK 13 -#define IWL7265D_UCODE_API_OK 13 -#define 
IWL3168_UCODE_API_OK 20 - /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 13 -#define IWL7265_UCODE_API_MIN 13 -#define IWL7265D_UCODE_API_MIN 13 +#define IWL7260_UCODE_API_MIN 16 +#define IWL7265_UCODE_API_MIN 16 +#define IWL7265D_UCODE_API_MIN 16 #define IWL3168_UCODE_API_MIN 20 /* NVM versions */ @@ -128,7 +122,6 @@ static const struct iwl_base_params iwl7000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, .num_of_queues = 31, - .pll_cfg_val = 0, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -162,7 +155,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = { static const struct iwl_ht_params iwl7000_ht_params = { .stbc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; #define IWL_DEVICE_7000_COMMON \ @@ -179,25 +172,21 @@ static const struct iwl_ht_params iwl7000_ht_params = { #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7260_UCODE_API_MAX, \ - .ucode_api_ok = IWL7260_UCODE_API_OK, \ .ucode_api_min = IWL7260_UCODE_API_MIN #define IWL_DEVICE_7005 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265_UCODE_API_MAX, \ - .ucode_api_ok = IWL7265_UCODE_API_OK, \ .ucode_api_min = IWL7265_UCODE_API_MIN #define IWL_DEVICE_3008 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL3168_UCODE_API_MAX, \ - .ucode_api_ok = IWL3168_UCODE_API_OK, \ .ucode_api_min = IWL3168_UCODE_API_MIN #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ - .ucode_api_ok = IWL7265D_UCODE_API_OK, \ .ucode_api_min = IWL7265D_UCODE_API_MIN const struct iwl_cfg iwl7260_2ac_cfg = { @@ -297,7 +286,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { static const struct iwl_ht_params iwl7265_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; const struct iwl_cfg iwl3165_2ac_cfg = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 16846cae3..0b4684234 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -73,12 +73,8 @@ #define IWL8000_UCODE_API_MAX 21 #define IWL8265_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL8000_UCODE_API_OK 13 -#define IWL8265_UCODE_API_OK 20 - /* Lowest firmware API version supported */ -#define IWL8000_UCODE_API_MIN 13 +#define IWL8000_UCODE_API_MIN 16 #define IWL8265_UCODE_API_MIN 20 /* NVM versions */ @@ -116,7 +112,6 @@ static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, .num_of_queues = 31, - .pll_cfg_val = 0, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -128,7 +123,7 @@ static const struct iwl_base_params iwl8000_base_params = { static const struct iwl_ht_params iwl8000_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl8000_tt_params = { @@ -175,19 +170,16 @@ static const struct iwl_tt_params iwl8000_tt_params = { #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ 
#define IWL_DEVICE_8260 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8265 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8265_UCODE_API_MAX, \ - .ucode_api_ok = IWL8265_UCODE_API_OK, \ .ucode_api_min = IWL8265_UCODE_API_MIN \ const struct iwl_cfg iwl8260_2n_cfg = { @@ -244,6 +236,20 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, }; +const struct iwl_cfg iwl8265_2ac_sdio_cfg = { + .name = "Intel(R) Dual Band Wireless-AC 8265", + .fw_name_pre = IWL8265_FW_PRE, + IWL_DEVICE_8265, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, + .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, + .disable_dummy_notification = true, + .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, + .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, +}; + const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .name = "Intel(R) Dual Band Wireless-AC 4165", .fw_name_pre = IWL8000_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 7b34387d7..19569fff2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015-2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015-2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -57,11 +57,8 @@ /* Highest firmware API version supported */ #define IWL9000_UCODE_API_MAX 21 -/* Oldest version we won't warn about */ -#define IWL9000_UCODE_API_OK 13 - /* Lowest firmware API version supported */ -#define IWL9000_UCODE_API_MIN 13 +#define IWL9000_UCODE_API_MIN 16 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d @@ -76,15 +73,20 @@ #define IWL9000_SMEM_LEN 0x68000 #define IWL9000_FW_PRE "/*(DEBLOBBED)*/" +#define IWL9260_FW_PRE "/*(DEBLOBBED)*/" +#define IWL9260LC_FW_PRE "/*(DEBLOBBED)*/" #define IWL9000_MODULE_FIRMWARE(api) \ IWL9000_FW_PRE /*(DEBLOBBED)*/ +#define IWL9260_MODULE_FIRMWARE(api) \ + IWL9260_FW_PRE /*(DEBLOBBED)*/ +#define IWL9260LC_MODULE_FIRMWARE(api) \ + IWL9260LC_FW_PRE /*(DEBLOBBED)*/ #define NVM_HW_SECTION_NUM_FAMILY_9000 10 static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, .num_of_queues = 31, - .pll_cfg_val = 0, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -96,7 +98,7 @@ static const struct iwl_base_params iwl9000_base_params = { static const struct iwl_ht_params iwl9000_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl9000_tt_params = { @@ -122,7 +124,6 @@ static const struct iwl_tt_params iwl9000_tt_params = { #define IWL_DEVICE_9000 \ .ucode_api_max = IWL9000_UCODE_API_MAX, \ - .ucode_api_ok = IWL9000_UCODE_API_OK, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_8000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ @@ -137,15 +138,31 @@ static const struct iwl_tt_params iwl9000_tt_params = { .dccm2_len = IWL9000_DCCM2_LEN, \ .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true + .mac_addr_from_csr = true, \ + .rf_id = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", - .fw_name_pre = IWL9000_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +/* + * TODO the struct below is for internal testing only this should be + * removed by EO 2016~ + */ +const struct iwl_cfg iwl9260lc_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 9260", + .fw_name_pre = IWL9260LC_FW_PRE, IWL_DEVICE_9000, .ht_params = &iwl9000_ht_params, .nvm_ver = IWL9000_NVM_VERSION, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 3e4d346be..4a0af7de8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright (C) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -131,6 +133,8 @@ enum iwl_led_mode { #define IWL_MAX_WD_TIMEOUT 120000 #define IWL_DEFAULT_MAX_TX_POWER 22 +#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ + NETIF_F_TSO | NETIF_F_TSO6) /* Antenna presence definitions */ #define ANT_NONE 0x0 @@ -163,34 +167,36 @@ static inline u8 num_of_ant(u8 mask) * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. */ struct iwl_base_params { - int eeprom_size; - int num_of_queues; /* def: HW dependent */ - /* for iwl_pcie_apm_init() */ - u32 pll_cfg_val; - - const u16 max_ll_items; - const bool shadow_ram_support; - u16 led_compensation; unsigned int wd_timeout; - u32 max_event_log_size; - const bool shadow_reg_enable; - const bool pcie_l1_allowed; - const bool apmg_wake_up_wa; - const bool scd_chain_ext_wa; + + u16 eeprom_size; + u16 max_event_log_size; + + u8 pll_cfg:1, /* for iwl_pcie_apm_init() */ + shadow_ram_support:1, + shadow_reg_enable:1, + pcie_l1_allowed:1, + apmg_wake_up_wa:1, + scd_chain_ext_wa:1; + + u8 num_of_queues; /* def: HW dependent */ + + u8 max_ll_items; + u8 led_compensation; }; /* * @stbc: support Tx STBC and 1*SS Rx STBC * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic - * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40 + * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { enum ieee80211_smps_mode smps_mode; - const bool ht_greenfield_support; /* if used set to true */ - const bool stbc; - const bool ldpc; - bool use_rts_for_aggregation; + u8 ht_greenfield_support:1, + stbc:1, + ldpc:1, + use_rts_for_aggregation:1; u8 ht40_bands; }; @@ -231,10 +237,10 @@ struct iwl_tt_params { u32 tx_protection_entry; u32 tx_protection_exit; struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; - bool support_ct_kill; - bool support_dynamic_smps; - bool support_tx_protection; - bool support_tx_backoff; + u8 support_ct_kill:1, + support_dynamic_smps:1, + support_tx_protection:1, + support_tx_backoff:1; }; /* @@ -277,8 +283,6 @@ struct iwl_pwr_tx_backoff { * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre<api>.ucode. * @ucode_api_max: Highest version of uCode API supported by driver. - * @ucode_api_ok: oldest version of the uCode API that is OK to load - * without a warning, for use in transitions * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section * @max_data_size: The maximal length of the fw data section @@ -314,6 +318,7 @@ struct iwl_pwr_tx_backoff { * @smem_len: the length of SMEM * @mq_rx_supported: multi-queue rx support * @vht_mu_mimo_supported: VHT MU-MIMO support + * @rf_id: need to read rf_id to determine the firmware image * * We enable the driver to be backward compatible wrt. hardware features. 
* API differences in uCode shouldn't be handled here but through TLVs @@ -323,51 +328,51 @@ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; - const unsigned int ucode_api_max; - const unsigned int ucode_api_ok; - const unsigned int ucode_api_min; - const enum iwl_device_family device_family; - const u32 max_data_size; - const u32 max_inst_size; - u8 valid_tx_ant; - u8 valid_rx_ant; - u8 non_shared_ant; - bool bt_shared_single_ant; - u16 nvm_ver; - u16 nvm_calib_ver; /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; - enum iwl_led_mode led_mode; - const bool rx_with_siso_diversity; - const bool internal_wimax_coex; - const bool host_interrupt_operation_mode; - bool high_temp; - u8 nvm_hw_section_num; - bool mac_addr_from_csr; - bool lp_xtal_workaround; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; - bool no_power_up_nic_in_init; const char *default_nvm_file_B_step; const char *default_nvm_file_C_step; - netdev_features_t features; - unsigned int max_rx_agg_size; - bool disable_dummy_notification; - unsigned int max_tx_agg_size; - unsigned int max_ht_ampdu_exponent; - unsigned int max_vht_ampdu_exponent; - const u32 dccm_offset; - const u32 dccm_len; - const u32 dccm2_offset; - const u32 dccm2_len; - const u32 smem_offset; - const u32 smem_len; const struct iwl_tt_params *thermal_params; - bool apmg_not_supported; - bool mq_rx_supported; - bool vht_mu_mimo_supported; + enum iwl_device_family device_family; + enum iwl_led_mode led_mode; + u32 max_data_size; + u32 max_inst_size; + netdev_features_t features; + u32 dccm_offset; + u32 dccm_len; + u32 dccm2_offset; + u32 dccm2_len; + u32 smem_offset; + u32 smem_len; + u16 nvm_ver; + u16 nvm_calib_ver; + u16 rx_with_siso_diversity:1, + bt_shared_single_ant:1, + internal_wimax_coex:1, + host_interrupt_operation_mode:1, + high_temp:1, + mac_addr_from_csr:1, + lp_xtal_workaround:1, + no_power_up_nic_in_init:1, + disable_dummy_notification:1, + apmg_not_supported:1, + mq_rx_supported:1, + vht_mu_mimo_supported:1, + rf_id:1; + u8 valid_tx_ant; + u8 valid_rx_ant; + u8 non_shared_ant; + u8 nvm_hw_section_num; + u8 max_rx_agg_size; + u8 max_tx_agg_size; + u8 max_ht_ampdu_exponent; + u8 max_vht_ampdu_exponent; + u8 ucode_api_max; + u8 ucode_api_min; }; /* @@ -438,8 +443,10 @@ extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; +extern const struct iwl_cfg iwl8265_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260lc_2ac_cfg; extern const struct iwl_cfg iwl5165_2ac_cfg; #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index b978f6cae..b52913448 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -108,6 +108,17 @@ #define CSR_HW_REV (CSR_BASE+0x028) /* + * RF ID revision info + * Bit fields: + * 31:24: Reserved (set to 0x0) + * 23:12: Type + * 11:8: Step (A - 0x0, B - 0x1, etc) + * 7:4: Dash + * 3:0: Flavor + */ +#define CSR_HW_RF_ID (CSR_BASE+0x09c) + +/* * EEPROM and OTP (one-time-programmable) memory reads * * NOTE: Device 
must be awake, initialized via apm_ops.init(), @@ -333,6 +344,10 @@ enum { #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) +/* RF_ID value */ +#define CSR_HW_RF_ID_TYPE_JF (0x00105000) +#define CSR_HW_RF_ID_TYPE_LC (0x00101000) + /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 7ce381ba3..c695125d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -117,7 +117,7 @@ struct iwl_drv { const struct iwl_cfg *cfg; int fw_index; /* firmware we're trying to load */ - char firmware_name[32]; /* name of firmware file to load */ + char firmware_name[64]; /* name of firmware file to load */ struct completion request_firmware_complete; @@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.dbg_conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) kfree(drv->fw.dbg_trigger_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) + kfree(drv->fw.dbg_mem_tlv[i]); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -209,20 +211,12 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); -#define UCODE_EXPERIMENTAL_INDEX 100 -#define UCODE_EXPERIMENTAL_TAG "exp" - static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const char *name_pre = drv->cfg->fw_name_pre; char tag[8]; if (first) { -#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE - drv->fw_index = UCODE_EXPERIMENTAL_INDEX; - strcpy(tag, UCODE_EXPERIMENTAL_TAG); - } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { -#endif drv->fw_index = drv->cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { @@ -238,9 +232,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) snprintf(drv->firmware_name, sizeof(drv->firmware_name), "/*(DEBLOBBED)*/", name_pre, tag); - IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? "EXPERIMENTAL " : "", + IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); return reject_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, @@ -284,6 +276,7 @@ struct iwl_firmware_pieces { size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; }; /* @@ -538,9 +531,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, } if (build) - sprintf(buildstr, " build %u%s", build, - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? " (EXP)" : ""); + sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; @@ -624,9 +615,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, build = le32_to_cpu(ucode->build); if (build) - sprintf(buildstr, " build %u%s", build, - (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) - ? 
" (EXP)" : ""); + sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; @@ -1028,6 +1017,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); gscan_capa = true; break; + case IWL_UCODE_TLV_FW_MEM_SEG: { + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = + (void *)tlv_data; + u32 type; + + if (tlv_len != (sizeof(*dbg_mem))) + goto invalid_tlv_len; + + type = le32_to_cpu(dbg_mem->data_type); + drv->fw.dbg_dynamic_mem = true; + + if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) { + IWL_ERR(drv, + "Skip unknown dbg mem segment: %u\n", + dbg_mem->data_type); + break; + } + + if (pieces->dbg_mem_tlv[type]) { + IWL_ERR(drv, + "Ignore duplicate mem segment: %u\n", + dbg_mem->data_type); + break; + } + + IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", + dbg_mem->data_type); + + pieces->dbg_mem_tlv[type] = dbg_mem; + break; + } default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -1193,7 +1213,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int err; struct iwl_firmware_pieces *pieces; const unsigned int api_max = drv->cfg->ucode_api_max; - unsigned int api_ok = drv->cfg->ucode_api_ok; const unsigned int api_min = drv->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; @@ -1206,20 +1225,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - if (!api_ok) - api_ok = api_max; - pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) return; - if (!ucode_raw) { - if (drv->fw_index <= api_ok) - IWL_ERR(drv, - "request for firmware file '%s' failed.\n", - drv->firmware_name); + if (!ucode_raw) goto try_again; - } IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); @@ -1252,28 +1263,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * firmware filename ... but we don't check for that and only rely * on the API version read from firmware header from here on forward */ - /* no api version check required for experimental uCode */ - if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) { - if (api_ver < api_min || api_ver > api_max) { - IWL_ERR(drv, - "Driver unable to support your firmware API. " - "Driver supports v%u, firmware is v%u.\n", - api_max, api_ver); - goto try_again; - } - - if (api_ver < api_ok) { - if (api_ok != api_max) - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u through v%u, got v%u.\n", - api_ok, api_max, api_ver); - else - IWL_ERR(drv, "Firmware has old API version, " - "expected v%u, got v%u.\n", - api_max, api_ver); - IWL_ERR(drv, "New firmware can be obtained from " - "http://www.intellinuxwireless.org/.\n"); - } + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(drv, + "Driver unable to support your firmware API. 
" + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; } /* @@ -1362,6 +1357,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } } + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) { + if (pieces->dbg_mem_tlv[i]) { + drv->fw.dbg_mem_tlv[i] = + kmemdup(pieces->dbg_mem_tlv[i], + sizeof(*drv->fw.dbg_mem_tlv[i]), + GFP_KERNEL); + if (!drv->fw.dbg_mem_tlv[i]) + goto out_free_fw; + } + } + /* Now that we can no longer fail, copy information */ /* @@ -1554,9 +1560,7 @@ struct iwl_mod_params iwlwifi_mod_params = { .power_level = IWL_POWER_INDEX_1, .d0i3_disable = true, .d0i3_entry_delay = 1000, -#ifndef CONFIG_IWLWIFI_UAPSD - .uapsd_disable = true, -#endif /* CONFIG_IWLWIFI_UAPSD */ + .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); @@ -1675,12 +1679,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, - bool, S_IRUGO | S_IWUSR); -#ifdef CONFIG_IWLWIFI_UAPSD -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); -#else -MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); -#endif + uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(uapsd_disable, + "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); /* * set bt_coex_active to true, uCode will do kill/defer @@ -1726,4 +1727,4 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, S_IRUGO); -MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities"); +MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index c15f5be85..bf1b69aec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, int n_channels, s8 max_txpower_avg) { int ch_idx; - enum ieee80211_band band; + enum nl80211_band band; band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? - IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; for (ch_idx = 0; ch_idx < n_channels; ch_idx++) { struct ieee80211_channel *chan = &data->channels[ch_idx]; @@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg, static void iwl_mod_ht40_chan_info(struct device *dev, struct iwl_nvm_data *data, int n_channels, - enum ieee80211_band band, u16 channel, + enum nl80211_band band, u16 channel, const struct iwl_eeprom_channel *eeprom_ch, u8 clear_ht40_extension_channel) { @@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev, IWL_DEBUG_EEPROM(dev, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), CHECK_AND_PRINT(RADAR), @@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, n_channels++; channel->hw_value = eeprom_ch_array[ch_idx]; - channel->band = (band == 1) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; + channel->band = (band == 1) ? 
NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ for (band = 6; band <= 7; band++) { - enum ieee80211_band ieeeband; + enum nl80211_band ieeeband; iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info, &eeprom_ch_array); /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ - ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ - : IEEE80211_BAND_5GHZ; + ieeeband = (band == 6) ? NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; /* Loop through each band adding each of the channels */ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) { @@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band) + int n_channels, enum nl80211_band band) { struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; @@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, + enum nl80211_band band, u8 tx_chains, u8 rx_chains) { int max_bit_rate = 0; @@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, int n_used = 0; struct ieee80211_supported_band *sband; - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; + sband = &data->bands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, data->valid_tx_ant, data->valid_rx_ant); - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; + sband = &data->bands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, data->valid_tx_ant, data->valid_rx_ant); if (n_channels != n_used) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index ad2b83466..1f4e50289 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -98,7 +98,8 @@ struct iwl_nvm_data { s8 max_tx_pwr_half_dbm; bool lar_enabled; - struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + bool vht160_supported; + struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels[]; }; @@ -133,12 +134,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data, int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, - int n_channels, enum ieee80211_band band); + int n_channels, enum nl80211_band band); void iwl_init_ht_hw_capab(const struct iwl_cfg 
*cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band, + enum nl80211_band band, u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 582008a66..270f39ecd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -321,6 +321,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) /* Write index table */ #define RFH_Q0_FRBDCB_WIDX 0xA08080 #define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) +/* Write index table - shadow registers */ +#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80 +#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4) /* Read index table */ #define RFH_Q0_FRBDCB_RIDX 0xA080C0 #define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index 8425e1a58..09b7ea28f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_PAGING = 12, IWL_FW_ERROR_DUMP_RADIO_REG = 13, + IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_MAX, }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 15ec4e290..37dc09e8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, + IWL_UCODE_TLV_FW_MEM_SEG = 51, }; struct iwl_ucode_tlv { @@ -245,7 +246,6 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; /** * enum iwl_ucode_tlv_api - ucode api - * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. @@ -260,12 +260,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @NUM_IWL_UCODE_TLV_API: number of bits used */ enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, - IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, + IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, @@ -324,6 +323,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in * regular image. + * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared + * memory addresses from the firmware. 
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -361,6 +363,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, + IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ @@ -491,6 +495,37 @@ enum iwl_fw_dbg_monitor_mode { }; /** + * enum iwl_fw_mem_seg_type - data types for dumping on error + * + * @FW_DBG_MEM_SMEM: the data type is SMEM + * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC + * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC + */ +enum iwl_fw_dbg_mem_seg_type { + FW_DBG_MEM_DCCM_LMAC = 0, + FW_DBG_MEM_DCCM_UMAC, + FW_DBG_MEM_SMEM, + + /* Must be last */ + FW_DBG_MEM_MAX, +}; + +/** + * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments + * + * @data_type: enum %iwl_fw_mem_seg_type + * @ofs: the memory segment offset + * @len: the memory segment length, in bytes + * + * This parses IWL_UCODE_TLV_FW_MEM_SEG + */ +struct iwl_fw_dbg_mem_seg_tlv { + __le32 data_type; + __le32 ofs; + __le32 len; +} __packed; + +/** * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data * * @version: version of the TLV - currently 0 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 2942571c6..e461d6318 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -286,6 +286,8 @@ struct iwl_fw { struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX]; + bool dbg_dynamic_mem; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; struct iwl_gscan_capabilities gscan_capa; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index d1a5dd160..6c5c2f9f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -92,6 +92,11 @@ enum iwl_amsdu_size { IWL_AMSDU_12K = 2, }; +enum iwl_uapsd_disable { + IWL_DISABLE_UAPSD_BSS = BIT(0), + IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1), +}; + /** * struct iwl_mod_params * @@ -109,7 +114,8 @@ enum iwl_amsdu_size { * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 * @nvm_file: specifies a external NVM file - * @uapsd_disable: disable U-APSD, default = 1 + * @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default = + * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT * @d0i3_disable: disable d0i3, default = 1, * @d0i3_entry_delay: time to wait after no refs are taken before * entering D0i3 (in msecs) @@ -131,7 +137,7 @@ struct iwl_mod_params { #endif int ant_coupling; char *nvm_file; - bool uapsd_disable; + u32 uapsd_disable; bool d0i3_disable; unsigned int d0i3_entry_delay; bool lar_disable; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 93a689583..21653fee8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ 
-288,6 +288,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, !data->sku_cap_band_52GHz_enable) continue; + if (ch_flags & NVM_CHANNEL_160MHZ) + data->vht160_supported = true; + if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is @@ -308,7 +311,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->hw_value = nvm_chan[ch_idx]; channel->band = (ch_idx < num_2ghz_channels) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -320,7 +323,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; - is_5ghz = channel->band == IEEE80211_BAND_5GHZ; + is_5ghz = channel->band == NL80211_BAND_5GHZ; /* don't put limitations in case we're using LAR */ if (!lar_supported) @@ -331,17 +334,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->flags = 0; IWL_DEBUG_EEPROM(dev, - "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", + "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", channel->hw_value, is_5ghz ? "5.2" : "2.4", + ch_flags, CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), - ch_flags, + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(40MHZ), + CHECK_AND_PRINT_I(80MHZ), + CHECK_AND_PRINT_I(160MHZ), channel->max_power, ((ch_flags & NVM_CHANNEL_IBSS) && !(ch_flags & NVM_CHANNEL_RADAR)) @@ -370,6 +376,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (data->vht160_supported) + vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | + IEEE80211_VHT_CAP_SHORT_GI_160; + if (cfg->vht_mu_mimo_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; @@ -439,22 +449,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, &ch_section[NVM_CHANNELS_FAMILY_8000], lar_supported); - sband = &data->bands[IEEE80211_BAND_2GHZ]; - sband->band = IEEE80211_BAND_2GHZ; + sband = &data->bands[NL80211_BAND_2GHZ]; + sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); - sband = &data->bands[IEEE80211_BAND_5GHZ]; - sband->band = IEEE80211_BAND_5GHZ; + sband = &data->bands[NL80211_BAND_5GHZ]; + sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, - IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, @@ -781,7 +791,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, 
const struct iwl_cfg *cfg, struct ieee80211_regdomain *regd; int size_of_regd; struct ieee80211_reg_rule *rule; - enum ieee80211_band band; + enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; @@ -809,7 +819,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; @@ -857,7 +867,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, IWL_DEBUG_DEV(dev, IWL_DL_LAR, "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", center_freq, - band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", + band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c index 4a4dea087..7beba9ae5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -72,8 +73,6 @@ #include "iwl-trans.h" #define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ -#define IWL_NUM_PAPD_CH_GROUPS 9 -#define IWL_NUM_TXP_CH_GROUPS 9 struct iwl_phy_db_entry { u16 size; @@ -86,14 +85,18 @@ struct iwl_phy_db_entry { * @cfg: phy configuration. * @calib_nch: non channel specific calibration data. * @calib_ch: channel specific calibration data. + * @n_group_papd: number of entries in papd channel group. * @calib_ch_group_papd: calibration data related to papd channel group. + * @n_group_txp: number of entries in tx power channel group. * @calib_ch_group_txp: calibration data related to tx power chanel group. */ struct iwl_phy_db { struct iwl_phy_db_entry cfg; struct iwl_phy_db_entry calib_nch; - struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; - struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; + int n_group_papd; + struct iwl_phy_db_entry *calib_ch_group_papd; + int n_group_txp; + struct iwl_phy_db_entry *calib_ch_group_txp; struct iwl_trans *trans; }; @@ -143,6 +146,9 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) phy_db->trans = trans; + phy_db->n_group_txp = -1; + phy_db->n_group_papd = -1; + /* TODO: add default values of the phy db. 
*/ return phy_db; } @@ -166,11 +172,11 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db, case IWL_PHY_DB_CALIB_NCH: return &phy_db->calib_nch; case IWL_PHY_DB_CALIB_CHG_PAPD: - if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) + if (chg_id >= phy_db->n_group_papd) return NULL; return &phy_db->calib_ch_group_papd[chg_id]; case IWL_PHY_DB_CALIB_CHG_TXP: - if (chg_id >= IWL_NUM_TXP_CH_GROUPS) + if (chg_id >= phy_db->n_group_txp) return NULL; return &phy_db->calib_ch_group_txp[chg_id]; default: @@ -202,17 +208,21 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); - for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) + + for (i = 0; i < phy_db->n_group_papd; i++) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); - for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) + kfree(phy_db->calib_ch_group_papd); + + for (i = 0; i < phy_db->n_group_txp; i++) iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i); + kfree(phy_db->calib_ch_group_txp); kfree(phy_db); } IWL_EXPORT_SYMBOL(iwl_phy_db_free); -int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, - gfp_t alloc_ctx) +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, + struct iwl_rx_packet *pkt) { struct iwl_calib_res_notif_phy_db *phy_db_notif = (struct iwl_calib_res_notif_phy_db *)pkt->data; @@ -224,16 +234,42 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, if (!phy_db) return -EINVAL; - if (type == IWL_PHY_DB_CALIB_CHG_PAPD || - type == IWL_PHY_DB_CALIB_CHG_TXP) + if (type == IWL_PHY_DB_CALIB_CHG_PAPD) { chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); + if (phy_db && !phy_db->calib_ch_group_papd) { + /* + * Firmware sends the largest index first, so we can use + * it to know how much we should allocate. + */ + phy_db->calib_ch_group_papd = kcalloc(chg_id + 1, + sizeof(struct iwl_phy_db_entry), + GFP_ATOMIC); + if (!phy_db->calib_ch_group_papd) + return -ENOMEM; + phy_db->n_group_papd = chg_id + 1; + } + } else if (type == IWL_PHY_DB_CALIB_CHG_TXP) { + chg_id = le16_to_cpup((__le16 *)phy_db_notif->data); + if (phy_db && !phy_db->calib_ch_group_txp) { + /* + * Firmware sends the largest index first, so we can use + * it to know how much we should allocate. 
+ */ + phy_db->calib_ch_group_txp = kcalloc(chg_id + 1, + sizeof(struct iwl_phy_db_entry), + GFP_ATOMIC); + if (!phy_db->calib_ch_group_txp) + return -ENOMEM; + phy_db->n_group_txp = chg_id + 1; + } + } entry = iwl_phy_db_get_section(phy_db, type, chg_id); if (!entry) return -EINVAL; kfree(entry->data); - entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx); + entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC); if (!entry->data) { entry->size = 0; return -ENOMEM; @@ -296,7 +332,7 @@ static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id) if (ch_index == 0xff) return 0xff; - for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) { + for (i = 0; i < phy_db->n_group_txp; i++) { txp_chg = (void *)phy_db->calib_ch_group_txp[i].data; if (!txp_chg) return 0xff; @@ -447,7 +483,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) /* Send all the TXP channel specific data */ err = iwl_phy_db_send_all_channel_groups(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, - IWL_NUM_PAPD_CH_GROUPS); + phy_db->n_group_papd); if (err) { IWL_ERR(phy_db->trans, "Cannot send channel specific PAPD groups\n"); @@ -457,7 +493,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db) /* Send all the TXP channel specific data */ err = iwl_phy_db_send_all_channel_groups(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, - IWL_NUM_TXP_CH_GROUPS); + phy_db->n_group_txp); if (err) { IWL_ERR(phy_db->trans, "Cannot send channel specific TX power groups\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h index 24103877e..d34de3f71 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h @@ -73,8 +73,8 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans); void iwl_phy_db_free(struct iwl_phy_db *phy_db); -int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, - gfp_t alloc_ctx); +int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, + struct iwl_rx_packet *pkt); int iwl_send_phy_db_data(struct iwl_phy_db *phy_db); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index c46e596e1..6c1d20ded 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -345,6 +347,16 @@ enum secure_load_status_reg { #define TXF_READ_MODIFY_DATA (0xa00448) #define TXF_READ_MODIFY_ADDR (0xa0044c) +/* UMAC Internal Tx Fifo */ +#define TXF_CPU2_FIFO_ITEM_CNT (0xA00538) +#define TXF_CPU2_WR_PTR (0xA00514) +#define TXF_CPU2_RD_PTR (0xA00510) +#define TXF_CPU2_FENCE_PTR (0xA00518) +#define TXF_CPU2_LOCK_FENCE (0xA00524) +#define TXF_CPU2_NUM (0xA0053C) +#define TXF_CPU2_READ_MODIFY_DATA (0xA00548) +#define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C) + /* Radio registers access */ #define RSP_RADIO_CMD (0xa02804) #define RSP_RADIO_RDDAT (0xa02814) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 91d74b3f6..8193d36ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -519,7 +521,7 @@ struct iwl_trans; struct iwl_trans_txq_scd_cfg { u8 fifo; - s8 sta_id; + u8 sta_id; u8 tid; bool aggregate; int frame_limit; @@ -751,6 +753,7 @@ enum iwl_plat_pm_mode { * @dev - pointer to struct device * that represents the device * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. + * @hw_rf_id a u32 with the device RF ID * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. 
@@ -795,6 +798,7 @@ struct iwl_trans { struct device *dev; u32 max_skb_frags; u32 hw_rev; + u32 hw_rf_id; u32 hw_id; char hw_id_str[52]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 23e7e2937..2e06dfc1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o -iwlmvm-y += power.o coex.o coex_legacy.o +iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 2e098f8e0..a63f5bbb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) chanctx_conf = rcu_dereference(vif->chanctx_conf); if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) { rcu_read_unlock(); return BT_COEX_INVALID_LUT; } @@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_send_bt_init_conf_old(mvm); - lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { @@ -540,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* If channel context is invalid or not on 2.4GHz .. */ if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) { if (vif->type == NL80211_IFTYPE_STATION) { /* ... 
relax constraints and disable rssi events */ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, @@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); - return; - } - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", @@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); - return; - } - lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ @@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_coex_agg_time_limit_old(mvm, sta); - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; @@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) return true; @@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) if (ant & mvm->cfg->non_shared_ant) return true; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } @@ -877,21 +853,15 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) if (mvm->cfg->bt_shared_single_ant) return true; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band) + enum nl80211_band band) { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); - - if (band != IEEE80211_BAND_2GHZ) + if (band != NL80211_BAND_2GHZ) return false; return bt_activity >= BT_LOW_TRAFFIC; @@ -903,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, __le16 fc = hdr->frame_control; bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); - if (info->band != IEEE80211_BAND_2GHZ) + if (info->band != NL80211_BAND_2GHZ) return 0; if (unlikely(mvm->bt_tx_prio)) @@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_bt_coex_vif_change_old(mvm); - return; - } - iwl_mvm_bt_coex_notif_handle(mvm); } @@ -955,12 +919,6 @@ void 
iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); - return; - } - if (!iwl_mvm_bt_is_plcr_supported(mvm)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c deleted file mode 100644 index 015045733..000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c +++ /dev/null @@ -1,1315 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include <linux/ieee80211.h> -#include <linux/etherdevice.h> -#include <net/mac80211.h> - -#include "fw-api-coex.h" -#include "iwl-modparams.h" -#include "mvm.h" -#include "iwl-debug.h" - -#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \ - [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \ - ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS)) - -static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2, - BT_COEX_PRIO_TBL_PRIO_BYPASS, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1, - BT_COEX_PRIO_TBL_PRIO_LOW, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2, - BT_COEX_PRIO_TBL_PRIO_LOW, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1, - BT_COEX_PRIO_TBL_PRIO_HIGH, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2, - BT_COEX_PRIO_TBL_PRIO_HIGH, 1), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM, - BT_COEX_PRIO_TBL_DISABLED, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52, - BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24, - BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0), - EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE, - BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0), - 0, 0, 0, 0, 0, 0, -}; - -#undef EVENT_PRIO_ANT - -static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) -{ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; - - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0, - sizeof(struct iwl_bt_coex_prio_tbl_cmd), - &iwl_bt_prio_tbl); -} - -static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { - cpu_to_le32(0xf0f0f0f0), /* 50% */ - cpu_to_le32(0xc0c0c0c0), /* 25% */ - cpu_to_le32(0xfcfcfcfc), /* 75% */ - cpu_to_le32(0xfefefefe), /* 87.5% */ -}; - -static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, - { - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x40000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0x44000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - }, -}; - -static const __le32 
iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { - { - /* Tight */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0x00004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Loose */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, - { - /* Tx Tx disabled */ - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xeeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xc0004000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), - }, -}; - -/* 20MHz / 40MHz below / 40Mhz above*/ -static const __le64 iwl_ci_mask[][3] = { - /* dummy entry for channel 0 */ - {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, - { - cpu_to_le64(0x0000001FFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x00007FFFFFULL), - }, - { - cpu_to_le64(0x000000FFFFULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0003FFFFFFULL), - }, - { - cpu_to_le64(0x000003FFFCULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x000FFFFFFCULL), - }, - { - cpu_to_le64(0x00001FFFE0ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x007FFFFFE0ULL), - }, - { - cpu_to_le64(0x00007FFF80ULL), - cpu_to_le64(0x00007FFFFFULL), - cpu_to_le64(0x01FFFFFF80ULL), - }, - { - cpu_to_le64(0x0003FFFC00ULL), - cpu_to_le64(0x0003FFFFFFULL), - cpu_to_le64(0x0FFFFFFC00ULL), - }, - { - cpu_to_le64(0x000FFFF000ULL), - cpu_to_le64(0x000FFFFFFCULL), - cpu_to_le64(0x3FFFFFF000ULL), - }, - { - cpu_to_le64(0x007FFF8000ULL), - cpu_to_le64(0x007FFFFFE0ULL), - cpu_to_le64(0xFFFFFF8000ULL), - }, - { - cpu_to_le64(0x01FFFE0000ULL), - cpu_to_le64(0x01FFFFFF80ULL), - cpu_to_le64(0xFFFFFE0000ULL), - }, - { - cpu_to_le64(0x0FFFF00000ULL), - cpu_to_le64(0x0FFFFFFC00ULL), - cpu_to_le64(0x0ULL), - }, - { - cpu_to_le64(0x3FFFC00000ULL), - cpu_to_le64(0x3FFFFFF000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFFE000000ULL), - cpu_to_le64(0xFFFFFF8000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFF8000000ULL), - cpu_to_le64(0xFFFFFE0000ULL), - cpu_to_le64(0x0) - }, - { - cpu_to_le64(0xFFC0000000ULL), - cpu_to_le64(0x0ULL), - cpu_to_le64(0x0ULL) - }, -}; - -enum iwl_bt_kill_msk { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_MAX, -}; - -static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { - [BT_KILL_MSK_DEFAULT] = 0xfffffc00, - [BT_KILL_MSK_NEVER] = 0xffffffff, - [BT_KILL_MSK_ALWAYS] = 0, -}; - -static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_NEVER, - }, - { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_NEVER, - BT_KILL_MSK_DEFAULT, - }, -}; - -static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_ALWAYS, - 
BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_ALWAYS, - }, - { - BT_KILL_MSK_DEFAULT, - BT_KILL_MSK_ALWAYS, - BT_KILL_MSK_DEFAULT, - }, -}; - -struct corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - -static enum iwl_bt_coex_lut_type -iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) -{ - struct ieee80211_chanctx_conf *chanctx_conf; - enum iwl_bt_coex_lut_type ret; - u16 phy_ctx_id; - - /* - * Checking that we hold mvm->mutex is a good idea, but the rate - * control can't acquire the mutex since it runs in Tx path. - * So this is racy in that case, but in the worst case, the AMPDU - * size limit will be wrong for a short time which is not a big - * issue. 
- */ - - rcu_read_lock(); - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return BT_COEX_INVALID_LUT; - } - - ret = BT_COEX_TX_DIS_LUT; - - if (mvm->cfg->bt_shared_single_ant) { - rcu_read_unlock(); - return ret; - } - - phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); - - if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut); - else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id) - ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut); - /* else - default = TX TX disallowed */ - - rcu_read_unlock(); - - return ret; -} - -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; - u32 flags; - - ret = iwl_send_bt_prio_tbl(mvm); - if (ret) - return ret; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - - lockdep_assert_held(&mvm->mutex); - - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - switch (mvm->bt_force_ant_mode) { - case BT_FORCE_ANT_AUTO: - flags = BT_COEX_AUTO_OLD; - break; - case BT_FORCE_ANT_BT: - flags = BT_COEX_BT_OLD; - break; - case BT_FORCE_ANT_WIFI: - flags = BT_COEX_WIFI_OLD; - break; - default: - WARN_ON(1); - flags = 0; - } - - bt_cmd->flags = cpu_to_le32(flags); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE); - goto send_cmd; - } - - bt_cmd->max_kill = 5; - bt_cmd->bt4_antenna_isolation_thr = - IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS; - bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; - bt_cmd->bt4_tx_tx_delta_freq_thr = 15; - bt_cmd->bt4_tx_rx_max_freq0 = 15; - bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT; - bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT; - - flags = iwlwifi_mod_params.bt_coex_active ? 
- BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD; - bt_cmd->flags = cpu_to_le32(flags); - - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_BT_PRIO_BOOST | - BT_VALID_MAX_KILL | - BT_VALID_3W_TMRS | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS | - BT_VALID_REDUCED_TX_POWER | - BT_VALID_LUT | - BT_VALID_WIFI_RX_SW_PRIO_BOOST | - BT_VALID_WIFI_TX_SW_PRIO_BOOST | - BT_VALID_ANT_ISOLATION | - BT_VALID_ANT_ISOLATION_THRS | - BT_VALID_TXTX_DELTA_FREQ_THRS | - BT_VALID_TXRX_MAX_FREQ_0 | - BT_VALID_SYNC_TO_SCO | - BT_VALID_TTC | - BT_VALID_RRC); - - if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); - - if (iwl_mvm_bt_is_plcr_supported(mvm)) { - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); - } - - if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); - } - - if (IWL_MVM_BT_COEX_TTC) - bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); - - if (iwl_mvm_bt_is_rrc_supported(mvm)) - bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); - - if (mvm->cfg->bt_shared_single_ant) - memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, - sizeof(iwl_single_shared_ant)); - else - memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, - sizeof(iwl_combined_lookup)); - - /* Take first Co-running block LUT to get started */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost, - sizeof(iwl_bt_prio_boost)); - bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); - bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); - -send_cmd: - memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); - memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm) -{ - struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; - u32 primary_lut = le32_to_cpu(notif->primary_ch_lut); - u32 ag = le32_to_cpu(notif->bt_activity_grading); - struct iwl_bt_coex_cmd_old *bt_cmd; - u8 ack_kill_msk, cts_kill_msk; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .data[0] = &bt_cmd, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret = 0; - - lockdep_assert_held(&mvm->mutex); - - ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut]; - cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut]; - - if (mvm->bt_ack_kill_msk[0] == ack_kill_msk && - mvm->bt_cts_kill_msk[0] == cts_kill_msk) - return 0; - - mvm->bt_ack_kill_msk[0] = ack_kill_msk; - mvm->bt_cts_kill_msk[0] = cts_kill_msk; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]); - bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_KILL_ACK | - BT_VALID_KILL_CTS); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, - bool enable) -{ - struct iwl_bt_coex_cmd_old *bt_cmd; - /* Send ASYNC since this can be sent 
from an atomic context */ - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_DUP, }, - .flags = CMD_ASYNC, - }; - struct iwl_mvm_sta *mvmsta; - int ret; - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - if (!mvmsta) - return 0; - - /* nothing to do */ - if (mvmsta->bt_reduced_txpower == enable) - return 0; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - - bt_cmd->valid_bit_msk = - cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER); - bt_cmd->bt_reduced_tx_power = sta_id; - - if (enable) - bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT; - - IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", - enable ? "en" : "dis", sta_id); - - mvmsta->bt_reduced_txpower = enable; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; -} - -struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif_old *notif; - struct iwl_mvm *mvm; - struct ieee80211_chanctx_conf *primary; - struct ieee80211_chanctx_conf *secondary; - bool primary_ll; -}; - -static inline -void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable, int rssi) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = - enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = - enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; -} - -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_smps_mode smps_mode; - u32 bt_activity_grading; - int ave_rssi; - - lockdep_assert_held(&mvm->mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - /* default smps_mode for BSS / P2P client is AUTOMATIC */ - smps_mode = IEEE80211_SMPS_AUTOMATIC; - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: - return; - } - - chanctx_conf = rcu_dereference(vif->chanctx_conf); - - /* If channel context is invalid or not on 2.4GHz .. */ - if ((!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) { - if (vif->type == NL80211_IFTYPE_STATION) { - /* ... relax constraints and disable rssi events */ - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - } - return; - } - - bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); - if (bt_activity_grading >= BT_HIGH_TRAFFIC) - smps_mode = IEEE80211_SMPS_STATIC; - else if (bt_activity_grading >= BT_LOW_TRAFFIC) - smps_mode = vif->type == NL80211_IFTYPE_AP ? 
- IEEE80211_SMPS_OFF : - IEEE80211_SMPS_DYNAMIC; - - /* relax SMPS contraints for next association */ - if (!vif->bss_conf.assoc) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - if (mvmvif->phy_ctxt && - data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) - smps_mode = IEEE80211_SMPS_AUTOMATIC; - - IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n", - mvmvif->id, data->notif->bt_status, bt_activity_grading, - smps_mode); - - if (vif->type == NL80211_IFTYPE_STATION) - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - - /* low latency is always primary */ - if (iwl_mvm_vif_low_latency(mvmvif)) { - data->primary_ll = true; - - data->secondary = data->primary; - data->primary = chanctx_conf; - } - - if (vif->type == NL80211_IFTYPE_AP) { - if (!mvmvif->ap_ibss_active) - return; - - if (chanctx_conf == data->primary) - return; - - if (!data->primary_ll) { - /* - * downgrade the current primary no matter what its - * type is. - */ - data->secondary = data->primary; - data->primary = chanctx_conf; - } else { - /* there is low latency vif - we will be secondary */ - data->secondary = chanctx_conf; - } - return; - } - - /* - * STA / P2P Client, try to be primary if first vif. If we are in low - * latency mode, we are already in primary and just don't do much - */ - if (!data->primary || data->primary == chanctx_conf) - data->primary = chanctx_conf; - else if (!data->secondary) - /* if secondary is not NULL, it might be a GO */ - data->secondary = chanctx_conf; - - /* - * don't reduce the Tx power if one of these is true: - * we are in LOOSE - * single share antenna product - * BT is active - * we are associated - */ - if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || - !data->notif->bt_status) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); - return; - } - - /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; - - /* if the RSSI isn't valid, fake it is very low */ - if (!ave_rssi) - ave_rssi = -100; - if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) - IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); - } - - /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); -} - -static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) -{ - struct iwl_bt_iterator_data data = { - .mvm = mvm, - .notif = &mvm->last_bt_notif_old, - }; - struct iwl_bt_coex_ci_cmd_old cmd = {}; - u8 ci_bw_idx; - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - rcu_read_lock(); - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_notif_iterator, &data); - - if (data.primary) { - struct ieee80211_chanctx_conf *chan = data.primary; - - if (WARN_ON(!chan->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_primary = 0; - } else { - cmd.co_run_bw_primary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_primary_ci = - 
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv); - } - - if (data.secondary) { - struct ieee80211_chanctx_conf *chan = data.secondary; - - if (WARN_ON(!data.secondary->def.chan)) { - rcu_read_unlock(); - return; - } - - if (chan->def.width < NL80211_CHAN_WIDTH_40) { - ci_bw_idx = 0; - cmd.co_run_bw_secondary = 0; - } else { - cmd.co_run_bw_secondary = 1; - if (chan->def.center_freq1 > - chan->def.chan->center_freq) - ci_bw_idx = 2; - else - ci_bw_idx = 1; - } - - cmd.bt_secondary_ci = - iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; - cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv); - } - - rcu_read_unlock(); - - /* Don't spam the fw with the same command over and over */ - if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) { - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); - memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd)); - } - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; - - IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT status: %s\n", - notif->bt_status ? "ON" : "OFF"); - IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); - IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); - IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n", - notif->bt_agg_traffic_load); - - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); - - iwl_mvm_bt_coex_notif_handle(mvm); -} - -static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* If channel context is invalid or not on 2.4GHz - don't count it */ - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return; - } - rcu_read_unlock(); - - if (vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); -} - -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data rssi_event) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data data = { - .mvm = mvm, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - /* 
- * Rssi update while not associated - can happen since the statistics - * are handled asynchronously - */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - /* No BT - reports should be disabled */ - if (!mvm->last_bt_notif_old.bt_status) - return; - - IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, - rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); - - /* - * Check if rssi is good enough for reduced Tx power, but not in loose - * scheme. - */ - if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || - iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, - false); - else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); - - if (ret) - IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_rssi_iterator, &data); - - if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm)) - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); -} - -#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) -#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) - -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - if (mvm->last_bt_notif_old.ttc_enabled) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - - if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) - return LINK_QUAL_AGG_TIME_LIMIT_DEF; - - /* tight coex, high bt traffic, reduce AGG time limit */ - return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; -} - -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum iwl_bt_coex_lut_type lut_type; - - if (mvm->last_bt_notif_old.ttc_enabled) - return true; - - if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) < - BT_HIGH_TRAFFIC) - return true; - - /* - * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas - * since BT is already killed. - * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while - * we Tx. - * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
- */ - lut_type = iwl_get_coex_type(mvm, mvmsta->vif); - return lut_type != BT_COEX_LOOSE_LUT; -} - -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm) -{ - u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - return ag < BT_HIGH_TRAFFIC; -} - -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - u32 bt_activity = - le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading); - - if (band != IEEE80211_BAND_2GHZ) - return false; - - return bt_activity >= BT_LOW_TRAFFIC; -} - -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) -{ - iwl_mvm_bt_coex_notif_handle(mvm); -} - -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 ant_isolation = le32_to_cpup((void *)pkt->data); - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - struct iwl_bt_coex_cmd_old *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return; - cmd.data[0] = bt_cmd; - - bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); - bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE | - BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut20)); - - memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(bt_cmd->bt4_corun_lut40)); - - if (iwl_mvm_send_cmd(mvm, &cmd)) - IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); - - kfree(bt_cmd); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 4b560e441..4eeb6b78d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -75,7 +75,6 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ -#define IWL_MVM_P2P_UAPSD_STANDALONE 0 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) @@ -110,6 +109,7 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 +#define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define 
IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index c1a313149..4fdc3dad3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; } - ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false); + ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); if (ret) return ret; rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); @@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct iwl_wowlan_status *fw_status; int i; bool keep; - struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvm_ap_sta; fw_status = iwl_mvm_get_wakeup_status(mvm, vif); @@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, status.wake_packet = fw_status->wake_packet; /* still at hard-coded place 0 for D3 image */ - ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[0], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(ap_sta)) + mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); + if (!mvm_ap_sta) goto out_free; - mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status.qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 14004456b..b23271755 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_STATION_COUNT) { - struct ieee80211_sta *sta; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id], - lockdep_is_held(&mvm->mutex)); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvm_sta; + mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); + if (mvm_sta) { pos += scnprintf(buf+pos, bufsz-pos, "ap_sta_id %d - reduced Tx power %d\n", ap_sta_id, @@ -724,9 +721,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, ret = kstrtou32(data, 10, &value); if (ret == 0 && value) { - enum ieee80211_band band = (cmd->channel_num <= 14) ? - IEEE80211_BAND_2GHZ : - IEEE80211_BAND_5GHZ; + enum nl80211_band band = (cmd->channel_num <= 14) ? 
+ NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ; struct ieee80211_channel chn = { .band = band, .center_freq = ieee80211_channel_to_frequency( @@ -1425,6 +1422,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, len); } +static const char * const chanwidths[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = "noht", + [NL80211_CHAN_WIDTH_20] = "ht20", + [NL80211_CHAN_WIDTH_40] = "ht40", + [NL80211_CHAN_WIDTH_80] = "vht80", + [NL80211_CHAN_WIDTH_80P80] = "vht80p80", + [NL80211_CHAN_WIDTH_160] = "vht160", +}; + +static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct ieee80211_vif *vif = data; + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data; + u32 num_of_stations = le32_to_cpu(report->number_of_stations); + int i; + + IWL_INFO(mvm, "LQM report:\n"); + IWL_INFO(mvm, "\tstatus: %d\n", report->status); + IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id)); + IWL_INFO(mvm, "\ttx_frame_dropped: %d\n", + le32_to_cpu(report->tx_frame_dropped)); + IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n", + le32_to_cpu(report->time_in_measurement_window)); + IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n", + le32_to_cpu(report->total_air_time_other_stations)); + IWL_INFO(mvm, "\tchannel_freq: %d\n", + vif->bss_conf.chandef.center_freq1); + IWL_INFO(mvm, "\tchannel_width: %s\n", + chanwidths[vif->bss_conf.chandef.width]); + IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations); + for (i = 0; i < num_of_stations; i++) + IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i, + report->frequent_stations_air_time[i]); + + return true; +} + +static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_notification_wait wait_lqm_notif; + static u16 lqm_notif[] = { + WIDE_ID(MAC_CONF_GROUP, + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF) + }; + int err; + u32 duration; + u32 timeout; + + if (sscanf(buf, "%d,%d", &duration, &timeout) != 2) + return -EINVAL; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif, + lqm_notif, ARRAY_SIZE(lqm_notif), + iwl_mvm_lqm_notif_wait, vif); + mutex_lock(&mvm->mutex); + err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT, + duration, timeout); + mutex_unlock(&mvm->mutex); + + if (err) { + IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err); + iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif); + return err; + } + + /* wait for 2 * timeout (safety guard) and convert to jiffies*/ + timeout = msecs_to_jiffies((timeout * 2) / 1000); + + err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif, + timeout); + if (err) + IWL_ERR(mvm, "Getting lqm notif timed out\n"); + + return count; +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1449,6 +1529,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); +MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1488,6 +1569,7 @@ void 
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a43b3921c..406cf1cb9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -65,6 +65,7 @@ *****************************************************************************/ #include <linux/vmalloc.h> #include <linux/ieee80211.h> +#include <linux/netdevice.h> #include "mvm.h" #include "fw-dbg.h" @@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, return pos; } -static -int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif, - char *buf, int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); - BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; @@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_profile_notif_old *notif = - &mvm->last_bt_notif_old; - - pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += 
scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - notif->rrc_enabled); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_enabled); - } else { - struct iwl_bt_coex_profile_notif *notif = - &mvm->last_bt_notif; - - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - - pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf+pos, - bufsz-pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf+pos, bufsz-pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - (notif->ttc_rrc_status >> 4) & 0xF); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_rrc_status & 0xF); - } + pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); + + pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", + notif->bt_ci_compliance); + pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", + le32_to_cpu(notif->primary_ch_lut)); + pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + pos += scnprintf(buf + pos, + bufsz - pos, "bt_activity_grading = %d\n", + le32_to_cpu(notif->bt_activity_grading)); + pos += scnprintf(buf + pos, bufsz - pos, + "antenna isolation = %d CORUN LUT index = %d\n", + mvm->last_ant_isol, mvm->last_corun_lut); + pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", + (notif->ttc_rrc_status >> 4) & 0xF); + pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", + notif->ttc_rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); @@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { - struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; - - pos += scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - - pos += scnprintf(buf+pos, bufsz-pos, - "BT Configuration CMD - 0=default, 1=never, 2=always\n"); - pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n", - mvm->bt_ack_kill_msk[0]); - pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n", - mvm->bt_cts_kill_msk[0]); - - } else { - struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; - - pos += 
scnprintf(buf+pos, bufsz-pos, - "Channel inhibition CMD\n"); - pos += scnprintf(buf+pos, bufsz-pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf+pos, bufsz-pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - } + pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tPrimary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_primary_ci)); + pos += scnprintf(buf + pos, bufsz - pos, + "\tSecondary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); @@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; @@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); - memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); @@ -1416,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT); PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE); + PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD); + PRINT_MVM_REF(IWL_MVM_REF_RX); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index eec52c57f..5f22cc7ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status { u8 decrypt_key[16]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd rsc; -} __packed; +} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ struct iwl_wowlan_status { struct iwl_wowlan_gtk_status gtk; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index 7a16e55df..1ca8e4988 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info { IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; +enum iwl_rx_l3_proto_values { + IWL_RX_L3_TYPE_NONE, + IWL_RX_L3_TYPE_IPV4, + IWL_RX_L3_TYPE_IPV4_FRAG, + IWL_RX_L3_TYPE_IPV6_FRAG, + IWL_RX_L3_TYPE_IPV6, + IWL_RX_L3_TYPE_IPV6_IN_IPV4, + IWL_RX_L3_TYPE_ARP, + IWL_RX_L3_TYPE_EAPOL, +}; + +#define IWL_RX_L3_PROTO_POS 4 + enum iwl_rx_l3l4_flags { IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), IWL_RX_L3L4_TCP_ACK = BIT(3), - IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4, + IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, }; @@ -424,21 +437,28 @@ struct iwl_rxq_sync_notification { /** * Internal message identifier * +* @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA */ enum iwl_mvm_rxq_notif_type { 
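
The fw-api-rx.h hunk above names the L3 types the firmware reports and re-expresses IWL_RX_L3L4_L3_PROTO_MASK in terms of the new IWL_RX_L3_PROTO_POS. A minimal user-space sketch of pulling the L3 type and checksum bits out of l3l4_flags follows; the constants are copied from the hunk, but the "trust the checksum only if both bits are set" policy is just one plausible consumer, not the driver's exact RX path.

#include <stdint.h>
#include <stdio.h>

#define IWL_RX_L3_PROTO_POS		4
#define IWL_RX_L3L4_L3_PROTO_MASK	(0xf << IWL_RX_L3_PROTO_POS)
#define IWL_RX_L3L4_IP_HDR_CSUM_OK	(1 << 0)
#define IWL_RX_L3L4_TCP_UDP_CSUM_OK	(1 << 1)

enum iwl_rx_l3_proto_values {
	IWL_RX_L3_TYPE_NONE,
	IWL_RX_L3_TYPE_IPV4,
	IWL_RX_L3_TYPE_IPV4_FRAG,
	IWL_RX_L3_TYPE_IPV6_FRAG,
	IWL_RX_L3_TYPE_IPV6,
	IWL_RX_L3_TYPE_IPV6_IN_IPV4,
	IWL_RX_L3_TYPE_ARP,
	IWL_RX_L3_TYPE_EAPOL,
};

int main(void)
{
	/* Example flags word: IPv4 frame with both checksums verified */
	uint16_t l3l4_flags = (IWL_RX_L3_TYPE_IPV4 << IWL_RX_L3_PROTO_POS) |
			      IWL_RX_L3L4_IP_HDR_CSUM_OK |
			      IWL_RX_L3L4_TCP_UDP_CSUM_OK;
	unsigned int l3 = (l3l4_flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
			  IWL_RX_L3_PROTO_POS;

	/* Only treat the hardware checksum as valid when the firmware
	 * parsed an L3 header and both csum-ok bits are set. */
	if (l3 != IWL_RX_L3_TYPE_NONE &&
	    (l3l4_flags & IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
	    (l3l4_flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK))
		printf("csum ok, l3 type %u\n", l3);
	return 0;
}
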
+ IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, }; /** * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent * in &iwl_rxq_sync_cmd. Should be DWORD aligned. +* FW is agnostic to the payload, so there are no endianity requirements. * * @type: value from &iwl_mvm_rxq_notif_type +* @sync: ctrl path is waiting for all notifications to be received +* @cookie: internal cookie to identify old notifications * @data: payload */ struct iwl_mvm_internal_rxq_notif { - u32 type; + u16 type; + u16 sync; + u32 cookie; u8 data[]; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 90d911394..38b1d045b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -173,7 +173,7 @@ enum iwl_sta_key_flag { /** * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed - * @STA_MODIFY_KEY: this command modifies %key + * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx * @STA_MODIFY_TX_RATE: unused * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid @@ -183,7 +183,7 @@ enum iwl_sta_key_flag { * @STA_MODIFY_QUEUES: modify the queues used by this station */ enum iwl_sta_modify_flag { - STA_MODIFY_KEY = BIT(0), + STA_MODIFY_QUEUE_REMOVAL = BIT(0), STA_MODIFY_TID_DISABLE_TX = BIT(1), STA_MODIFY_TX_RATE = BIT(2), STA_MODIFY_ADD_BA_TID = BIT(3), @@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo { __le64 hw_tkip_mic_tx_key; } __packed; -#define IWL_ADD_STA_STATUS_MASK 0xFF -#define IWL_ADD_STA_BAID_MASK 0xFF00 +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_VALID_MASK 0x8000 +#define IWL_ADD_STA_BAID_MASK 0x7F00 +#define IWL_ADD_STA_BAID_SHIFT 8 /** * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index ba3f0bbdd..dadcccd88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts { #define IWL_BAR_DFAULT_RETRY_LIMIT 60 #define IWL_LOW_RETRY_LIMIT 7 +/** + * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values + * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words) + * from mac header end. For normal case it is 4 words for SNAP. + * note: tx_cmd, mac header and pad are not counted in the offset. + * This is used to help the offload in case there is tunneling such as + * IPv6 in IPv4, in such case the ip header offset should point to the + * inner ip header and IPv4 checksum of the external header should be + * calculated by driver. + * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum + * @TX_CMD_OFFLD_L3_EN: enable IP header checksum + * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV + * field. Doesn't include the pad. 
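
The fw-api-sta.h change above splits the old 8-bit BAID field of the ADD_STA response into a 7-bit BAID plus an explicit valid bit. Assuming a host-order status word laid out per the new defines, extraction looks like the sketch below; the status code and BAID values are made up for the illustration.

#include <stdint.h>
#include <stdio.h>

#define IWL_ADD_STA_STATUS_MASK		0xFF
#define IWL_ADD_STA_BAID_VALID_MASK	0x8000
#define IWL_ADD_STA_BAID_MASK		0x7F00
#define IWL_ADD_STA_BAID_SHIFT		8

int main(void)
{
	/* Status code in bits 0-7, BAID-valid flag in bit 15,
	 * BAID in bits 8-14 (value 5 here, chosen arbitrarily). */
	uint32_t status = 0x01 |			/* hypothetical success code */
			  IWL_ADD_STA_BAID_VALID_MASK |
			  (5 << IWL_ADD_STA_BAID_SHIFT);

	printf("status field: 0x%x\n", status & IWL_ADD_STA_STATUS_MASK);

	if (status & IWL_ADD_STA_BAID_VALID_MASK)
		printf("baid: %u\n",
		       (status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT);
	return 0;
}
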
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for + * alignment + * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU + */ +enum iwl_tx_offload_assist_flags_pos { + TX_CMD_OFFLD_IP_HDR = 0, + TX_CMD_OFFLD_L4_EN = 6, + TX_CMD_OFFLD_L3_EN = 7, + TX_CMD_OFFLD_MH_SIZE = 8, + TX_CMD_OFFLD_PAD = 13, + TX_CMD_OFFLD_AMSDU = 14, +}; + +#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f +#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f + /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration * @tx_flags: combination of TX_CMD_FLG_* * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* @@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts { */ struct iwl_tx_cmd { __le16 len; - __le16 next_frame_len; + __le16 offload_assist; __le32 tx_flags; struct { u8 try_cnt; @@ -255,7 +286,7 @@ struct iwl_tx_cmd { __le16 reserved4; u8 payload[0]; struct ieee80211_hdr hdr[0]; -} __packed; /* TX_CMD_API_S_VER_3 */ +} __packed; /* TX_CMD_API_S_VER_6 */ /* * TX response related data diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 4a0fc47c8..41b80ae2d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -80,12 +80,44 @@ #include "fw-api-stats.h" #include "fw-api-tof.h" -/* Tx queue numbers */ +/* Tx queue numbers for non-DQA mode */ enum { IWL_MVM_OFFCHANNEL_QUEUE = 8, IWL_MVM_CMD_QUEUE = 9, }; +/* + * DQA queue numbers + * + * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW + * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames + * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure + * that we are never left without the possibility to connect to an AP. + * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. + * Each MGMT queue is mapped to a single STA + * MGMT frames are frames that return true on ieee80211_is_mgmt() + * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames + * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe + * responses + * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. + * DATA frames are intended for !ieee80211_is_mgmt() frames, but if + * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues + * as well + * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames + */ +enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_GCAST_QUEUE = 3, + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, + IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, + IWL_MVM_DQA_MIN_DATA_QUEUE = 10, + IWL_MVM_DQA_MAX_DATA_QUEUE = 31, +}; + enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, IWL_MVM_TX_FIFO_BE, @@ -279,6 +311,11 @@ enum { /* Please keep this enum *SORTED* by hex value. * Needed for binary search, otherwise a warning will be triggered. 
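
The offload_assist kernel-doc above counts offsets and the MAC-header size in words, with SNAP given as 4 words, which implies 16-bit words. Under that reading, packing the field for a checksummed 26-byte QoS-data header could look like the sketch below; the header-length and SNAP numbers are illustrative and this is not the driver's exact TX path.

#include <stdint.h>
#include <stdio.h>

enum iwl_tx_offload_assist_flags_pos {
	TX_CMD_OFFLD_IP_HDR	= 0,
	TX_CMD_OFFLD_L4_EN	= 6,
	TX_CMD_OFFLD_L3_EN	= 7,
	TX_CMD_OFFLD_MH_SIZE	= 8,
	TX_CMD_OFFLD_PAD	= 13,
	TX_CMD_OFFLD_AMSDU	= 14,
};

#define IWL_TX_CMD_OFFLD_MH_MASK	0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK	0x3f

int main(void)
{
	unsigned int hdrlen = 26;		/* QoS data header, in bytes */
	unsigned int mh_words = hdrlen / 2;	/* 16-bit words, per the doc */
	uint16_t offload_assist = 0;

	/* SNAP is 8 bytes = 4 words between the MAC header and the IP header */
	offload_assist |= (4 & IWL_TX_CMD_OFFLD_IP_HDR_MASK) << TX_CMD_OFFLD_IP_HDR;
	offload_assist |= 1 << TX_CMD_OFFLD_L3_EN;	/* IP header checksum */
	offload_assist |= 1 << TX_CMD_OFFLD_L4_EN;	/* TCP/UDP checksum */
	offload_assist |= (mh_words & IWL_TX_CMD_OFFLD_MH_MASK) << TX_CMD_OFFLD_MH_SIZE;
	if (hdrlen % 4)			/* a 2-byte pad was inserted after the header */
		offload_assist |= 1 << TX_CMD_OFFLD_PAD;

	printf("offload_assist = 0x%04x\n", offload_assist);	/* 0x2dc4 */
	return 0;
}
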
*/ +enum iwl_mac_conf_subcmd_ids { + LINK_QUALITY_MEASUREMENT_CMD = 0x1, + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, +}; + enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, CTDP_CONFIG_CMD = 0x03, @@ -287,6 +324,10 @@ enum iwl_phy_ops_subcmd_ids { DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_system_subcmd_ids { + SHARED_MEM_CFG_CMD = 0x0, +}; + enum iwl_data_path_subcmd_ids { UPDATE_MU_GROUPS_CMD = 0x1, TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, @@ -302,6 +343,8 @@ enum iwl_prot_offload_subcmd_ids { enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, + SYSTEM_GROUP = 0x2, + MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, @@ -1923,6 +1966,7 @@ struct iwl_tdls_config_res { #define TX_FIFO_MAX_NUM 8 #define RX_FIFO_MAX_NUM 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 /** * Shared memory configuration information from the FW @@ -1940,6 +1984,12 @@ struct iwl_tdls_config_res { * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr + * @rxfifo_addr: Start address of rxFifo + * @internal_txfifo_addr: start address of internalFifo + * @internal_txfifo_size: internal fifos' size + * + * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG + * set, the last 3 members don't exist. */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; @@ -1951,7 +2001,10 @@ struct iwl_shared_mem_cfg { __le32 rxfifo_size[RX_FIFO_MAX_NUM]; __le32 page_buff_addr; __le32 page_buff_size; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ /** * VHT MU-MIMO group configuration @@ -2002,4 +2055,60 @@ struct iwl_stored_beacon_notif { u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ +#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 + +enum iwl_lqm_cmd_operatrions { + LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, + LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, +}; + +enum iwl_lqm_status { + LQM_STATUS_SUCCESS = 0, + LQM_STATUS_TIMEOUT = 1, + LQM_STATUS_ABORT = 2, +}; + +/** + * Link Quality Measurement command + * @cmd_operatrion: command operation to be performed (start or stop) + * as defined above. + * @mac_id: MAC ID the measurement applies to. + * @measurement_time: time of the total measurement to be performed, in uSec. + * @timeout: maximum time allowed until a response is sent, in uSec. + */ +struct iwl_link_qual_msrmnt_cmd { + __le32 cmd_operation; + __le32 mac_id; + __le32 measurement_time; + __le32 timeout; +} __packed /* LQM_CMD_API_S_VER_1 */; + +/** + * Link Quality Measurement notification + * + * @frequent_stations_air_time: an array containing the total air time + * (in uSec) used by the most frequently transmitting stations. + * @number_of_stations: the number of uniqe stations included in the array + * (a number between 0 to 16) + * @total_air_time_other_stations: the total air time (uSec) used by all the + * stations which are not included in the above report. + * @time_in_measurement_window: the total time in uSec in which a measurement + * took place. + * @tx_frame_dropped: the number of TX frames dropped due to retry limit during + * measurement + * @mac_id: MAC ID the measurement applies to. + * @status: return status. may be one of the LQM_STATUS_* defined above. + * @reserved: reserved. 
+ */ +struct iwl_link_qual_msrmnt_notif { + __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; + __le32 number_of_stations; + __le32 total_air_time_other_stations; + __le32 time_in_measurement_window; + __le32 tx_frame_dropped; + __le32 mac_id; + __le32 status; + __le32 reserved[3]; +} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 6938cd37b..e1b6b2c66 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -71,7 +71,7 @@ #include "iwl-csr.h" static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, - const void *data, size_t datalen) + void *data, size_t datalen) { const struct iwl_mvm_dump_ptrs *dump_ptrs = data; ssize_t bytes_read; @@ -104,7 +104,7 @@ static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count, return bytes_read + bytes_read_trans; } -static void iwl_mvm_free_coredump(const void *data) +static void iwl_mvm_free_coredump(void *data) { const struct iwl_mvm_dump_ptrs *fw_error_dump = data; @@ -265,6 +265,66 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, *dump_data = iwl_fw_error_next_data(*dump_data); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + /* Pull UMAC internal TXF data from all TXFs */ + for (i = 0; + i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i++) { + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the internal FIFOs */ + (*dump_data)->type = + cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); + (*dump_data)->len = + cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i); + + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_FIFO_ITEM_CNT)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_WR_PTR)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_RD_PTR)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_FENCE_PTR)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_CPU2_LOCK_FENCE)); + + /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ + iwl_trans_write_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_ADDR, + TXF_CPU2_WR_PTR); + + /* Dummy-read to advance the read pointer to head */ + iwl_trans_read_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_DATA); 
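
The LQM additions in fw-api.h above define the command and completion-notification layout consumed by the new lqm_send_cmd debugfs hook. A host-order sketch of filling the start-measurement command is shown below; the real struct uses __le32 fields (each value goes through cpu_to_le32() in the driver), and the duration/timeout numbers here are arbitrary.

#include <stdint.h>
#include <stdio.h>

enum {
	LQM_CMD_OPERATION_START_MEASUREMENT	= 0x01,
	LQM_CMD_OPERATION_STOP_MEASUREMENT	= 0x02,
};

/* Host-order stand-in for struct iwl_link_qual_msrmnt_cmd */
struct lqm_cmd {
	uint32_t cmd_operation;
	uint32_t mac_id;
	uint32_t measurement_time;	/* usec */
	uint32_t timeout;		/* usec */
};

int main(void)
{
	struct lqm_cmd cmd = {
		.cmd_operation	  = LQM_CMD_OPERATION_START_MEASUREMENT,
		.mac_id		  = 0,			/* MAC the measurement applies to */
		.measurement_time = 100 * 1000,		/* sample 100 ms of air time */
		.timeout	  = 500 * 1000,		/* allow 500 ms for the response */
	};

	printf("op %u mac %u time %u us timeout %u us\n",
	       cmd.cmd_operation, cmd.mac_id,
	       cmd.measurement_time, cmd.timeout);
	return 0;
}
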
+ + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = + iwl_trans_read_prph(mvm->trans, + TXF_CPU2_READ_MODIFY_DATA); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + } + iwl_trans_release_nic_access(mvm->trans, &flags); } @@ -280,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ #define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ -static const struct { +struct iwl_prph_range { u32 start, end; -} iwl_prph_dump_addr[] = { +}; + +static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, @@ -380,8 +442,18 @@ static const struct { { .start = 0x00a44000, .end = 0x00a7bf80 }, }; +static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { + { .start = 0x00a05c00, .end = 0x00a05c18 }, + { .start = 0x00a05400, .end = 0x00a056e8 }, + { .start = 0x00a08000, .end = 0x00a098bc }, + { .start = 0x00adfc00, .end = 0x00adfd1c }, + { .start = 0x00a02400, .end = 0x00a02758 }, +}; + static u32 iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data) + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) { struct iwl_fw_error_dump_prph *prph; unsigned long flags; @@ -390,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans, if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; @@ -429,9 +501,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct iwl_fw_error_dump_trigger_desc *dump_trig; struct iwl_mvm_dump_ptrs *fw_error_dump; u32 sram_len, sram_ofs; + struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem = + mvm->fw->dbg_mem_tlv; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->cfg->smem_len; - u32 sram2_len = mvm->cfg->dccm2_len; + u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len; + u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 
0 : mvm->cfg->dccm2_len; bool monitor_dump_only = false; int i; @@ -494,24 +568,54 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) sizeof(struct iwl_fw_error_dump_fifo); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + for (i = 0; + i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); + i++) { + if (!mem_cfg->internal_txfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->internal_txfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + /* Make room for PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; + int num_bytes_in_chunk = + iwl_prph_dump_addr_comm[i].end - + iwl_prph_dump_addr_comm[i].start + 4; prph_len += sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } + if (mvm->cfg->mq_rx_supported) { + for (i = 0; i < + ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_9000[i].end - + iwl_prph_dump_addr_9000[i].start + 4; + + prph_len += sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + } + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + - sram_len + sizeof(*dump_mem) + fifo_data_len + prph_len + radio_len + @@ -525,6 +629,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (sram2_len) file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + /* Make room for MEM segments */ + for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { + if (fw_dbg_mem[i]) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i]->len); + } + /* Make room for fw's virtual image pages, if it exists */ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size && mvm->fw_paging_db[0].fw_paging_block) @@ -551,6 +662,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + mvm->fw_dump_desc->len; + if (!mvm->fw->dbg_dynamic_mem) + file_len += sram_len + sizeof(*dump_mem); + dump_file = vzalloc(file_len); if (!dump_file) { kfree(fw_error_dump); @@ -600,16 +714,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (monitor_dump_only) goto dump_trans_data; - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, - sram_len); + if (!mvm->fw->dbg_dynamic_mem) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(sram_ofs); + iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, + sram_len); + dump_data = iwl_fw_error_next_data(dump_data); + } + + for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) { + if (fw_dbg_mem[i]) { + u32 len = le32_to_cpu(fw_dbg_mem[i]->len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs); + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = 
cpu_to_le32(len + + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i]->data_type; + dump_mem->offset = cpu_to_le32(ofs); + iwl_trans_read_mem_bytes(mvm->trans, ofs, + dump_mem->data, + len); + dump_data = iwl_fw_error_next_data(dump_data); + } + } if (smem_len) { - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -617,10 +751,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, dump_mem->data, smem_len); + dump_data = iwl_fw_error_next_data(dump_data); } if (sram2_len) { - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -628,11 +762,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, dump_mem->data, sram2_len); + dump_data = iwl_fw_error_next_data(dump_data); } if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + sizeof(*dump_mem)); @@ -641,6 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, dump_mem->data, IWL8260_ICCM_LEN); + dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ @@ -651,7 +786,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) struct page *pages = mvm->fw_paging_db[i].fw_paging_block; - dump_data = iwl_fw_error_next_data(dump_data); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); @@ -659,12 +793,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) paging->index = cpu_to_le32(i); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); + dump_data = iwl_fw_error_next_data(dump_data); } } - dump_data = iwl_fw_error_next_data(dump_data); - if (prph_len) - iwl_dump_prph(mvm->trans, &dump_data); + if (prph_len) { + iwl_dump_prph(mvm->trans, &dump_data, + iwl_prph_dump_addr_comm, + ARRAY_SIZE(iwl_prph_dump_addr_comm)); + + if (mvm->cfg->mq_rx_supported) + iwl_dump_prph(mvm->trans, &dump_data, + iwl_prph_dump_addr_9000, + ARRAY_SIZE(iwl_prph_dump_addr_9000)); + } dump_trans_data: fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 09d895faf..7057f35cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -64,6 +64,7 @@ * *****************************************************************************/ #include <net/mac80211.h> +#include <linux/netdevice.h> #include "iwl-trans.h" #include "iwl-op-mode.h" @@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_UDP | 
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; + /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) - cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; - memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); + cmd.indirection_table[i] = + 1 + (i % (mvm->trans->num_rx_queues - 1)); + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } @@ -176,8 +181,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) } } - if (sec_idx >= IWL_UCODE_SECTION_MAX) { - IWL_ERR(mvm, "driver didn't find paging image\n"); + /* + * If paging is enabled there should be at least 2 more sections left + * (one for CSS and one for Paging data) + */ + if (sec_idx >= ARRAY_SIZE(image->sec) - 1) { + IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); iwl_free_fw_paging(mvm); return -EINVAL; } @@ -412,7 +421,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) goto exit; } - mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, + /* Add an extra page for headers */ + mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + + FW_PAGING_SIZE, GFP_KERNEL); if (!mvm->trans->paging_download_buf) { ret = -ENOMEM; @@ -524,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return true; } - WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC)); + WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } @@ -643,7 +654,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + if (iwl_mvm_is_dqa_supported(mvm)) + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; + else + mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); @@ -790,17 +804,22 @@ out: static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) { struct iwl_host_cmd cmd = { - .id = SHARED_MEM_CFG, .flags = CMD_WANT_SKB, .data = { NULL, }, .len = { 0, }, }; - struct iwl_rx_packet *pkt; struct iwl_shared_mem_cfg *mem_cfg; + struct iwl_rx_packet *pkt; u32 i; lockdep_assert_held(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) return; @@ -826,6 +845,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) le32_to_cpu(mem_cfg->page_buff_addr); mvm->shared_mem_cfg.page_buff_size = le32_to_cpu(mem_cfg->page_buff_size); + + /* new API has more data */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + mvm->shared_mem_cfg.rxfifo_addr = + le32_to_cpu(mem_cfg->rxfifo_addr); + mvm->shared_mem_cfg.internal_txfifo_addr = + le32_to_cpu(mem_cfg->internal_txfifo_addr); + + BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); + + for (i = 0; + i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i++) + mvm->shared_mem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + } + IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); iwl_free_resp(&cmd); @@ -944,7 +982,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Add all the PHY contexts */ - chan = 
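
The iwl_send_rss_cfg_cmd() hunk above now keeps queue 0, the fallback queue, out of the RSS indirection table and seeds the hash key via netdev_rss_key_fill(). Worked out for an assumed 4 RX queues and a shortened 16-entry table (the real size comes from struct iwl_rss_config_cmd), the fill pattern is:

#include <stdio.h>

int main(void)
{
	unsigned int num_rx_queues = 4;
	unsigned int table[16], i;

	/* 1 + (i % (num_rx_queues - 1)) cycles through queues 1..3 only */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		table[i] = 1 + (i % (num_rx_queues - 1));

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%u ", table[i]);	/* 1 2 3 1 2 3 ... never 0 */
	printf("\n");
	return 0;
}
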
&mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index e885db346..7aae068c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, .exclude_vif = exclude_vif, .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE), + BIT(mvm->aux_queue), }; + if (iwl_mvm_is_dqa_supported(mvm)) + data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE); + else + data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE); + lockdep_assert_held(&mvm->mutex); /* mark all VIF used hw queues */ @@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - /* Find available queues, and allocate them to the ACs */ + /* + * Find available queues, and allocate them to the ACs. When in + * DQA-mode they aren't really used, and this is done only so the + * mac80211 ieee80211_check_queues() function won't fail + */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); - if (queue >= mvm->first_agg_queue) { + if (!iwl_mvm_is_dqa_supported(mvm) && + queue >= mvm->first_agg_queue) { IWL_ERR(mvm, "Failed to allocate queue\n"); ret = -EIO; goto exit_fail; @@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); + u8 queue; - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; + if (!iwl_mvm_is_dqa_supported(mvm)) { + queue = find_first_zero_bit(&used_hw_queues, + mvm->first_agg_queue); + + if (queue >= mvm->first_agg_queue) { + IWL_ERR(mvm, "Failed to allocate cab queue\n"); + ret = -EIO; + goto exit_fail; + } + } else { + queue = IWL_MVM_DQA_GCAST_QUEUE; } vif->cab_queue = queue; @@ -486,15 +501,21 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); + if (!iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_TX_FIFO_VO, 0, + wdg_timeout); break; case NL80211_IFTYPE_AP: iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); /* fall through */ default: + /* If DQA is supported - queues will be enabled when needed */ + if (iwl_mvm_is_dqa_supported(mvm)) + break; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], @@ -514,15 +535,31 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT, - 0); + if (!iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MAX_TID_COUNT, 0); + break; case NL80211_IFTYPE_AP: 
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); + + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); /* fall through */ default: + /* + * If DQA is supported - queues were already disabled, since in + * DQA-mode the queues are a property of the STA and not of the + * vif, and at this point the STA was already deleted + */ + if (iwl_mvm_is_dqa_supported(mvm)) + break; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], vif->hw_queue[ac], @@ -532,7 +569,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_band band, + enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; @@ -703,7 +740,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, rcu_read_lock(); chanctx = rcu_dereference(vif->chanctx_conf); iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band - : IEEE80211_BAND_2GHZ, + : NL80211_BAND_2GHZ, &cck_ack_rates, &ofdm_ack_rates); rcu_read_unlock(); @@ -1038,7 +1075,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) { + if (info->band == NL80211_BAND_5GHZ || vif->p2p) { rate = IWL_FIRST_OFDM_RATE; } else { rate = IWL_FIRST_CCK_RATE; @@ -1489,7 +1526,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band); @@ -1499,5 +1536,5 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ - ieee80211_rx_napi(mvm->hw, skb, NULL); + ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a50f4df7e..18a8474b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type); spin_lock_bh(&mvm->refs_lock); - WARN_ON(!mvm->refs[ref_type]--); + if (WARN_ON(!mvm->refs[ref_type])) { + spin_unlock_bh(&mvm->refs_lock); + return; + } + mvm->refs[ref_type]--; spin_unlock_bh(&mvm->refs_lock); iwl_trans_unref(mvm->trans); } @@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); + if (iwl_mvm_has_new_rx_api(mvm)) + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + if (mvm->trans->num_rx_queues > 1) + ieee80211_hw_set(hw, USES_RSS); if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - hw->queues = mvm->first_agg_queue; + if (!iwl_mvm_is_dqa_supported(mvm)) + hw->queues = mvm->first_agg_queue; + else + hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= 
IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -550,18 +562,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; - if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; - if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) { - hw->wiphy->bands[IEEE80211_BAND_5GHZ] = - &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) + hw->wiphy->bands[NL80211_BAND_2GHZ] = + &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; + if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { + hw->wiphy->bands[NL80211_BAND_5GHZ] = + &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) - hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= + hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -665,12 +677,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } hw->netdev_features |= mvm->cfg->features; - if (!iwl_mvm_is_csum_supported(mvm)) - hw->netdev_features &= ~NETIF_F_RXCSUM; - - if (IWL_MVM_SW_TX_CSUM_OFFLOAD) - hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6; + if (!iwl_mvm_is_csum_supported(mvm)) { + hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | + NETIF_F_RXCSUM); + /* We may support SW TX CSUM */ + if (IWL_MVM_SW_TX_CSUM_OFFLOAD) + hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; + } ret = ieee80211_register_hw(mvm->hw); if (ret) @@ -847,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, u16 *ssn = ¶ms->ssn; u8 buf_size = params->buf_size; bool amsdu = params->amsdu; + u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); @@ -887,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, + timeout); break; case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, + timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu(mvm->cfg)) { @@ -992,6 +1008,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); + memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); @@ -1180,6 +1197,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->async_handlers_wk); + flush_work(&mvm->add_stream_wk); cancel_delayed_work_sync(&mvm->fw_dump_wk); iwl_mvm_free_fw_dump_desc(mvm); @@ -1823,6 +1841,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc && + mvmvif->lqm_active) + iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT, + 0, 0); + /* * If we're not 
associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old @@ -2342,7 +2365,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; } - if (iwlwifi_mod_params.uapsd_disable) { + if (!vif->p2p && + (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } @@ -2378,6 +2402,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, peer_addr, action); } +static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta) +{ + struct iwl_mvm_tid_data *tid_data; + struct sk_buff *skb; + int i; + + spin_lock_bh(&mvm_sta->lock); + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { + tid_data = &mvm_sta->tid_data[i]; + while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) + ieee80211_free_txskb(mvm->hw, skb); + } + spin_unlock_bh(&mvm_sta->lock); +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2398,6 +2438,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* if a STA is being removed, reuse its ID */ flush_work(&mvm->sta_drained_wk); + /* + * If we are in a STA removal flow and in DQA mode: + * + * This is after the sync_rcu part, so the queues have already been + * flushed. No more TXs on their way in mac80211's path, and no more in + * the queues. + * Also, we won't be getting any new TX frames for this station. + * What we might have are deferred TX frames that need to be taken care + * of. + * + * Drop any still-queued deferred-frame before removing the STA, and + * make sure the worker is no longer handling frames for this STA. + */ + if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST && + iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); + flush_work(&mvm->add_stream_wk); + + /* + * No need to make sure deferred TX indication is off since the + * worker will already remove it if it was on + */ + } + mutex_lock(&mvm->mutex); if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { @@ -2861,7 +2928,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), /* Set the channel info data */ - .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ? + .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? PHY_BAND_24 : PHY_BAND_5, .channel_info.channel = channel->hw_value, .channel_info.width = PHY_VHT_CHANNEL_MODE20, @@ -3630,6 +3697,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: + if (mvmvif->lqm_active) + iwl_mvm_send_lqm_cmd(vif, + LQM_CMD_OPERATION_STOP_MEASUREMENT, + 0, 0); + /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. 
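The U-APSD hunk above turns the uapsd_disable module parameter into a bitmask rather than a boolean: one bit gates U-APSD on the BSS station interface and another gates it on P2P client, as the IWL_DISABLE_UAPSD_BSS / IWL_DISABLE_UAPSD_P2P_CLIENT checks show. The following self-contained userspace sketch illustrates that decision only; the struct, helper name and the concrete bit values are illustrative stand-ins, not the driver's definitions.

/* Sketch only: driver structs are stubbed; the real logic lives in
 * iwl_mvm_check_uapsd() and iwl_mvm_is_p2p_standalone_uapsd_supported(). */
#include <stdbool.h>
#include <stdio.h>

enum uapsd_disable_bits {            /* assumed bit layout, for illustration */
	DISABLE_UAPSD_BSS        = 0x1,
	DISABLE_UAPSD_P2P_CLIENT = 0x2,
};

struct fake_vif { bool p2p; };       /* stand-in for struct ieee80211_vif */

/* Returns true when U-APSD must be turned off for this interface type. */
static bool uapsd_disabled_for(const struct fake_vif *vif, unsigned int mod_param)
{
	if (!vif->p2p)
		return mod_param & DISABLE_UAPSD_BSS;
	return mod_param & DISABLE_UAPSD_P2P_CLIENT;
}

int main(void)
{
	struct fake_vif sta = { .p2p = false }, p2p = { .p2p = true };
	unsigned int mod_param = DISABLE_UAPSD_BSS;   /* e.g. uapsd_disable=1 */

	printf("BSS sta disabled: %d, P2P client disabled: %d\n",
	       uapsd_disabled_for(&sta, mod_param),
	       uapsd_disabled_for(&p2p, mod_param));  /* prints 1, 0 */
	return 0;
}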
@@ -3729,6 +3801,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, if (!vif || vif->type != NL80211_IFTYPE_STATION) return; + /* Make sure we're done with the deferred traffic before flushing */ + if (iwl_mvm_is_dqa_supported(mvm)) + flush_work(&mvm->add_stream_wk); + mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -3775,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, if (idx != 0) return -ENOENT; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); @@ -3822,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return; /* if beacon filtering isn't on mac80211 does it anyway */ @@ -3976,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, } } +void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, + struct iwl_mvm_internal_rxq_notif *notif, + u32 size) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); + u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_has_new_rx_api(mvm)) + return; + + notif->cookie = mvm->queue_sync_cookie; + + if (notif->sync) + atomic_set(&mvm->queue_sync_counter, + mvm->trans->num_rx_queues); + + ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); + if (ret) { + IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); + goto out; + } + + if (notif->sync) + ret = wait_event_timeout(notif_waitq, + atomic_read(&mvm->queue_sync_counter) == 0, + HZ); + WARN_ON_ONCE(!ret); + +out: + atomic_set(&mvm->queue_sync_counter, 0); + mvm->queue_sync_cookie++; +} + +static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_internal_rxq_notif data = { + .type = IWL_MVM_RXQ_EMPTY, + .sync = 1, + }; + + mutex_lock(&mvm->mutex); + iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); + mutex_unlock(&mvm->mutex); +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, @@ -4032,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .event_callback = iwl_mvm_mac_event_callback, + .sync_rx_queues = iwl_mvm_sync_rx_queues, + CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 9abbc93e3..ffbd41dcc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -208,7 +208,7 @@ enum iwl_power_scheme { }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 +#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { @@ -301,6 +301,8 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_PROTECT_CSA, IWL_MVM_REF_FW_DBG_COLLECT, IWL_MVM_REF_INIT_UCODE, + IWL_MVM_REF_SENDING_CMD, + IWL_MVM_REF_RX, /* update debugfs.c when changing this */ @@ -453,6 +455,12 @@ struct iwl_mvm_vif { /* TCP Checksum Offload */ netdev_features_t features; + + /* + * link quality measurement - used 
to check whether this interface + * is in the middle of a link quality measurement + */ + bool lqm_active; }; static inline struct iwl_mvm_vif * @@ -602,6 +610,87 @@ struct iwl_mvm_shared_mem_cfg { u32 rxfifo_size[RX_FIFO_MAX_NUM]; u32 page_buff_addr; u32 page_buff_size; + u32 rxfifo_addr; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +}; + +/** + * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer + * @head_sn: reorder window head sn + * @num_stored: number of mpdus stored in the buffer + * @buf_size: the reorder buffer size as set by the last addba request + * @sta_id: sta id of this reorder buffer + * @queue: queue of this reorder buffer + * @last_amsdu: track last A-MSDU SN for duplication detection + * @last_sub_index: track A-MSDU sub frame index for duplication detection + * @entries: list of skbs stored + * @reorder_time: time the packet was stored in the reorder buffer + * @reorder_timer: timer for frames that are in the reorder buffer. For A-MSDU + * it is the time of last received sub-frame + * @removed: prevent timer re-arming + * @lock: protect reorder buffer internal state + * @mvm: mvm pointer, needed for frame timer context + */ +struct iwl_mvm_reorder_buffer { + u16 head_sn; + u16 num_stored; + u8 buf_size; + u8 sta_id; + int queue; + u16 last_amsdu; + u8 last_sub_index; + struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF]; + unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; + struct timer_list reorder_timer; + bool removed; + spinlock_t lock; + struct iwl_mvm *mvm; +} ____cacheline_aligned_in_smp; + +/** + * struct iwl_mvm_baid_data - BA session data + * @sta_id: station id + * @tid: tid of the session + * @baid: baid of the session + * @timeout: the timeout set in the addba request + * @last_rx: last rx jiffies, updated only if timeout passed from last update + * @session_timer: timer to check if BA session expired, runs at 2 * timeout + * @mvm: mvm pointer, needed for timer context + * @reorder_buf: reorder buffer, allocated per queue + */ +struct iwl_mvm_baid_data { + struct rcu_head rcu_head; + u8 sta_id; + u8 tid; + u8 baid; + u16 timeout; + unsigned long last_rx; + struct timer_list session_timer; + struct iwl_mvm *mvm; + struct iwl_mvm_reorder_buffer reorder_buf[]; +}; + +/* + * enum iwl_mvm_queue_status - queue status + * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved + * Basically, this means that this queue can be used for any purpose + * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use + * This is the state of a queue that has been dedicated for some RA/TID + * (agg'd or not), but that hasn't yet gone through the actual enablement + * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. + * Note that in this state there is no requirement to already know what TID + * should be used with this queue, it is just marked as a queue that will + * be used, and shouldn't be allocated to anyone else. + * @IWL_MVM_QUEUE_READY: queue is ready to be used + * This is the state of a queue that has been fully configured (including + * SCD pointers, etc), has a specific RA/TID assigned to it, and can be + * used to send traffic.
+ */ +enum iwl_mvm_queue_status { + IWL_MVM_QUEUE_FREE, + IWL_MVM_QUEUE_RESERVED, + IWL_MVM_QUEUE_READY, }; struct iwl_mvm { @@ -624,6 +713,8 @@ struct iwl_mvm { unsigned long status; + u32 queue_sync_cookie; + atomic_t queue_sync_counter; /* * for beacon filtering - * currently only one interface can be supported @@ -656,10 +747,12 @@ struct iwl_mvm { /* Map to HW queue */ u32 hw_queue_to_mac80211; u8 hw_queue_refcount; - bool setup_reserved; + u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ + enum iwl_mvm_queue_status status; } queue_info[IWL_MAX_HW_QUEUES]; spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ + struct work_struct add_stream_wk; /* To add streams to queues */ atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; const char *nvm_file_name; @@ -679,11 +772,11 @@ struct iwl_mvm { struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; + unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; - u32 secret_key[IWL_RSS_HASH_KEY_CNT]; /* configured by mac80211 */ u32 rts_threshold; @@ -694,6 +787,7 @@ struct iwl_mvm { struct iwl_mcast_filter_cmd *mcast_filter_cmd; enum iwl_mvm_scan_type scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; + struct timer_list scan_timer; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; @@ -903,6 +997,10 @@ struct iwl_mvm { u32 ciphers[6]; struct iwl_mvm_tof_data tof_data; + struct ieee80211_vif *nan_vif; +#define IWL_MAX_BAID 32 + struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; + /* * Drop beacons from other APs in AP mode when there are no connected * clients. 
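The reorder machinery added to mvm.h above keys everything off 12-bit 802.11 sequence numbers: head_sn marks the start of the reorder window, buf_size its width, and frames are released while the head is still "less than" the NSSN in modulo-4096 arithmetic. Below is a compilable userspace sketch of that comparison and release walk, modelled on iwl_mvm_is_sn_less() and the loop in iwl_mvm_release_frames(); the SN_MASK / in_release_window names and the sample numbers are illustrative, not the driver's.

/* Standalone sketch of the 12-bit SN window used by the new reorder buffer
 * (helpers modelled on mac80211's ieee80211_sn_less()/ieee80211_sn_inc()). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SN_MASK   0xfff               /* 802.11 sequence numbers are 12 bits */
#define SN_MODULO (SN_MASK + 1)

static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

/* Mirrors iwl_mvm_is_sn_less(): true iff sn2 - buf_size < sn1 < sn2, i.e.
 * sn1 sits inside the span that may legitimately be released up to sn2. */
static bool in_release_window(uint16_t sn1, uint16_t sn2, uint16_t buf_size)
{
	return sn_less(sn1, sn2) && !sn_less(sn1, sn2 - buf_size);
}

int main(void)
{
	unsigned int head_sn = 4090, nssn = 5, buf_size = 64;  /* wraps at 4096 */

	/* Release stored slots while the head is still behind the NSSN. */
	while (in_release_window(head_sn, nssn, buf_size)) {
		printf("release slot %u (sn %u)\n", head_sn % buf_size, head_sn);
		head_sn = (head_sn + 1) & SN_MASK;
	}
	printf("new head_sn = %u\n", head_sn);                  /* prints 5 */
	return 0;
}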
@@ -1048,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && + !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) @@ -1063,7 +1162,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && - IWL_MVM_P2P_UAPSD_STANDALONE; + !(iwlwifi_mod_params.uapsd_disable & + IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) @@ -1115,9 +1215,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band); + enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); @@ -1224,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); -void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, const u8 *data, u32 count); @@ -1297,6 +1397,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); +void iwl_mvm_scan_timeout(unsigned long data); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, @@ -1449,26 +1550,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, - enum ieee80211_band band); + enum nl80211_band band); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); -bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); -void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); -int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); -void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - enum ieee80211_rssi_event_data); -u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, - struct ieee80211_sta *sta); -bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, - enum ieee80211_band band); -void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); - /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS void @@ -1563,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); 
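queue_sync_cookie and queue_sync_counter, added to struct iwl_mvm above, carry the RX-queue synchronization used by iwl_mvm_sync_rx_queues_internal(): the sender stamps a cookie, arms the counter with one tick per RX queue, notifies every queue and waits (with a timeout) for the counter to reach zero, while each queue handler discards messages whose cookie is stale. A rough userspace model of that handshake follows, with pthreads standing in for the per-queue NAPI contexts; all names and the polling wait are illustrative only.

/* Toy model of the RX-queue sync handshake (not driver code). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_RX_QUEUES 4

static atomic_int sync_counter;
static unsigned int sync_cookie = 1;

static void *rx_queue_handler(void *arg)
{
	unsigned int seen_cookie = *(unsigned int *)arg;

	/* A stale cookie means the sender already timed out and moved on. */
	if (seen_cookie != sync_cookie) {
		fprintf(stderr, "expired sync message\n");
		return NULL;
	}
	atomic_fetch_sub(&sync_counter, 1);   /* this queue has processed it */
	return NULL;
}

int main(void)
{
	pthread_t queues[NUM_RX_QUEUES];
	unsigned int cookie = sync_cookie;
	int i, waited = 0;

	atomic_store(&sync_counter, NUM_RX_QUEUES);
	for (i = 0; i < NUM_RX_QUEUES; i++)
		pthread_create(&queues[i], NULL, rx_queue_handler, &cookie);

	/* Crude stand-in for wait_event_timeout(..., counter == 0, HZ). */
	while (atomic_load(&sync_counter) && waited++ < 1000)
		usleep(1000);

	for (i = 0; i < NUM_RX_QUEUES; i++)
		pthread_join(queues[i], NULL);

	printf("synced: %s\n", atomic_load(&sync_counter) ? "no" : "yes");
	sync_cookie++;                        /* next sync uses a fresh cookie */
	return 0;
}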
+/* Re-configure the SCD for a queue that has already been configured */ +int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, + int tid, int frame_limit, u16 ssn); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); @@ -1625,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); +void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, + struct iwl_mvm_internal_rxq_notif *notif, + u32 size); +void iwl_mvm_reorder_timer_expired(unsigned long data); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); @@ -1634,4 +1727,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); +/* Link Quality Measurement */ +int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, + enum iwl_lqm_cmd_operatrions operation, + u32 duration, u32 timeout); +bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d27839909..a68054f12 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, - iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), + iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), @@ -421,6 +421,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_system_names[] = { + HCMD_NAME(SHARED_MEM_CFG_CMD), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { + HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD), + HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), @@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), + [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), + [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), @@ -537,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->restart_fw = iwlwifi_mod_params.restart_fw ? 
-1 : 0; mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + if (!iwl_mvm_is_dqa_supported(mvm)) { + mvm->first_agg_queue = 16; + mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + } else { + mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; + mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; + } if (mvm->cfg->base_params->num_of_queues == 16) { mvm->aux_queue = 11; mvm->first_agg_queue = 12; @@ -562,12 +584,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); + INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); skb_queue_head_init(&mvm->d0i3_tx); init_waitqueue_head(&mvm->d0i3_exit_waitq); + atomic_set(&mvm->queue_sync_counter, 0); + SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); /* @@ -601,7 +626,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + if (iwl_mvm_is_dqa_supported(mvm)) + trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; + else + trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; @@ -707,8 +735,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); - /* init RSS hash key */ - get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key)); + setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout, + (unsigned long)mvm); return op_mode; @@ -763,6 +791,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_tof_clean(mvm); + del_timer_sync(&mvm->scan_timer); + + mutex_destroy(&mvm->mutex); + mutex_destroy(&mvm->d0i3_suspend_mutex); + ieee80211_free_hw(mvm->hw); } @@ -904,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode, if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); else if (pkt->hdr.cmd == FRAME_RELEASE) - iwl_mvm_rx_frame_release(mvm, rxb, 0); + iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else @@ -1182,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, struct iwl_d0i3_iter_data *iter_data) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvmsta; u32 available_tids = 0; u8 tid; @@ -1191,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) return false; - ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (IS_ERR_OR_NULL(ap_sta)) + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + if (!mvmsta) return false; - mvmsta = iwl_mvm_sta_from_mac80211(ap_sta); spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1606,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct iwl_rx_packet *pkt = rxb_addr(rxb); if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) - iwl_mvm_rx_frame_release(mvm, rxb, queue); + iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION && 
pkt->hdr.group_id == DATA_PATH_GROUP)) iwl_mvm_rx_queue_notif(mvm, rxb, queue); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 6e6a56f21..95138830b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, u8 active_cnt, idle_cnt; /* Set the channel info data */ - cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5); cmd->ci.channel = chandef->chan->hw_value; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index f313910cd..7b1f6ad60 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } - cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP; + cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 61d0a8cd1..81dd2f6a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, /* Convert a ucode rate into an rs_rate object */ static int rs_rate_from_ucode_rate(const u32 ucode_rate, - enum ieee80211_band band, + enum nl80211_band band, struct rs_rate *rate) { u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK; @@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, if (!(ucode_rate & RATE_MCS_HT_MSK) && !(ucode_rate & RATE_MCS_VHT_MSK)) { if (num_of_ant == 1) { - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, return; } else if (is_siso(rate)) { /* Downgrade to Legacy if we were in SISO */ - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, rate->ant = column->ant; if (column->mode == RS_LEGACY) { - if (lq_sta->band == IEEE80211_BAND_5GHZ) + if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, } static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct rs_rate *rate, enum ieee80211_band band) + struct rs_rate *rate, enum nl80211_band band) { int index = rate->index; bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); @@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *vif = mvm_sta->vif; struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_band band; + enum nl80211_band band; struct iwl_rate_scale_data *window; struct rs_rate *rate = &tbl->rate; enum tpc_action action; @@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) 
- band = IEEE80211_NUM_BANDS; + band = NUM_NL80211_BANDS; else band = chanctx_conf->def.chan->band; rcu_read_unlock(); @@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm, rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO; - else if (lq_sta->band == IEEE80211_BAND_5GHZ) + else if (lq_sta->band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; else rate->type = LQ_LEGACY_G; @@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm, } else { lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; - if (lq_sta->band == IEEE80211_BAND_5GHZ) { + if (lq_sta->band == NL80211_BAND_5GHZ) { lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); @@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, + enum nl80211_band band, struct rs_rate *rate) { int i, nentries; @@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, rate->index = find_first_bit(&lq_sta->active_legacy_rate, BITS_PER_LONG); - if (band == IEEE80211_BAND_5GHZ) { + if (band == NL80211_BAND_5GHZ) { rate->type = LQ_LEGACY_A; initial_rates = rs_optimal_rates_5ghz_legacy; nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); @@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum ieee80211_band band, + enum nl80211_band band, bool init) { struct iwl_scale_tbl_info *tbl; @@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init) + enum nl80211_band band, bool init) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r, #ifdef CONFIG_MAC80211_DEBUGFS static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq_cmd, - enum ieee80211_band band, + enum nl80211_band band, u32 ucode_rate) { struct rs_rate rate; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index bdb6f2d8d..90d046fb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -305,7 +305,7 @@ struct iwl_lq_sta { bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */ bool bfer_capable; /* Remote supports beamformee and we BFer */ - enum ieee80211_band band; + enum nl80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
*/ unsigned long active_legacy_rate; @@ -358,7 +358,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum ieee80211_band band, bool init); + enum nl80211_band band, bool init); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 485cfc1a4..ab7f7eda9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, @@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - ieee80211_rx_napi(mvm->hw, skb, napi); + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } /* @@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; + bool take_ref; phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -319,7 +321,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); @@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, - crypt_len, rxb); + if (unlikely(ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control))) + rx_status->boottime_ns = ktime_get_boot_ns(); + + /* Take a reference briefly to kick off a d0i3 entry delay so + * we can handle bursts of RX packets without toggling the + * state too often. But don't do this for beacons if we are + * going to idle because the beacon filtering changes we make + * cause the firmware to send us collateral beacons. 
*/ + take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) && + ieee80211_is_beacon(hdr->frame_control)); + + if (take_ref) + iwl_mvm_ref(mvm, IWL_MVM_REF_RX); + + iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, + ampdu_status, crypt_len, rxb); + + if (take_ref) + iwl_mvm_unref(mvm, IWL_MVM_REF_RX); } static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 9a54f2d2a..2c61516d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); else - ieee80211_rx_napi(mvm->hw, skb, napi); + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + u16 flags = le16_to_cpu(desc->l3l4_flags); + u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> + IWL_RX_L3_PROTO_POS); if (mvmvif->features & NETIF_F_RXCSUM && - desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) && - desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK)) + flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && + (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK || + l3_prot == IWL_RX_L3_TYPE_IPV6 || + l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -390,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, return ret; } +/* + * Returns true if sn2 - buffer_size < sn1 < sn2. + * To be used only in order to compare reorder buffer head with NSSN. + * We fully trust NSSN unless it is behind us due to reorder timeout. + * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. + */ +static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) +{ + return ieee80211_sn_less(sn1, sn2) && + !ieee80211_sn_less(sn1, sn2 - buffer_size); +} + +#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) + +static void iwl_mvm_release_frames(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct napi_struct *napi, + struct iwl_mvm_reorder_buffer *reorder_buf, + u16 nssn) +{ + u16 ssn = reorder_buf->head_sn; + + lockdep_assert_held(&reorder_buf->lock); + + /* ignore nssn smaller than head sn - this can happen due to timeout */ + if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) + return; + + while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { + int index = ssn % reorder_buf->buf_size; + struct sk_buff_head *skb_list = &reorder_buf->entries[index]; + struct sk_buff *skb; + + ssn = ieee80211_sn_inc(ssn); + + /* holes are valid since nssn indicates frames were received. */ + if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list)) + continue; + /* Empty the list. 
Will have more than one frame for A-MSDU */ + while ((skb = __skb_dequeue(skb_list))) { + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, + reorder_buf->queue, + sta); + reorder_buf->num_stored--; + } + } + reorder_buf->head_sn = nssn; + + if (reorder_buf->num_stored && !reorder_buf->removed) { + u16 index = reorder_buf->head_sn % reorder_buf->buf_size; + + while (!skb_peek_tail(&reorder_buf->entries[index])) + index = (index + 1) % reorder_buf->buf_size; + /* modify timer to match next frame's expiration time */ + mod_timer(&reorder_buf->reorder_timer, + reorder_buf->reorder_time[index] + 1 + + RX_REORDER_BUF_TIMEOUT_MQ); + } else { + del_timer(&reorder_buf->reorder_timer); + } +} + +void iwl_mvm_reorder_timer_expired(unsigned long data) +{ + struct iwl_mvm_reorder_buffer *buf = (void *)data; + int i; + u16 sn = 0, index = 0; + bool expired = false; + + spin_lock_bh(&buf->lock); + + if (!buf->num_stored || buf->removed) { + spin_unlock_bh(&buf->lock); + return; + } + + for (i = 0; i < buf->buf_size ; i++) { + index = (buf->head_sn + i) % buf->buf_size; + + if (!skb_peek_tail(&buf->entries[index])) + continue; + if (!time_after(jiffies, buf->reorder_time[index] + + RX_REORDER_BUF_TIMEOUT_MQ)) + break; + expired = true; + sn = ieee80211_sn_add(buf->head_sn, i + 1); + } + + if (expired) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]); + /* SN is set to the last expired frame + 1 */ + iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn); + rcu_read_unlock(); + } else if (buf->num_stored) { + /* + * If no frame expired and there are stored frames, index is now + * pointing to the first unexpired frame - modify timer + * accordingly to this frame. + */ + mod_timer(&buf->reorder_timer, + buf->reorder_time[index] + + 1 + RX_REORDER_BUF_TIMEOUT_MQ); + } + spin_unlock_bh(&buf->lock); +} + +static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, + struct iwl_mvm_delba_data *data) +{ + struct iwl_mvm_baid_data *ba_data; + struct ieee80211_sta *sta; + struct iwl_mvm_reorder_buffer *reorder_buf; + u8 baid = data->baid; + + if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID)) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON_ONCE(!ba_data)) + goto out; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + goto out; + + reorder_buf = &ba_data->reorder_buf[queue]; + + /* release all frames that are in the reorder buffer to the stack */ + spin_lock_bh(&reorder_buf->lock); + iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf, + ieee80211_sn_add(reorder_buf->head_sn, + reorder_buf->buf_size)); + spin_unlock_bh(&reorder_buf->lock); + del_timer_sync(&reorder_buf->reorder_timer); + +out: + rcu_read_unlock(); +} + void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -400,15 +549,184 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, notif = (void *)pkt->data; internal_notif = (void *)notif->payload; + if (internal_notif->sync) { + if (mvm->queue_sync_cookie != internal_notif->cookie) { + WARN_ONCE(1, + "Received expired RX queue sync message\n"); + return; + } + atomic_dec(&mvm->queue_sync_counter); + } + switch (internal_notif->type) { + case IWL_MVM_RXQ_EMPTY: + break; case IWL_MVM_RXQ_NOTIF_DEL_BA: - /* TODO */ + iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } } +/* + * 
Returns true if the MPDU was buffered/dropped, false if it should be passed + * to upper layer. + */ +static bool iwl_mvm_reorder(struct iwl_mvm *mvm, + struct napi_struct *napi, + int queue, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct iwl_rx_mpdu_desc *desc) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_mvm_sta *mvm_sta; + struct iwl_mvm_baid_data *baid_data; + struct iwl_mvm_reorder_buffer *buffer; + struct sk_buff *tail; + u32 reorder = le32_to_cpu(desc->reorder_data); + bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; + u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + u8 sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + int index; + u16 nssn, sn; + u8 baid; + + baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT; + + if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) + return false; + + /* no sta yet */ + if (WARN_ON(IS_ERR_OR_NULL(sta))) + return false; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + /* not a data packet */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return false; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return false; + + baid_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN(!baid_data, + "Received baid %d, but no data exists for this BAID\n", baid)) + return false; + if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, + "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", + baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, + tid)) + return false; + + nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; + sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> + IWL_RX_MPDU_REORDER_SN_SHIFT; + + buffer = &baid_data->reorder_buf[queue]; + + spin_lock_bh(&buffer->lock); + + /* + * If there was a significant jump in the nssn - adjust. + * If the SN is smaller than the NSSN it might need to first go into + * the reorder buffer, in which case we just release up to it and the + * rest of the function will take care of storing it and releasing up to the + * nssn + */ + if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, + buffer->buf_size)) { + u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; + + iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); + } + + /* drop any outdated packets */ + if (ieee80211_sn_less(sn, buffer->head_sn)) + goto drop; + + /* release immediately if allowed by nssn and no stored frames */ + if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { + if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, + buffer->buf_size)) + buffer->head_sn = nssn; + /* No need to update AMSDU last SN - we are moving the head */ + spin_unlock_bh(&buffer->lock); + return false; + } + + index = sn % buffer->buf_size; + + /* + * Check if we already stored this frame + * As an A-MSDU is either received or not as a whole, the logic is simple: + * If we have frames in that position in the buffer and the last frame + * that originated from an A-MSDU had a different SN, then it is a + * retransmission. If it is the same SN and the subframe index is + * incrementing, it is the same A-MSDU - otherwise it is a retransmission.
+ */ + tail = skb_peek_tail(&buffer->entries[index]); + if (tail && !amsdu) + goto drop; + else if (tail && (sn != buffer->last_amsdu || + buffer->last_sub_index >= sub_frame_idx)) + goto drop; + + /* put in reorder buffer */ + __skb_queue_tail(&buffer->entries[index], skb); + buffer->num_stored++; + buffer->reorder_time[index] = jiffies; + + if (amsdu) { + buffer->last_amsdu = sn; + buffer->last_sub_index = sub_frame_idx; + } + + iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); + spin_unlock_bh(&buffer->lock); + return true; + +drop: + kfree_skb(skb); + spin_unlock_bh(&buffer->lock); + return true; +} + +static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) +{ + unsigned long now = jiffies; + unsigned long timeout; + struct iwl_mvm_baid_data *data; + + rcu_read_lock(); + + data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON(!data)) + goto out; + + if (!data->timeout) + goto out; + + timeout = data->timeout; + /* + * Do not update last rx all the time to avoid cache bouncing + * between the rx queues. + * Update it every timeout. Worst case is the session will + * expire after ~ 2 * timeout, which doesn't matter that much. + */ + if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now)) + /* Update is atomic */ + data->last_rx = now; + +out: + rcu_read_unlock(); +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -451,8 +769,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); - rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ : - IEEE80211_BAND_2GHZ; + rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(desc->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, desc, rx_status); @@ -479,6 +797,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & + IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT); /* * We have tx blocked stations (with CS bit). 
If we heard @@ -531,6 +852,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) + iwl_mvm_agg_rx_received(mvm, baid); } /* @@ -588,12 +911,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, /* TODO: PHY info - gscan */ iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); rcu_read_unlock(); } -void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, +void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { - /* TODO */ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_frame_release *release = (void *)pkt->data; + struct ieee80211_sta *sta; + struct iwl_mvm_reorder_buffer *reorder_buf; + struct iwl_mvm_baid_data *ba_data; + + int baid = release->baid; + + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mvm->baid_map[baid]); + if (WARN_ON_ONCE(!ba_data)) + goto out; + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + goto out; + + reorder_buf = &ba_data->reorder_buf[queue]; + + spin_lock_bh(&reorder_buf->lock); + iwl_mvm_release_frames(mvm, sta, napi, reorder_buf, + le16_to_cpu(release->nssn)); + spin_unlock_bh(&reorder_buf->lock); + +out: + rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 09eb72c4a..e78fc567f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -70,6 +70,7 @@ #include "mvm.h" #include "fw-api-scan.h" +#include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 @@ -162,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) return cpu_to_le16(rx_chain); } -static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band) +static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band) { - if (band == IEEE80211_BAND_2GHZ) + if (band == NL80211_BAND_2GHZ) return cpu_to_le32(PHY_BAND_24); else return cpu_to_le32(PHY_BAND_5); } static inline __le32 -iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, +iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, bool no_cck) { u32 tx_ant; @@ -181,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; - if (band == IEEE80211_BAND_2GHZ && !no_cck) + if (band == NL80211_BAND_2GHZ && !no_cck) return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | tx_ant); else @@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, ieee80211_scan_completed(mvm->hw, scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); + } else { + IWL_ERR(mvm, + "got scan complete notification but no scan is running\n"); } mvm->last_ebs_successful = @@ -586,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_2GHZ, + NL80211_BAND_2GHZ, no_cck); tx_cmd[0].sta_id = 
mvm->aux_sta.sta_id; tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, - IEEE80211_BAND_5GHZ, + NL80211_BAND_5GHZ, no_cck); tx_cmd[1].sta_id = mvm->aux_sta.sta_id; } @@ -690,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Insert ds parameter set element on 2.4 GHz band */ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, - ies->ies[IEEE80211_BAND_2GHZ], - ies->len[IEEE80211_BAND_2GHZ], + ies->ies[NL80211_BAND_2GHZ], + ies->len[NL80211_BAND_2GHZ], pos); params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[0].len = cpu_to_le16(newpos - pos); pos = newpos; - memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], - ies->len[IEEE80211_BAND_5GHZ]); + memcpy(pos, ies->ies[NL80211_BAND_5GHZ], + ies->len[NL80211_BAND_5GHZ]); params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[1].len = - cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]); - pos += ies->len[IEEE80211_BAND_5GHZ]; + cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); + pos += ies->len[NL80211_BAND_5GHZ]; memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); @@ -916,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) unsigned int rates = 0; int i; - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); @@ -934,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) struct iwl_scan_config *scan_config; struct ieee80211_supported_band *band; int num_channels = - mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; int ret, i, j = 0, cmd_size; struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), @@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | SCAN_CONFIG_FLAG_SET_RX_CHAINS | + SCAN_CONFIG_FLAG_SET_AUX_STA_ID | SCAN_CONFIG_FLAG_SET_ALL_TIMES | SCAN_CONFIG_FLAG_SET_LEGACY_RATES | SCAN_CONFIG_FLAG_SET_MAC_ADDR | @@ -988,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_channels; i++, j++) scan_config->channel_array[j] = band->channels[i].hw_value; - band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; + band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_channels; i++, j++) scan_config->channel_array[j] = band->channels[i].hw_value; @@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } +#define SCAN_TIMEOUT (20 * HZ) + +void iwl_mvm_scan_timeout(unsigned long data) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)data; + + IWL_ERR(mvm, "regular scan timed out\n"); + + del_timer(&mvm->scan_timer); + iwl_force_nmi(mvm->trans); +} + int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_status |= IWL_MVM_SCAN_REGULAR; iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); + mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT); + return 0; } @@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { ieee80211_scan_completed(mvm->hw, aborted); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; @@ -1607,6 +1628,7 @@ out: * to release the scan reference here. */ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + del_timer(&mvm->scan_timer); if (notify) ieee80211_scan_completed(mvm->hw, true); } else if (notify) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index c2def1232..443a42855 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, } } - if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { + if (sta) { BUILD_BUG_ON(sizeof(sf_full_timeout) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); @@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_sta *sta; int ret = 0; - if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13) - sf_cmd.state = cpu_to_le32(new_state); - if (mvm->cfg->disable_dummy_notification) sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); @@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, switch (new_state) { case SF_UNINIT: - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) - iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: if (sta_id == IWL_MVM_STATION_COUNT) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index ef99942d7..b23ab4a45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update) + bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { @@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 status; u32 agg_size = 0, mpdu_dens = 0; - if (!update) { + if (!update || (flags & STA_MODIFY_QUEUES)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); + + if (flags & STA_MODIFY_QUEUES) + add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } switch (sta->bandwidth) { @@ -220,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return ret; } +static void iwl_mvm_rx_agg_session_expired(unsigned long data) +{ + struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data; + struct iwl_mvm_baid_data *ba_data; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvm_sta; + unsigned long timeout; + + rcu_read_lock(); + + ba_data = rcu_dereference(*rcu_ptr); + + if (WARN_ON(!ba_data)) + goto 
unlock; + + if (!ba_data->timeout) + goto unlock; + + timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); + if (time_is_after_jiffies(timeout)) { + mod_timer(&ba_data->session_timer, timeout); + goto unlock; + } + + /* Timer expired */ + sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, + sta->addr, ba_data->tid); +unlock: + rcu_read_unlock(); +} + static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { @@ -274,6 +310,229 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); } +static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 ac, int tid, + struct ieee80211_hdr *hdr) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = iwl_mvm_ac_to_tx_fifo[ac], + .sta_id = mvmsta->sta_id, + .tid = tid, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + u8 mac_queue = mvmsta->vif->hw_queue[ac]; + int queue = -1; + int ssn; + int ret; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + + /* + * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one + * exists + */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control)) { + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE, + IWL_MVM_DQA_MAX_MGMT_QUEUE); + if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) + IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", + queue); + + /* If no such queue is found, we'll use a DATA queue instead */ + } + + if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + queue = mvmsta->reserved_queue; + IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); + } + + if (queue < 0) + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + + /* + * Mark TXQ as ready, even though it hasn't been fully configured yet, + * to make sure no one else takes it. + * This will allow avoiding re-acquiring the lock at the end of the + * configuration. On error we'll mark it back as free. + */ + if (queue >= 0) + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; + + spin_unlock_bh(&mvm->queue_info_lock); + + /* TODO: support shared queues for same RA */ + if (queue < 0) + return -ENOSPC; + + /* + * Actual en/disablement of aggregations is through the ADD_STA HCMD, + * but for configuring the SCD to send A-MPDUs we need to mark the queue + * as aggregatable. 
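The iwl_mvm_rx_agg_session_expired() handler added above re-arms itself for as long as frames keep arriving, and only tears the RX BA session down once nothing has been received for twice the negotiated BA timeout (the timeout is in TUs of 1024 us). A minimal standalone sketch of that decision, using a caller-supplied millisecond clock in place of jiffies; the names and the millisecond granularity are illustrative, not the driver's types:

#include <stdbool.h>
#include <stdint.h>

#define TU_TO_MS(tu) (((uint64_t)(tu) * 1024) / 1000)

/*
 * Decide what the inactivity timer should do: if the session has been idle
 * for more than twice the BA timeout, report it as expired; otherwise report
 * when the timer should fire next (last_rx + 2 * timeout).
 */
static bool ba_session_expired(uint64_t now_ms, uint64_t last_rx_ms,
			       uint16_t timeout_tu, uint64_t *rearm_at_ms)
{
	uint64_t deadline = last_rx_ms + 2 * TU_TO_MS(timeout_tu);

	if (now_ms < deadline) {
		*rearm_at_ms = deadline;	/* traffic seen recently: re-arm */
		return false;
	}
	return true;				/* idle too long: stop the session */
}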
+ * Mark all DATA queues as allowing to be aggregated at some point + */ + cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); + + IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n", + queue, mvmsta->sta_id, tid); + + ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, + wdg_timeout); + + spin_lock_bh(&mvmsta->lock); + mvmsta->tid_data[tid].txq_id = queue; + mvmsta->tfd_queue_msk |= BIT(queue); + + if (mvmsta->reserved_queue == queue) + mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; + spin_unlock_bh(&mvmsta->lock); + + ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); + if (ret) + goto out_err; + + return 0; + +out_err: + iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); + + return ret; +} + +static inline u8 iwl_mvm_tid_to_ac_queue(int tid) +{ + if (tid == IWL_MAX_TID_COUNT) + return IEEE80211_AC_VO; /* MGMT */ + + return tid_to_mac80211_ac[tid]; +} + +static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + struct sk_buff_head deferred_tx; + u8 mac_queue; + bool no_queue = false; /* Marks if there is a problem with the queue */ + u8 ac; + + lockdep_assert_held(&mvm->mutex); + + skb = skb_peek(&tid_data->deferred_tx_frames); + if (!skb) + return; + hdr = (void *)skb->data; + + ac = iwl_mvm_tid_to_ac_queue(tid); + mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; + + if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE && + iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { + IWL_ERR(mvm, + "Can't alloc TXQ for sta %d tid %d - dropping frame\n", + mvmsta->sta_id, tid); + + /* + * Mark queue as problematic so later the deferred traffic is + * freed, as we can do nothing with it + */ + no_queue = true; + } + + __skb_queue_head_init(&deferred_tx); + + /* Disable bottom-halves when entering TX path */ + local_bh_disable(); + spin_lock(&mvmsta->lock); + skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); + spin_unlock(&mvmsta->lock); + + while ((skb = __skb_dequeue(&deferred_tx))) + if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) + ieee80211_free_txskb(mvm->hw, skb); + local_bh_enable(); + + /* Wake queue */ + iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); +} + +void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, + add_stream_wk); + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + unsigned long deferred_tid_traffic; + int sta_id, tid; + + mutex_lock(&mvm->mutex); + + /* Go over all stations with deferred traffic */ + for_each_set_bit(sta_id, mvm->sta_deferred_frames, + IWL_MVM_STATION_COUNT) { + clear_bit(sta_id, mvm->sta_deferred_frames); + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; + + for_each_set_bit(tid, &deferred_tid_traffic, + IWL_MAX_TID_COUNT + 1) + iwl_mvm_tx_deferred_stream(mvm, sta, tid); + } + + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum nl80211_iftype vif_type) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int queue; + + 
spin_lock_bh(&mvm->queue_info_lock); + + /* Make sure we have free resources for this STA */ + if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && + (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == + IWL_MVM_QUEUE_FREE)) + queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; + else + queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + if (queue < 0) { + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "No available queues for new station\n"); + return -ENOSPC; + } + mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; + + spin_unlock_bh(&mvm->queue_info_lock); + + mvmsta->reserved_queue = queue; + + IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", + queue, mvmsta->sta_id); + + return 0; +} + int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -314,18 +573,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ret = iwl_mvm_tdls_sta_init(mvm, sta); if (ret) return ret; - } else { + } else if (!iwl_mvm_is_dqa_supported(mvm)) { for (i = 0; i < IEEE80211_NUM_ACS; i++) if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); } /* for HW restart - reset everything but the sequence number */ - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; + + if (!iwl_mvm_is_dqa_supported(mvm)) + continue; + + /* + * Mark all queues for this STA as unallocated and defer TX + * frames until the queue is allocated + */ + mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } + mvm_sta->deferred_traffic_tid_map = 0; mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && @@ -338,7 +608,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); + if (iwl_mvm_is_dqa_supported(mvm)) { + ret = iwl_mvm_reserve_sta_stream(mvm, sta, + ieee80211_vif_type_p2p(vif)); + if (ret) + goto err; + } + + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); if (ret) goto err; @@ -364,7 +641,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - return iwl_mvm_sta_send_to_fw(mvm, sta, true); + return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, @@ -509,6 +786,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk) mutex_unlock(&mvm->mutex); } +static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_sta *mvm_sta) +{ + int ac; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { + if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE) + continue; + + ac = iwl_mvm_tid_to_ac_queue(i); + iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, + vif->hw_queue[ac], i, 0); + mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + } +} + int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -537,6 +834,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + /* If DQA is supported - the queues can be disabled now */ + if (iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_disable_sta_queues(mvm, vif, 
mvm_sta); + /* if we are associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; @@ -750,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_VO, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + int queue; + + if ((vif->type == NL80211_IFTYPE_AP) && + (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE))) + queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) && + (mvmvif->bcast_sta.tfd_queue_msk & + BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE))) + queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + else if (WARN(1, "Missed required TXQ for adding bcast STA\n")) + return -EINVAL; + + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, + wdg_timeout); + } + if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; @@ -778,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask; + u32 qmask = 0; lockdep_assert_held(&mvm->mutex); - qmask = iwl_mvm_mac_get_queues_mask(vif); + if (!iwl_mvm_is_dqa_supported(mvm)) + qmask = iwl_mvm_mac_get_queues_mask(vif); - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. - */ - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP) { + /* + * The firmware defines the TFD queue mask to only be relevant + * for *unicast* queues, so the multicast (CAB) queue shouldn't + * be included. + */ qmask &= ~BIT(vif->cab_queue); + if (iwl_mvm_is_dqa_supported(mvm)) + qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + } else if (iwl_mvm_is_dqa_supported(mvm) && + vif->type == NL80211_IFTYPE_P2P_DEVICE) { + qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + } + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, ieee80211_vif_type_p2p(vif)); } @@ -849,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #define IWL_MAX_RX_BA_SESSIONS 16 +static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) +{ + struct iwl_mvm_delba_notif notif = { + .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, + .metadata.sync = 1, + .delba.baid = baid, + }; + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); +}; + +static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, + struct iwl_mvm_baid_data *data) +{ + int i; + + iwl_mvm_sync_rxq_del_ba(mvm, data->baid); + + for (i = 0; i < mvm->trans->num_rx_queues; i++) { + int j; + struct iwl_mvm_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + + spin_lock_bh(&reorder_buf->lock); + if (likely(!reorder_buf->num_stored)) { + spin_unlock_bh(&reorder_buf->lock); + continue; + } + + /* + * This shouldn't happen in regular DELBA since the internal + * delBA notification should trigger a release of all frames in + * the reorder buffer. + */ + WARN_ON(1); + + for (j = 0; j < reorder_buf->buf_size; j++) + __skb_queue_purge(&reorder_buf->entries[j]); + /* + * Prevent timer re-arm. This prevents a very far fetched case + * where we timed out on the notification. 
There may be prior + * RX frames pending in the RX queue before the notification + * that might get processed between now and the actual deletion + * and we would re-arm the timer although we are deleting the + * reorder buffer. + */ + reorder_buf->removed = true; + spin_unlock_bh(&reorder_buf->lock); + del_timer_sync(&reorder_buf->reorder_timer); + } +} + +static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, + u32 sta_id, + struct iwl_mvm_baid_data *data, + u16 ssn, u8 buf_size) +{ + int i; + + for (i = 0; i < mvm->trans->num_rx_queues; i++) { + struct iwl_mvm_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + int j; + + reorder_buf->num_stored = 0; + reorder_buf->head_sn = ssn; + reorder_buf->buf_size = buf_size; + /* rx reorder timer */ + reorder_buf->reorder_timer.function = + iwl_mvm_reorder_timer_expired; + reorder_buf->reorder_timer.data = (unsigned long)reorder_buf; + init_timer(&reorder_buf->reorder_timer); + spin_lock_init(&reorder_buf->lock); + reorder_buf->mvm = mvm; + reorder_buf->queue = i; + reorder_buf->sta_id = sta_id; + for (j = 0; j < reorder_buf->buf_size; j++) + __skb_queue_head_init(&reorder_buf->entries[j]); + } +} + int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size) + int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; + struct iwl_mvm_baid_data *baid_data = NULL; int ret; u32 status; @@ -864,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return -ENOSPC; } + if (iwl_mvm_has_new_rx_api(mvm) && start) { + /* + * Allocate here so if allocation fails we can bail out early + * before starting the BA session in the firmware + */ + baid_data = kzalloc(sizeof(*baid_data) + + mvm->trans->num_rx_queues * + sizeof(baid_data->reorder_buf[0]), + GFP_KERNEL); + if (!baid_data) + return -ENOMEM; + } + cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; @@ -882,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) - return ret; + goto out_free; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: @@ -900,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - if (!ret) { - if (start) - mvm->rx_ba_sessions++; - else if (mvm->rx_ba_sessions > 0) - /* check that restart flow didn't zero the counter */ - mvm->rx_ba_sessions--; + if (ret) + goto out_free; + + if (start) { + u8 baid; + + mvm->rx_ba_sessions++; + + if (!iwl_mvm_has_new_rx_api(mvm)) + return 0; + + if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { + ret = -EINVAL; + goto out_free; + } + baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> + IWL_ADD_STA_BAID_SHIFT); + baid_data->baid = baid; + baid_data->timeout = timeout; + baid_data->last_rx = jiffies; + init_timer(&baid_data->session_timer); + baid_data->session_timer.function = + iwl_mvm_rx_agg_session_expired; + baid_data->session_timer.data = + (unsigned long)&mvm->baid_map[baid]; + baid_data->mvm = mvm; + baid_data->tid = tid; + baid_data->sta_id = mvm_sta->sta_id; + + mvm_sta->tid_to_baid[tid] = baid; + if (timeout) + mod_timer(&baid_data->session_timer, + TU_TO_EXP_TIME(timeout * 2)); + + iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id, + baid_data, ssn, buf_size); + /* + * protect the BA data with RCU to cover a case where our + * 
internal RX sync mechanism will timeout (not that it's + * supposed to happen) and we will free the session data while + * RX is being processed in parallel + */ + WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); + rcu_assign_pointer(mvm->baid_map[baid], baid_data); + } else if (mvm->rx_ba_sessions > 0) { + u8 baid = mvm_sta->tid_to_baid[tid]; + + /* check that restart flow didn't zero the counter */ + mvm->rx_ba_sessions--; + if (!iwl_mvm_has_new_rx_api(mvm)) + return 0; + + if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) + return -EINVAL; + + baid_data = rcu_access_pointer(mvm->baid_map[baid]); + if (WARN_ON(!baid_data)) + return -EINVAL; + + /* synchronize all rx queues so we can safely delete */ + iwl_mvm_free_reorder(mvm, baid_data); + del_timer_sync(&baid_data->session_timer); + RCU_INIT_POINTER(mvm->baid_map[baid], NULL); + kfree_rcu(baid_data, rcu_head); } + return 0; +out_free: + kfree(baid_data); return ret; } @@ -925,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { - mvm_sta->tfd_queue_msk &= ~BIT(queue); + /* In DQA-mode the queue isn't removed on agg termination */ + if (!iwl_mvm_is_dqa_supported(mvm)) + mvm_sta->tfd_queue_msk &= ~BIT(queue); mvm_sta->tid_disable_agg |= BIT(tid); } @@ -1008,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_bh(&mvm->queue_info_lock); - txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, - mvm->last_agg_queue); - if (txq_id < 0) { - ret = txq_id; - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Failed to allocate agg queue\n"); - goto release_locks; + /* + * Note the possible cases: + * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed + * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free + * one and mark it as reserved + * 3. 
In DQA mode, but no traffic yet on this TID: same treatment as in + * non-DQA mode, since the TXQ hasn't yet been allocated + */ + txq_id = mvmsta->tid_data[tid].txq_id; + if (!iwl_mvm_is_dqa_supported(mvm) || + mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, + mvm->last_agg_queue); + if (txq_id < 0) { + ret = txq_id; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Failed to allocate agg queue\n"); + goto release_locks; + } + + /* TXQ hasn't yet been enabled, so mark it only as reserved */ + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } - mvm->queue_info[txq_id].setup_reserved = true; spin_unlock_bh(&mvm->queue_info_lock); + IWL_DEBUG_TX_QUEUES(mvm, + "AGG for tid %d will be on queue #%d\n", + tid, txq_id); + tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; @@ -1053,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; + bool alloc_queue = true; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { @@ -1078,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]], - ssn, &cfg, wdg_timeout); + /* In DQA mode, the existing queue might need to be reconfigured */ + if (iwl_mvm_is_dqa_supported(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); + /* Maybe there is no need to even alloc a queue... */ + if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) + alloc_queue = false; + spin_unlock_bh(&mvm->queue_info_lock); + + /* + * Only reconfig the SCD for the queue if the window size has + * changed from current (become smaller) + */ + if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { + /* + * If reconfiguring an existing queue, it first must be + * drained + */ + ret = iwl_trans_wait_tx_queue_empty(mvm->trans, + BIT(queue)); + if (ret) { + IWL_ERR(mvm, + "Error draining queue before reconfig\n"); + return ret; + } + + ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, + mvmsta->sta_id, tid, + buf_size, ssn); + if (ret) { + IWL_ERR(mvm, + "Error reconfiguring TXQ #%d\n", queue); + return ret; + } + } + } + + if (alloc_queue) + iwl_mvm_enable_txq(mvm, queue, + vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, + &cfg, wdg_timeout); ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) @@ -1087,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* No need to mark as reserved */ spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].setup_reserved = false; + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); /* @@ -1134,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - /* No need to mark as reserved anymore */ spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[txq_id].setup_reserved = false; + /* + * The TXQ is marked as reserved only if no traffic came through yet + * This means no traffic has been sent on this TID (agg'd or not), so + * we no longer have use for the queue. Since it hasn't even been + * allocated through iwl_mvm_enable_txq, so we can just mark it back as + * free. 
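The aggregation start/stop hunks around this point replace the old setup_reserved flag with an explicit per-queue status, and only return a queue to the pool when it was still merely reserved, i.e. never enabled through iwl_mvm_enable_txq(). A compact sketch of that lifecycle; the enum values are illustrative stand-ins for the driver's IWL_MVM_QUEUE_FREE/RESERVED/READY states:

enum q_status { Q_FREE, Q_RESERVED, Q_READY };

/* Reserving a queue for a station: only a free queue may be promised. */
static int queue_reserve(enum q_status *st)
{
	if (*st != Q_FREE)
		return -1;
	*st = Q_RESERVED;
	return 0;
}

/* Enabling the queue (what iwl_mvm_enable_txq() implies) makes it ready. */
static void queue_enable(enum q_status *st)
{
	*st = Q_READY;
}

/*
 * Stopping an aggregation session only frees the queue when it never carried
 * traffic, i.e. it is still in the reserved state; a ready queue stays put.
 */
static void queue_release_if_unused(enum q_status *st)
{
	if (*st == Q_RESERVED)
		*st = Q_FREE;
}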
+ */ + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; spin_unlock_bh(&mvm->queue_info_lock); switch (tid_data->state) { @@ -1162,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - iwl_mvm_disable_txq(mvm, txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - 0); + if (!iwl_mvm_is_dqa_supported(mvm)) { + int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; + + iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0); + } return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -1215,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - /* No need to mark as reserved */ spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[txq_id].setup_reserved = false; + /* + * The TXQ is marked as reserved only if no traffic came through yet + * This means no traffic has been sent on this TID (agg'd or not), so + * we no longer have use for the queue. Since it hasn't even been + * allocated through iwl_mvm_enable_txq, so we can just mark it back as + * free. + */ + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; spin_unlock_bh(&mvm->queue_info_lock); if (old_state >= IWL_AGG_ON) { @@ -1230,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - 0); + if (!iwl_mvm_is_dqa_supported(mvm)) { + int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; + + iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue, + tid, 0); + } } return 0; @@ -1285,6 +1854,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); + /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then @@ -1391,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct ieee80211_key_seq seq; const u8 *pn; + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); + break; + default: + return -EINVAL; + } + memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1a8f69a41..d2c58f134 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -80,6 +80,60 @@ struct iwl_mvm; struct iwl_mvm_vif; /** + * DOC: DQA - Dynamic Queue Allocation -introduction + * + * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi + * driver to allow dynamic allocation of queues on-demand, rather than allocate + * them statically ahead of time. Ideally, we would like to allocate one queue + * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2 + * even if it also needs to send traffic to a sleeping STA1, without being + * blocked by the sleeping station. + * + * Although the queues in DQA mode are dynamically allocated, there are still + * some queues that are statically allocated: + * TXQ #0 - command queue + * TXQ #1 - aux frames + * TXQ #2 - P2P device frames + * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames + * TXQ #4 - BSS DATA frames queue + * TXQ #5-8 - Non-QoS and MGMT frames queue pool + * TXQ #9 - P2P GO/SoftAP probe responses + * TXQ #10-31 - DATA frames queue pool + * The queues are dynamically taken from either the MGMT frames queue pool or + * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every + * queue. + * + * When a frame for a previously unseen RA/TID comes in, it needs to be deferred + * until a queue is allocated for it, and only then can be TXed. Therefore, it + * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called + * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames. + * + * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT + * queues in the pool. If there is no longer a free MGMT queue to allocate, a + * queue will be allocated from the DATA pool instead. Since QoS NDPs can create + * a problem for aggregations, they too will use a MGMT queue. + * + * When adding a STA, a DATA queue is reserved for it so that it can TX from + * it. If no such free queue exists for reserving, the STA addition will fail. + * + * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a + * new RA/TID comes in for an existing STA, one of the STA's queues will become + * shared and will serve more than the single TID (but always for the same RA!). + * + * When a RA/TID needs to become aggregated, no new queue is required to be + * allocated, only mark the queue as aggregated via the ADD_STA command. Note, + * however, that a shared queue cannot be aggregated, and only after the other + * TIDs become inactive and are removed - only then can the queue be + * reconfigured and become aggregated. + * + * When removing a station, its queues are returned to the pool for reuse. Here + * we also need to make sure that we are synced with the worker thread that TXes + * the deferred frames so we don't get into a situation where the queues are + * removed and then the worker puts deferred frames onto the released queues or + * tries to allocate new queues for a STA we don't need anymore. + */ + +/** * DOC: station table - introduction * * The station table is a list of data structure that reprensent the stations. @@ -253,6 +307,7 @@ enum iwl_mvm_agg_state { /** * struct iwl_mvm_tid_data - holds the states for each RA / TID + * @deferred_tx_frames: deferred TX frames for this RA/TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). 
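The DQA DOC block above describes parking frames for a RA/TID that has no TX queue yet and letting the add_stream worker allocate a queue and drain the backlog. A self-contained sketch of that hand-off, with a plain linked list standing in for the sk_buff queue and a TID bitmap standing in for deferred_traffic_tid_map; all names and types here are illustrative, and the real driver keeps the deferred frames in FIFO order:

#include <stdint.h>

#define MAX_TIDS 9	/* 8 QoS TIDs plus the pseudo-TID used for MGMT */

struct frame { struct frame *next; };

struct tid_stream {
	int txq_id;		/* -1 until a queue has been allocated */
	struct frame *deferred;	/* frames parked until then (LIFO here for brevity) */
};

struct station {
	uint16_t deferred_tid_map;	/* one bit per TID with parked frames */
	struct tid_stream tid[MAX_TIDS];
};

/* TX path: no queue yet, so park the frame and flag the TID for the worker. */
static void defer_frame(struct station *sta, int tid, struct frame *f)
{
	f->next = sta->tid[tid].deferred;
	sta->tid[tid].deferred = f;
	sta->deferred_tid_map |= 1u << tid;
}

/* Worker: allocate a queue for each flagged TID and transmit its backlog. */
static void add_stream_worker(struct station *sta,
			      int (*alloc_queue)(int tid),
			      void (*xmit)(int txq, struct frame *f))
{
	for (int tid = 0; tid < MAX_TIDS; tid++) {
		if (!(sta->deferred_tid_map & (1u << tid)))
			continue;
		sta->deferred_tid_map &= ~(1u << tid);
		sta->tid[tid].txq_id = alloc_queue(tid);
		while (sta->tid[tid].deferred) {
			struct frame *f = sta->tid[tid].deferred;

			sta->tid[tid].deferred = f->next;
			xmit(sta->tid[tid].txq_id, f);
		}
	}
}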
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state { * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. - * @txq_id: Tx queue used by the BA session + * @txq_id: Tx queue used by the BA session / DQA * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that @@ -268,6 +323,7 @@ enum iwl_mvm_agg_state { * @tx_time: medium time consumed by this A-MPDU */ struct iwl_mvm_tid_data { + struct sk_buff_head deferred_tx_frames; u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ @@ -292,6 +348,15 @@ struct iwl_mvm_key_pn { } ____cacheline_aligned_in_smp q[]; }; +struct iwl_mvm_delba_data { + u32 baid; +} __packed; + +struct iwl_mvm_delba_notif { + struct iwl_mvm_internal_rxq_notif metadata; + struct iwl_mvm_delba_data delba; +} __packed; + /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection @@ -316,7 +381,11 @@ struct iwl_mvm_rxq_dup_data { * we need to signal the EOSP * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. - * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. + * @tid_to_baid: a simple map of TID to baid + * @reserved_queue: the queue reserved for this STA for DQA purposes + * Every STA has is given one reserved queue to allow it to operate. If no + * such queue can be guaranteed, the STA addition will fail. * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? @@ -329,6 +398,7 @@ struct iwl_mvm_rxq_dup_data { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data + * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -345,12 +415,17 @@ struct iwl_mvm_sta { bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; - struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; + struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; + u8 tid_to_baid[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; + u16 deferred_traffic_tid_map; + + u8 reserved_queue; + /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; @@ -378,8 +453,18 @@ struct iwl_mvm_int_sta { u32 tfd_queue_msk; }; +/** + * Send the STA info to the FW. + * + * @mvm: the iwl_mvm* to use + * @sta: the STA + * @update: this is true if the FW is being updated about a STA it already knows + * about. Otherwise (if this is a new STA), this should be false. + * @flags: if update==true, this marks what is being changed via ORs of values + * from enum iwl_sta_modify_flag. Otherwise, this is ignored. 
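The kernel-doc above distinguishes the first ADD_STA for a station (update == false) from a later modification that only changes its queues (update == true together with STA_MODIFY_QUEUES). A tiny sketch of that branch, mirroring the !update || (flags & STA_MODIFY_QUEUES) test in the sta.c hunk; the structure layout and the flag value are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define MODIFY_QUEUES 0x01	/* illustrative stand-in for STA_MODIFY_QUEUES */

struct add_sta_cmd {
	uint32_t tfd_queue_msk;
	uint8_t modify_mask;
};

/* The queue mask is sent on the first ADD_STA, or when only queues changed. */
static void fill_queue_fields(struct add_sta_cmd *cmd, bool update,
			      unsigned int flags, uint32_t queue_msk)
{
	if (!update || (flags & MODIFY_QUEUES)) {
		cmd->tfd_queue_msk = queue_msk;
		if (flags & MODIFY_QUEUES)
			cmd->modify_mask |= MODIFY_QUEUES;
	}
}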
+ */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - bool update); + bool update, unsigned int flags); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -413,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size); + int tid, u16 ssn, bool start, u8 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -459,5 +544,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 18711c5de..9f160fc58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } if (chandef) { - cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? + cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5); cmd.ci.channel = chandef->chan->hw_value; cmd.ci.width = iwl_mvm_get_channel_width(chandef); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f1f28255a..58fc7b3c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) return; - /* - * We are now handling a temperature notification from the firmware - * in ASYNC and hold the mutex. thermal_notify_framework will call - * us back through get_temp() which ought to send a SYNC command to - * the firmware and hence to take the mutex. - * Avoid the deadlock by unlocking the mutex here. 
- */ if (mvm->tz_device.tzone) { struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; - mutex_unlock(&mvm->mutex); thermal_notify_framework(tz_dev->tzone, tz_dev->fw_trips_index[ths_crossed]); - mutex_lock(&mvm->mutex); } #endif /* CONFIG_THERMAL */ } @@ -368,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) { - struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int i, err; for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); + if (!mvmsta) continue; - mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (enable == mvmsta->tt_tx_protection) continue; err = iwl_mvm_tx_protection(mvm, mvmsta, enable); @@ -796,9 +785,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - return -EBUSY; - *state = mvm->cooling_dev.cur_state; return 0; @@ -813,9 +799,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) return -EIO; - if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) - return -EBUSY; - mutex_lock(&mvm->mutex); if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 34731e29c..779bafcbc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -67,6 +67,7 @@ #include <linux/etherdevice.h> #include <linux/tcp.h> #include <net/ip.h> +#include <net/ipv6.h> #include "iwl-trans.h" #include "iwl-eeprom-parse.h" @@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, addr, tid, ssn); } +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) + +static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd) +{ +#if IS_ENABLED(CONFIG_INET) + u16 mh_len = ieee80211_hdrlen(hdr->frame_control); + u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist); + u8 protocol = 0; + + /* + * Do not compute checksum if already computed or if transport will + * compute it + */ + if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) + return; + + /* We do not expect to be requested to csum stuff we do not support */ + if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || + (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)), + "No support for requested checksum\n")) { + skb_checksum_help(skb); + return; + } + + if (skb->protocol == htons(ETH_P_IP)) { + protocol = ip_hdr(skb)->protocol; + } else { +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6hdr *ipv6h = + (struct ipv6hdr *)skb_network_header(skb); + unsigned int off = sizeof(*ipv6h); + + protocol = ipv6h->nexthdr; + while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { + /* only supported extension headers */ + if (protocol != NEXTHDR_ROUTING && + protocol != NEXTHDR_HOP && + protocol != NEXTHDR_DEST && + protocol != NEXTHDR_FRAGMENT) { + skb_checksum_help(skb); + return; + } + + if (protocol == NEXTHDR_FRAGMENT) { + struct frag_hdr *hp = + OPT_HDR(struct frag_hdr, skb, off); + + protocol = hp->nexthdr; + off += 
sizeof(struct frag_hdr); + } else { + struct ipv6_opt_hdr *hp = + OPT_HDR(struct ipv6_opt_hdr, skb, off); + + protocol = hp->nexthdr; + off += ipv6_optlen(hp); + } + } + /* if we get here - protocol now should be TCP/UDP */ +#endif + } + + if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { + WARN_ON_ONCE(1); + skb_checksum_help(skb); + return; + } + + /* enable L4 csum */ + offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); + + /* + * Set offset to IP header (snap). + * We don't support tunneling so no need to take care of inner header. + * Size is in words. + */ + offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); + + /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ + if (skb->protocol == htons(ETH_P_IP) && + (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) { + ip_hdr(skb)->check = 0; + offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); + } + + /* reset UDP/TCP header csum */ + if (protocol == IPPROTO_TCP) + tcp_hdr(skb)->check = 0; + else + udp_hdr(skb)->check = 0; + + /* mac header len should include IV, size is in words */ + if (info->control.hw_key) + mh_len += info->control.hw_key->iv_len; + mh_len /= 2; + offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; + + tx_cmd->offload_assist = cpu_to_le16(offload_assist); +#endif +} + /* * Sets most of the Tx cmd's fields */ @@ -127,6 +233,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) + tx_cmd->offload_assist |= + cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)); } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); @@ -187,9 +296,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, /* Total # bytes to be transmitted */ tx_cmd->len = cpu_to_le16((u16)skb->len + (uintptr_t)skb_info->driver_data[0]); - tx_cmd->next_frame_len = 0; tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; + + /* padding is inserted later in transport */ + if (ieee80211_hdrlen(fc) % 4 && + !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU)))) + tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD)); + + iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd); } /* @@ -245,7 +360,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, &mvm->nvm_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ - if (info->band == IEEE80211_BAND_5GHZ) + if (info->band == NL80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* For 2.4 GHZ band, check that there is no need to remap */ @@ -258,7 +373,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), mvm->mgmt_last_antenna_idx); - if (info->band == IEEE80211_BAND_2GHZ && + if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; else @@ -360,6 +475,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, return dev_cmd; } +static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, __le16 fc) +{ + if (iwl_mvm_is_dqa_supported(mvm)) { + if (info->control.vif->type == NL80211_IFTYPE_AP && + ieee80211_is_probe_resp(fc)) + return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + else if (ieee80211_is_mgmt(fc) && + info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + return 
IWL_MVM_DQA_P2P_DEVICE_QUEUE; + } + + return info->hw_queue; +} + int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -369,6 +499,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct iwl_tx_cmd *tx_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int queue; memcpy(&info, skb->cb, sizeof(info)); @@ -393,6 +524,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info.control.vif->type == NL80211_IFTYPE_STATION) IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + queue = info.hw_queue; + /* * If the interface on which the frame is sent is the P2P_DEVICE * or an AP/GO interface use the broadcast station associated @@ -408,10 +541,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP) + info.control.vif->type == NL80211_IFTYPE_AP) { sta_id = mvmvif->bcast_sta.sta_id; - else if (info.control.vif->type == NL80211_IFTYPE_STATION && - is_multicast_ether_addr(hdr->addr1)) { + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, + hdr->frame_control); + } else if (info.control.vif->type == NL80211_IFTYPE_STATION && + is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); if (ap_sta_id != IWL_MVM_STATION_COUNT) @@ -419,7 +554,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } - IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue); + IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) @@ -430,7 +565,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) { + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } @@ -463,6 +598,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; u16 amsdu_add, snap_ip_tcp, pad, i = 0; unsigned int dbg_max_amsdu_len; + netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 *qc, tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + @@ -473,15 +609,30 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; + dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + if (!sta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || - !mvmsta->tlc_amsdu) { + (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) { num_subframes = 1; pad = 0; goto segment; } /* + * Do not build AMSDU for IPv6 with extension headers. + * ask stack to segment and checkum the generated MPDUs for us. + */ + if (skb->protocol == htons(ETH_P_IPV6) && + ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != + IPPROTO_TCP) { + num_subframes = 1; + pad = 0; + netdev_features &= ~NETIF_F_CSUM_MASK; + goto segment; + } + + /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. 
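Both the iwl_mvm_tx_csum() hunk earlier in tx.c and the A-MSDU check just above walk the IPv6 extension-header chain and give up (software checksum, or a single subframe) when anything other than hop-by-hop, routing, destination-options or fragment headers precedes the transport header. A standalone sketch of that walk over a raw extension-header buffer; the function and parameter names are illustrative, and the final TCP/UDP check is left to the caller as in the driver:

#include <stddef.h>
#include <stdint.h>
#include <netinet/in.h>	/* IPPROTO_* numbers, including the IPv6 extension headers */

/*
 * Walk the IPv6 extension headers that start right after the fixed IPv6
 * header and return the upper-layer protocol number, or -1 when no supported
 * upper-layer protocol can be reached.
 */
static int ipv6_upper_proto(const uint8_t *ext, size_t len, uint8_t first_nexthdr)
{
	uint8_t proto = first_nexthdr;
	size_t off = 0;

	for (;;) {
		switch (proto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (off + 8 > len)
				return -1;
			proto = ext[off];			/* Next Header field */
			off += ((size_t)ext[off + 1] + 1) * 8;	/* Hdr Ext Len field */
			break;
		case IPPROTO_FRAGMENT:
			if (off + 8 > len)
				return -1;
			proto = ext[off];
			off += 8;	/* the fragment header has a fixed size */
			break;
		case IPPROTO_NONE:
			return -1;	/* chain ends without a transport header */
		default:
			return proto;	/* e.g. IPPROTO_TCP or IPPROTO_UDP */
		}
	}
}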
*/ @@ -493,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } max_amsdu_len = sta->max_amsdu_len; - dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); /* the Tx FIFO to which this A-MSDU will be routed */ txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; @@ -507,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = min_t(unsigned int, max_amsdu_len, mvm->shared_mem_cfg.txfifo_size[txf] - 256); - if (dbg_max_amsdu_len) + if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, dbg_max_amsdu_len); @@ -575,7 +725,7 @@ segment: skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); - next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG); + next = skb_gso_segment(skb, netdev_features); skb_shinfo(skb)->gso_size = mss; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; @@ -641,6 +791,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } #endif +static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, u8 tid, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 mac_queue = info->hw_queue; + struct sk_buff_head *deferred_tx_frames; + + lockdep_assert_held(&mvm_sta->lock); + + mvm_sta->deferred_traffic_tid_map |= BIT(tid); + set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames); + + deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames; + + skb_queue_tail(deferred_tx_frames, skb); + + /* + * The first deferred frame should've stopped the MAC queues, so we + * should never get a second deferred frame for the RA/TID. + */ + if (!WARN(skb_queue_len(deferred_tx_frames) != 1, + "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid, + skb_queue_len(deferred_tx_frames))) { + iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); + schedule_work(&mvm->add_stream_wk); + } +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -656,7 +835,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u8 txq_id = info->hw_queue; - bool is_data_qos = false, is_ampdu = false; + bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -697,8 +876,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, seq_number &= IEEE80211_SCTL_SEQ; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); - is_data_qos = true; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; + } else if (iwl_mvm_is_dqa_supported(mvm) && + (ieee80211_is_qos_nullfunc(fc) || + ieee80211_is_nullfunc(fc))) { + /* + * nullfunc frames should go to the MGMT queue regardless of QOS + */ + tid = IWL_MAX_TID_COUNT; + txq_id = mvmsta->tid_data[tid].txq_id; } /* Copy MAC header from skb into command buffer */ @@ -719,18 +905,36 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->tid_data[tid].txq_id; } + if (iwl_mvm_is_dqa_supported(mvm)) { + if (unlikely(mvmsta->tid_data[tid].txq_id == + IEEE80211_INVAL_HW_QUEUE)) { + iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); + + /* + * The frame is now deferred, and the worker scheduled + * will re-allocate it, so we can free it for now. 
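The iwl_mvm_tx_tso() changes above cap the A-MSDU length by what the peer advertised, by the size of the target TX FIFO minus 256 bytes of headroom, and, only when one is set, by the debugfs override. A standalone sketch of that clamping with all sizes in bytes; the 256-byte margin comes from the hunk, the names are illustrative:

#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/*
 * Cap the A-MSDU length: never more than the peer advertised, never more
 * than the TX FIFO can hold (keeping 256 bytes of headroom), and honour a
 * debug override only when one is configured.
 */
static uint32_t clamp_amsdu_len(uint32_t sta_max, uint32_t txfifo_size,
				uint32_t dbg_override)
{
	uint32_t len = min_u32(sta_max, txfifo_size - 256);

	if (dbg_override)
		len = min_u32(len, dbg_override);
	return len;
}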
+ */ + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); + return 0; + } + + txq_id = mvmsta->tid_data[tid].txq_id; + } + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; - if (is_data_qos && !ieee80211_has_morefrags(fc)) + if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); - if (txq_id < mvm->first_agg_queue) + /* Increase pending frames count if this isn't AMPDU */ + if (!is_ampdu) atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -883,7 +1087,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status) #endif /* CONFIG_IWLWIFI_DEBUG */ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, - enum ieee80211_band band, + enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) @@ -978,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; bool is_ndp = false; + bool txq_agg = false; /* Is this TXQ aggregated */ __skb_queue_head_init(&skbs); @@ -1108,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); + txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON); + if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, @@ -1163,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... */ - if (txq_id >= mvm->first_agg_queue) + if (txq_agg) goto out; /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); + WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); /* If we have still frames for this STA nothing to do here */ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) @@ -1261,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); - struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int queue = SEQ_TO_QUEUE(sequence); - if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue)) + if (WARN_ON_ONCE(queue < mvm->first_agg_queue && + (!iwl_mvm_is_dqa_supported(mvm) || + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) return; if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) @@ -1273,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); - if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 53cdc5760..161b99efd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. 
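The TX-status hunks above keep the per-station pending_frames accounting only for frames that were not sent on an aggregation queue, and let a deferred station removal proceed once that counter drains to zero. A small sketch of the counter pattern using C11 atomics; the names are illustrative and a single global counter stands in for the per-station array:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pending_frames;

/* TX path: only non-A-MPDU traffic is tracked. */
static void track_tx(bool is_ampdu)
{
	if (!is_ampdu)
		atomic_fetch_add(&pending_frames, 1);
}

/*
 * Reclaim path: subtract the frames just freed and report whether the
 * counter reached zero, i.e. whether a pending station removal can go ahead.
 */
static bool reclaim_tx(int freed)
{
	return atomic_fetch_sub(&pending_frames, freed) == freed;
}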
*/ - if (!(cmd->flags & CMD_ASYNC)) + if (!(cmd->flags & CMD_ASYNC)) { lockdep_assert_held(&mvm->mutex); + if (!(cmd->flags & CMD_SEND_IN_IDLE)) + iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD); + } ret = iwl_trans_send_cmd(mvm->trans, cmd); + if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) + iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD); + /* * If the caller wants the SKB, then don't hide any problems, the * caller might access the response buffer which will be NULL if @@ -217,14 +223,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { }; int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, - enum ieee80211_band band) + enum nl80211_band band) { int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; int idx; int band_offset = 0; /* Legacy rate format, search for match in table */ - if (band == IEEE80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (fw_rate_idx_to_plcp[idx] == rate) @@ -491,98 +497,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) -{ - struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table_v1 table; - u32 base; - - base = mvm->error_event_table; - if (mvm->cur_ucode == IWL_UCODE_INIT) { - if (!base) - base = mvm->fw->init_errlog_ptr; - } else { - if (!base) - base = mvm->fw->inst_errlog_ptr; - } - - if (base < 0x800000) { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); - return; - } - - iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - - if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { - IWL_ERR(trans, "Start IWL Error Log Dump:\n"); - IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", - mvm->status, table.valid); - } - - /* Do not change this output - scripts rely on it */ - - IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); - - trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, - table.data1, table.data2, table.data3, - table.blink2, table.ilink1, table.ilink2, - table.bcon_time, table.gp1, table.gp2, - table.gp3, table.ucode_ver, 0, - table.hw_ver, table.brd_ver); - IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); - IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); - IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); - IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); - IWL_ERR(mvm, "0x%08X | data1\n", table.data1); - IWL_ERR(mvm, "0x%08X | data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | data3\n", table.data3); - IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); - IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); - IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); - IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); - IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); - IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver); - IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); - IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); - IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); - IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); - IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); - IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); - IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); 
- IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); - IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); - IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); - IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); - IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); - IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); - IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); - IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); - IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); - - if (mvm->support_umac_log) - iwl_mvm_dump_umac_error_log(mvm); -} - void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; u32 base; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { - iwl_mvm_dump_nic_error_log_old(mvm); - return; - } - base = mvm->error_event_table; if (mvm->cur_ucode == IWL_UCODE_INIT) { if (!base) @@ -667,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].hw_queue_refcount == 0 && - !mvm->queue_info[i].setup_reserved) + mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; return -ENOSPC; } +int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, + int tid, int frame_limit, u16 ssn) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 1, + .window = frame_limit, + .sta_id = sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = fifo, + .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), + .tid = tid, + }; + int ret; + + spin_lock_bh(&mvm->queue_info_lock); + if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, + "Trying to reconfig unallocated queue %d\n", queue)) { + spin_unlock_bh(&mvm->queue_info_lock); + return -ENXIO; + } + spin_unlock_bh(&mvm->queue_info_lock); + + IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); + + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); + WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", + queue, fifo, ret); + + return ret; +} + void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) @@ -694,6 +647,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].hw_queue_refcount++; if (mvm->queue_info[queue].hw_queue_refcount > 1) enable_queue = false; + else + mvm->queue_info[queue].ra_sta_id = cfg->sta_id; mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); IWL_DEBUG_TX_QUEUES(mvm, @@ -766,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].hw_queue_refcount--; cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 
1 : 0; + if (!cmd.enable) + mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", @@ -779,6 +736,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, return; } + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].hw_queue_refcount || mvm->queue_info[queue].tid_bitmap || @@ -1079,3 +1038,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, out: ieee80211_connection_loss(vif); } + +int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, + enum iwl_lqm_cmd_operatrions operation, + u32 duration, u32 timeout) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_link_qual_msrmnt_cmd cmd = { + .cmd_operation = cpu_to_le32(operation), + .mac_id = cpu_to_le32(mvm_vif->id), + .measurement_time = cpu_to_le32(duration), + .timeout = cpu_to_le32(timeout), + }; + u32 cmdid = + iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0); + int ret; + + if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LQM_SUPPORT)) + return -EOPNOTSUPP; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return -EINVAL; + + switch (operation) { + case LQM_CMD_OPERATION_START_MEASUREMENT: + if (iwl_mvm_lqm_active(mvm_vif->mvm)) + return -EBUSY; + if (!vif->bss_conf.assoc) + return -EINVAL; + mvm_vif->lqm_active = true; + break; + case LQM_CMD_OPERATION_STOP_MEASUREMENT: + if (!iwl_mvm_lqm_active(mvm_vif->mvm)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd), + &cmd); + + /* command failed - roll back lqm_active state */ + if (ret) { + mvm_vif->lqm_active = + operation == LQM_CMD_OPERATION_STOP_MEASUREMENT; + } + + return ret; +} + +static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + bool *lqm_active = _data; + + *lqm_active = *lqm_active || mvm_vif->lqm_active; +} + +bool iwl_mvm_lqm_active(struct iwl_mvm *mvm) +{ + bool ret = false; + + lockdep_assert_held(&mvm->mutex); + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_lqm_active_iterator, &ret); + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 79d7cd7d4..a588b05e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -493,17 +493,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, 
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -593,6 +596,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; + const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int ret; @@ -620,6 +624,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) cfg = cfg_7265d; iwl_trans->cfg = cfg_7265d; } + + if (iwl_trans->cfg->rf_id) { + if (cfg == &iwl9260_2ac_cfg) + cfg_9260lc = &iwl9260lc_2ac_cfg; + if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { + cfg = cfg_9260lc; + iwl_trans->cfg = cfg_9260lc; + } + } #endif pci_set_drvdata(pdev, iwl_trans); @@ -661,10 +674,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* The PCI device starts with a reference taken and we are * supposed to release it here. But to simplify the * interaction with the opmode, we don't do it now, but let - * the opmode release it when it's ready. To account for this - * reference, we start with ref_count set to 1. + * the opmode release it when it's ready. */ - trans_pcie->ref_count = 1; return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index dadafbdef..de6974f9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -348,7 +348,7 @@ struct iwl_tso_hdr_page { struct iwl_trans_pcie { struct iwl_rxq *rxq; struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; - struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; + struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; @@ -403,10 +403,6 @@ struct iwl_trans_pcie { bool cmd_hold_nic_awake; bool ref_cmd_in_flight; - /* protect ref counter */ - spinlock_t ref_lock; - u32 ref_count; - dma_addr_t fw_mon_phys; struct page *fw_mon_page; u32 fw_mon_size; @@ -485,9 +481,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); -void iwl_trans_pcie_ref(struct iwl_trans *trans); -void iwl_trans_pcie_unref(struct iwl_trans *trans); - static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) { struct iwl_tfd_tb *tb = &tfd->tbs[idx]; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 4be3c35af..0a4a3c502 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) return cpu_to_le32((u32)(dma_addr >> 8)); } -static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) +static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs, + u64 val) { - iwl_write_prph(trans, ofs, val & 0xffffffff); - iwl_write_prph(trans, ofs + 4, val >> 32); + iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); + iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); } /* @@ -208,10 +209,14 @@ static void 
iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, rxq->write_actual = round_down(rxq->write, 8); if (trans->cfg->mq_rx_supported) - iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), - rxq->write_actual); - else - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), + rxq->write_actual); + /* + * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to + * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will + * not wake the NIC. + */ + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) @@ -694,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size; + unsigned long flags; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ switch (trans_pcie->rx_buf_size) { @@ -711,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; } + if (!iwl_trans_grab_nic_access(trans, &flags)) + return; + /* Stop Rx DMA */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); /* reset and flush pointers */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0); + iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); + iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); + iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); /* Reset driver's Rx queue write index */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); /* Tell device where to find RBD circular buffer in DRAM */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, - (u32)(rxq->bd_dma >> 8)); + iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); /* Tell device where in DRAM to update its Rx status */ - iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, - rxq->rb_stts_dma >> 4); + iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); /* Enable Rx DMA * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in @@ -737,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) * RB timeout 0x10 * 256 RBDs */ - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | - FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | - FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - rb_size| - (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| - (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + rb_size | + (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + iwl_trans_release_nic_access(trans, &flags); /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); @@ -757,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size, enabled = 0; + unsigned long flags; int i; switch (trans_pcie->rx_buf_size) { @@ -774,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct 
iwl_trans *trans) rb_size = RFH_RXF_DMA_RB_SIZE_4K; } + if (!iwl_trans_grab_nic_access(trans, &flags)) + return; + /* Stop Rx DMA */ - iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); /* disable free amd used rx queue operation */ - iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); + iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); for (i = 0; i < trans->num_rx_queues; i++) { /* Tell device where to find RBD free table in DRAM */ - iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), - (u64)(trans_pcie->rxq[i].bd_dma)); + iwl_pcie_write_prph_64_no_grab(trans, + RFH_Q_FRBDCB_BA_LSB(i), + trans_pcie->rxq[i].bd_dma); /* Tell device where to find RBD used table in DRAM */ - iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), - (u64)(trans_pcie->rxq[i].used_bd_dma)); + iwl_pcie_write_prph_64_no_grab(trans, + RFH_Q_URBDCB_BA_LSB(i), + trans_pcie->rxq[i].used_bd_dma); /* Tell device where in DRAM to update its Rx status */ - iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), - trans_pcie->rxq[i].rb_stts_dma); + iwl_pcie_write_prph_64_no_grab(trans, + RFH_Q_URBD_STTS_WPTR_LSB(i), + trans_pcie->rxq[i].rb_stts_dma); /* Reset device indice tables */ - iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); - iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); - iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); + iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); + iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); + iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); enabled |= BIT(i) | BIT(i + 16); } @@ -808,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) * Drop frames that exceed RB size * 512 RBDs */ - iwl_write_prph(trans, RFH_RXF_DMA_CFG, - RFH_DMA_EN_ENABLE_VAL | - rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | - RFH_RXF_DMA_MIN_RB_4_8 | - RFH_RXF_DMA_DROP_TOO_LARGE_MASK | - RFH_RXF_DMA_RBDCB_SIZE_512); + iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, + RFH_DMA_EN_ENABLE_VAL | + rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | + RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_DROP_TOO_LARGE_MASK | + RFH_RXF_DMA_RBDCB_SIZE_512); /* * Activate DMA snooping. * Set RX DMA chunk size to 64B * Default queue is 0 */ - iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | - (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | - RFH_GEN_CFG_SERVICE_DMA_SNOOP); + iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | + (DEFAULT_RXQ_NUM << + RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | + RFH_GEN_CFG_SERVICE_DMA_SNOOP); /* Enable the relevant rx queues */ - iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); + iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); + + iwl_trans_release_nic_access(trans, &flags); /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); @@ -908,6 +929,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); num_alloc = queue_size + allocator_pool_size; + BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != + ARRAY_SIZE(trans_pcie->rx_pool)); for (i = 0; i < num_alloc; i++) { struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; @@ -1292,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, * write 1 clear (W1C) register, meaning that it's being clear * by writing 1 to the bit. 
*/ - iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); + iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); } /* @@ -1805,19 +1828,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; - struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats; + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta_fh, inta_hw; lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock(&trans_pcie->irq_lock); - inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD); - inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); + inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); /* * Clear causes registers to avoid being handling the same cause. */ - iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); - iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); + iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); + iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); spin_unlock(&trans_pcie->irq_lock); if (unlikely(!(inta_fh | inta_hw))) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b2b79354d..f603d7830 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -269,9 +269,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ - if (trans->cfg->base_params->pll_cfg_val) - iwl_set_bit(trans, CSR_ANA_PLL_CFG, - trans->cfg->base_params->pll_cfg_val); + if (trans->cfg->base_params->pll_cfg) + iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); /* * Set "initialization complete" bit to move adapter from @@ -361,8 +360,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); + usleep_range(1000, 2000); /* * Set "initialization complete" bit to move adapter from @@ -408,8 +406,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * SHRD_HW_RST). Turn MAC off before proceeding. */ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); + usleep_range(1000, 2000); /* Enable LP XTAL by indirect access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); @@ -506,8 +503,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) /* Reset the entire device */ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - udelay(10); + usleep_range(1000, 2000); /* * Clear "initialization complete" bit to move adapter from @@ -586,7 +582,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); - msleep(1); + usleep_range(1000, 2000); for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ @@ -1074,7 +1070,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) /* stop and reset the on-board processor */ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - udelay(20); + usleep_range(1000, 2000); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. 
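
The rx.c hunks above rework iwl_pcie_rx_hw_init() and iwl_pcie_rx_mq_hw_init() so the NIC is woken only once per init sequence: iwl_trans_grab_nic_access() is taken up front, the registers are then programmed with the raw iwl_write32()/iwl_write_prph_no_grab() accessors, and access is released at the end, instead of every iwl_write_direct32()/iwl_write_prph() call grabbing and dropping MAC access on its own. Below is a minimal sketch of that shape; it only illustrates the grab/raw-write/release pattern (the register mix is arbitrary), relies on the driver's internal helpers, and is not part of the patch nor compilable outside the iwlwifi tree.

/* Sketch: batch several register writes under a single NIC-access grab,
 * mirroring the iwl_pcie_rx_hw_init() changes above. */
static void example_batched_rx_writes(struct iwl_trans *trans,
				      dma_addr_t bd_dma)
{
	unsigned long flags;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;		/* device not reachable - nothing to do */

	/* raw accessors: no per-write grab/release */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (u32)(bd_dma >> 8));
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	iwl_trans_release_nic_access(trans, &flags);
}
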
@@ -1321,6 +1317,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, * after this call. */ iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); @@ -1434,7 +1431,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, int ret, i; if (trans->cfg->mq_rx_supported) { - max_vector = min_t(u32, (num_possible_cpus() + 1), + max_vector = min_t(u32, (num_possible_cpus() + 2), IWL_MAX_RX_HW_QUEUES); for (i = 0; i < max_vector; i++) trans_pcie->msix_entries[i].entry = i; @@ -1465,7 +1462,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, ret = pci_enable_msi(pdev); if (ret) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); + dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { @@ -1499,8 +1496,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); for (j = 0; j < i; j++) - free_irq(trans_pcie->msix_entries[i].vector, - &trans_pcie->msix_entries[i]); + free_irq(trans_pcie->msix_entries[j].vector, + &trans_pcie->msix_entries[j]); pci_disable_msix(pdev); return ret; } @@ -1525,8 +1522,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* Reset the entire device */ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - - usleep_range(10, 15); + usleep_range(1000, 2000); iwl_pcie_apm_init(trans); @@ -1694,6 +1690,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) } free_percpu(trans_pcie->tso_hdr_page); + mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } @@ -1948,7 +1945,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) "WR pointer moved while flushing %d -> %d\n", wr_ptr, write_ptr)) return -ETIMEDOUT; - msleep(1); + usleep_range(1000, 2000); } if (q->read_ptr != q->write_ptr) { @@ -2011,41 +2008,35 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } -void iwl_trans_pcie_ref(struct iwl_trans *trans) +static void iwl_trans_pcie_ref(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; if (iwlwifi_mod_params.d0i3_disable) return; - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - trans_pcie->ref_count++; pm_runtime_get(&trans_pcie->pci_dev->dev); - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); + +#ifdef CONFIG_PM + IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", + atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); +#endif /* CONFIG_PM */ } -void iwl_trans_pcie_unref(struct iwl_trans *trans) +static void iwl_trans_pcie_unref(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; if (iwlwifi_mod_params.d0i3_disable) return; - spin_lock_irqsave(&trans_pcie->ref_lock, flags); - IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); - if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) { - spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); - return; - } - trans_pcie->ref_count--; - pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); - 
spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); +#ifdef CONFIG_PM + IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", + atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); +#endif /* CONFIG_PM */ } static const char *get_csr_string(int cmd) @@ -2793,7 +2784,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); - spin_lock_init(&trans_pcie->ref_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); @@ -2912,6 +2902,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } + trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); + iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 16ad820ca..d6beac9af 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -32,6 +32,7 @@ #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/pm_runtime.h> #include <net/ip6_checksum.h> #include <net/tso.h> @@ -596,6 +597,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb) } } +static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = false; + IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); + iwl_trans_unref(trans); + } + + if (!trans->cfg->base_params->apmg_wake_up_wa) + return; + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return; + + trans_pcie->cmd_hold_nic_awake = false; + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} + /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ @@ -620,6 +643,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) } iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); + + if (q->read_ptr == q->write_ptr) { + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + if (txq_id != trans_pcie->cmd_queue) { + IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", + q->id); + iwl_trans_unref(trans); + } else { + iwl_pcie_clear_cmd_in_flight(trans); + } + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + } } txq->active = false; @@ -1098,7 +1135,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (q->read_ptr == q->write_ptr) { IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); - iwl_trans_pcie_unref(trans); + iwl_trans_unref(trans); } out: @@ -1117,7 +1154,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, !trans_pcie->ref_cmd_in_flight) { trans_pcie->ref_cmd_in_flight = true; IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); - iwl_trans_pcie_ref(trans); + iwl_trans_ref(trans); } /* @@ -1148,29 +1185,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, return 0; } -static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - lockdep_assert_held(&trans_pcie->reg_lock); - - if (trans_pcie->ref_cmd_in_flight) { - 
trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); - iwl_trans_pcie_unref(trans); - } - - if (trans->cfg->base_params->apmg_wake_up_wa) { - if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) - return 0; - - trans_pcie->cmd_hold_nic_awake = false; - __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - } - return 0; -} - /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * @@ -1786,6 +1800,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd->id)); + if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + pm_runtime_active(&trans_pcie->pci_dev->dev), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); + return -ETIMEDOUT; + } + } + cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; @@ -2197,6 +2221,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, __le16 fc; u8 hdr_len; u16 wifi_seq; + bool amsdu; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -2288,11 +2313,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; - tb1_len = ALIGN(len, 4); - - /* Tell NIC about any 2-byte padding after MAC header */ - if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + /* do not align A-MSDU to dword as the subframe header aligns it */ + amsdu = ieee80211_is_data_qos(fc) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + if (trans_pcie->sw_csum_tx || !amsdu) { + tb1_len = ALIGN(len, 4); + /* Tell NIC about any 2-byte padding after MAC header */ + if (tb1_len != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + } else { + tb1_len = len; + } /* The first TB points to the scratchbuf data - min_copy bytes */ memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, @@ -2310,8 +2342,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - if (ieee80211_is_data_qos(fc) && - (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) { + if (amsdu) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) @@ -2342,7 +2373,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, txq->frozen_expiry_remainder = txq->wd_timeout; } IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); - iwl_trans_pcie_ref(trans); + iwl_trans_ref(trans); } /* Tell device the write index *just past* this latest filled TFD */ |
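
The final pcie/tx.c hunk stops dword-aligning the first TB when the frame is an A-MSDU, because the A-MSDU subframe header already provides the alignment; the non-A-MSDU (and software-checksum) path keeps the ALIGN(len, 4) behaviour and still advertises any 2-byte MAC-header padding via TX_CMD_FLG_MH_PAD_MSK. The standalone model below uses placeholder names (pick_tb1_len, ALIGN4, cmd_and_hdr_len) that are not taken from the driver; it is a sketch of the decision logic only, not the driver code.

#include <stdbool.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

/* Placeholder model of the new tb1_len choice in iwl_trans_pcie_tx(). */
static unsigned int pick_tb1_len(unsigned int cmd_and_hdr_len, bool amsdu,
				 bool sw_csum_tx, bool *need_mh_pad)
{
	unsigned int tb1_len;

	*need_mh_pad = false;

	if (sw_csum_tx || !amsdu) {
		/* legacy path: pad to a dword and flag the 2-byte padding */
		tb1_len = ALIGN4(cmd_and_hdr_len);
		if (tb1_len != cmd_and_hdr_len)
			*need_mh_pad = true;
	} else {
		/* A-MSDU: the subframe header aligns it, keep the exact len */
		tb1_len = cmd_and_hdr_len;
	}

	return tb1_len;
}

int main(void)
{
	bool pad;

	printf("plain QoS data, len 46 -> tb1_len %u, MH pad %d\n",
	       pick_tb1_len(46, false, false, &pad), pad);
	printf("A-MSDU,         len 46 -> tb1_len %u, MH pad %d\n",
	       pick_tb1_len(46, true, false, &pad), pad);
	return 0;
}
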
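
Another small but easy-to-miss fix above is in iwl_pcie_init_msix_handler() (pcie/trans.c): when requesting vector i fails, the error path must free the vectors that were already requested, i.e. indices 0..i-1 via the loop variable j, rather than repeatedly touching the failed entry i. The following standalone model of that partial-unwind idiom uses hypothetical acquire()/release() helpers standing in for request_irq()/free_irq(); it is illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define NVEC 4

/* hypothetical stand-ins for request_irq()/free_irq(); vector 2 "fails" */
static bool acquire(int i) { printf("acquire %d\n", i); return i != 2; }
static void release(int i) { printf("release %d\n", i); }

static int setup_vectors(void)
{
	int i, j;

	for (i = 0; i < NVEC; i++) {
		if (!acquire(i)) {
			/* unwind only what was actually acquired: 0..i-1 */
			for (j = 0; j < i; j++)
				release(j);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return setup_vectors() ? 1 : 0;
}
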