author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
commit    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      5e910f0e82173f4ef4f51111366a3f1299037a7b /drivers/net/wireless/iwlwifi/pcie

Initial import
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c        671
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h   523
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c        1235
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c     2604
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c        1906
5 files changed, 6939 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
new file mode 100644
index 000000000..b18569734
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -0,0 +1,671 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
+
+#include "iwl-trans.h"
+#include "iwl-drv.h"
+#include "internal.h"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
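
For illustration, the first table entry below, {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, expands by plain macro substitution to:

    { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x4232,
      .subvendor = PCI_ANY_ID, .subdevice = 0x1201,
      .driver_data = (kernel_ulong_t)&iwl5100_agn_cfg },

so each entry binds one (device, subdevice) pair to the iwl_cfg that iwl_pci_probe() later recovers from ent->driver_data.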
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static const struct pci_device_id iwl_hw_card_ids[] = {
+#if IS_ENABLED(CONFIG_IWLDVM)
+ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series WiFi/WiMax */
+ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+/* 6x00 Series */
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+/* 6x05 Series */
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+/* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+
+/* 6x50 WiFi/WiMax Series */
+ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+
+/* 6150 WiFi/WiMax Series */
+ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+
+/* 1000 Series WiFi */
+ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+
+/* 100 Series WiFi */
+ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+
+/* 130 Series WiFi */
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+
+/* 2x00 Series */
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+/* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+
+/* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+
+/* 105 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+/* 135 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+#endif /* CONFIG_IWLDVM */
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+/* 7260 Series */
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+
+/* 3165 Series */
+ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
+#endif /* CONFIG_IWLMVM */
+
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure\n");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element\n");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
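
Reconstructed from the checks above, the SPLX package returned by the SPLC method must look like the following sketch (the field names here are descriptive only, not taken from any ACPI specification):

    Package() {
        0,                    /* revision marker: must be the integer 0 */
        Package() {
            domain_type,      /* bitmask: SPL_DOMAINTYPE_WIFI must be set */
            power_limit       /* integer: returned as the power limit */
        }
    }

Anything else makes the function return 0, which the caller records as "no default power limit".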
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_DEBUG_INFO(trans,
+ "Could not retrieve root port ACPI handle\n");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ struct iwl_trans *iwl_trans;
+ struct iwl_trans_pcie *trans_pcie;
+ int ret;
+
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
+ if (IS_ERR(iwl_trans))
+ return PTR_ERR(iwl_trans);
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+ /*
+ * special-case 7265D, it has the same PCI IDs.
+ *
+ * Note that because we already pass the cfg to the transport above,
+ * all the parameters that the transport uses must, until that is
+ * changed, be identical to the ones in the 7265D configuration.
+ */
+ if (cfg == &iwl7265_2ac_cfg)
+ cfg_7265d = &iwl7265d_2ac_cfg;
+ else if (cfg == &iwl7265_2n_cfg)
+ cfg_7265d = &iwl7265d_2n_cfg;
+ else if (cfg == &iwl7265_n_cfg)
+ cfg_7265d = &iwl7265d_n_cfg;
+ if (cfg_7265d &&
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
+ cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
+ }
+#endif
+
+ pci_set_drvdata(pdev, iwl_trans);
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
+
+ if (IS_ERR(trans_pcie->drv)) {
+ ret = PTR_ERR(trans_pcie->drv);
+ goto out_free_trans;
+ }
+
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
+ /* register transport layer debugfs here */
+ ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
+ if (ret)
+ goto out_free_drv;
+
+ return 0;
+
+out_free_drv:
+ iwl_drv_stop(trans_pcie->drv);
+out_free_trans:
+ iwl_trans_pcie_free(iwl_trans);
+ return ret;
+}
+
+static void iwl_pci_remove(struct pci_dev *pdev)
+{
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_drv_stop(trans_pcie->drv);
+ iwl_trans_pcie_free(trans);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwl_pci_suspend(struct device *device)
+{
+ /* Before you put code here, think about WoWLAN. You cannot check here
+ * whether WoWLAN is enabled or not, and your code will run even if
+ * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
+ */
+
+ return 0;
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ bool hw_rfkill;
+
+ /* Before you put code here, think about WoWLAN. You cannot check here
+ * whether WoWLAN is enabled or not, and your code will run even if
+ * WoWLAN is enabled - the NIC may be alive.
+ */
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ if (!trans->op_mode)
+ return 0;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+
+#define IWL_PM_OPS (&iwl_dev_pm_ops)
+
+#else
+
+#define IWL_PM_OPS NULL
+
+#endif
+
+static struct pci_driver iwl_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = iwl_hw_card_ids,
+ .probe = iwl_pci_probe,
+ .remove = iwl_pci_remove,
+ .driver.pm = IWL_PM_OPS,
+};
+
+int __must_check iwl_pci_register_driver(void)
+{
+ int ret;
+ ret = pci_register_driver(&iwl_pci_driver);
+ if (ret)
+ pr_err("Unable to initialize PCI module\n");
+
+ return ret;
+}
+
+void iwl_pci_unregister_driver(void)
+{
+ pci_unregister_driver(&iwl_pci_driver);
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
new file mode 100644
index 000000000..376b84e54
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-op-mode.h"
+
+struct iwl_host_cmd;
+
+/* This file includes the declarations that are internal to the
+ * trans_pcie layer */
+
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+/**
+ * struct isr_statistics - interrupt statistics
+ */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 tx;
+ u32 unhandled;
+};
+
+/**
+ * struct iwl_rxq - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: the actual iwl_rx_mem_buffer storage; entries circulate between
+ *	@rx_free, @rx_used and @queue
+ * @queue: pointers into @pool for the buffers currently posted to the HW
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: last write index written to the HW, rounded down to a multiple of 8
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the queue indexes and the rx_free/rx_used lists
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rxq {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ bool need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index: current index
+ */
+static inline int iwl_queue_inc_wrap(int index)
+{
+ return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index: current index
+ */
+static inline int iwl_queue_dec_wrap(int index)
+{
+ return --index & (TFD_QUEUE_SIZE_MAX - 1);
+}
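
Both helpers depend on TFD_QUEUE_SIZE_MAX being a power of two (256, per the queue comment below), which makes the AND-mask an exact modulo. A quick illustration:

    /* with TFD_QUEUE_SIZE_MAX == 256 */
    iwl_queue_inc_wrap(255);  /* 256 & 255 == 0 */
    iwl_queue_dec_wrap(0);    /* -1 & 255 == 255 (two's complement) */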
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+ u32 flags;
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
+ * (unless future HW changes this). For the normal TX queues, n_window,
+ * which is the size of the software queue data, is also 256; however,
+ * for the command queue, n_window is only 32 since we don't need so
+ * many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
+ * the software buffers (in the variables @meta, @txb in struct
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid on the HW queue.
+ */
+struct iwl_queue {
+ int write_ptr; /* 1st empty entry (index) host_w */
+ int read_ptr; /* last used entry (index) host_r */
+ /* used for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_window; /* safe queue window */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how big the
+ * first TB must be to include the scratch buffer. Since
+ * the scratch is 4 bytes at offset 12, it's 16 now. If we
+ * make it bigger then allocations will be bigger and copy
+ * slower, so that's probably not useful.
+ */
+#define IWL_HCMD_SCRATCHBUF_SIZE 16
+
+struct iwl_pcie_txq_entry {
+ struct iwl_device_cmd *cmd;
+ struct sk_buff *skb;
+ /* buffer to free after command completes */
+ const void *free_buf;
+ struct iwl_cmd_meta meta;
+};
+
+struct iwl_pcie_txq_scratch_buf {
+ struct iwl_cmd_header hdr;
+ u8 buf[8];
+ __le32 scratch;
+};
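
Assuming the usual 4-byte struct iwl_cmd_header (cmd, flags and a __le16 sequence), the layout above is 4 + 8 + 4 = 16 bytes, matching IWL_HCMD_SCRATCHBUF_SIZE and the "scratch is 4 bytes at offset 12" remark earlier. A compile-time check along these lines (a sketch, not present in the original) would keep the two in sync:

    BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
                 IWL_HCMD_SCRATCHBUF_SIZE);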
+
+/**
+ * struct iwl_txq - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @scratchbufs: start of command headers, including scratch buffers, for
+ * the writeback -- this is DMA memory and an array holding one buffer
+ * for each command on the queue
+ * @scratchbufs_dma: DMA address for the scratchbufs start
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans_pcie: pointer back to transport (for timer)
+ * @need_update: indicates need to update read/write index
+ * @active: stores if queue is active
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+struct iwl_txq {
+ struct iwl_queue q;
+ struct iwl_tfd *tfds;
+ struct iwl_pcie_txq_scratch_buf *scratchbufs;
+ dma_addr_t scratchbufs_dma;
+ struct iwl_pcie_txq_entry *entries;
+ spinlock_t lock;
+ unsigned long frozen_expiry_remainder;
+ struct timer_list stuck_timer;
+ struct iwl_trans_pcie *trans_pcie;
+ bool need_update;
+ bool frozen;
+ u8 active;
+ bool ampdu;
+ unsigned long wd_timeout;
+};
+
+static inline dma_addr_t
+iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->scratchbufs_dma +
+ sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+}
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_replenish: work that will be called when buffers need to be allocated
+ * @drv: pointer to iwl_drv
+ * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
+ * @pci_dev: basic pci-network driver stuff
+ * @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @cmd_queue: command queue number
+ * @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @rx_page_order: page order for receive buffer size
+ * @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
+ * @fw_mon_phys: physical address of the buffer for the firmware monitor
+ * @fw_mon_page: points to the first page of the buffer for the firmware monitor
+ * @fw_mon_size: size of the buffer for the firmware monitor
+ */
+struct iwl_trans_pcie {
+ struct iwl_rxq rxq;
+ struct work_struct rx_replenish;
+ struct iwl_trans *trans;
+ struct iwl_drv *drv;
+
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ dma_addr_t ict_tbl_dma;
+ int ict_index;
+ bool use_ict;
+ struct isr_statistics isr_stats;
+
+ spinlock_t irq_lock;
+ u32 inta_mask;
+ u32 scd_base_addr;
+ struct iwl_dma_ptr scd_bc_tbls;
+ struct iwl_dma_ptr kw;
+
+ struct iwl_txq *txq;
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+
+ /* PCI bus related data */
+ struct pci_dev *pci_dev;
+ void __iomem *hw_base;
+
+ bool ucode_write_complete;
+ wait_queue_head_t ucode_write_waitq;
+ wait_queue_head_t wait_command_queue;
+
+ u8 cmd_queue;
+ u8 cmd_fifo;
+ unsigned int cmd_q_wdg_timeout;
+ u8 n_no_reclaim_cmds;
+ u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+
+ bool rx_buf_size_8k;
+ bool bc_table_dword;
+ bool scd_set_active;
+ u32 rx_page_order;
+
+ const char *const *command_names;
+
+ /* protect hw register */
+ spinlock_t reg_lock;
+ bool cmd_hold_nic_awake;
+ bool ref_cmd_in_flight;
+
+ /* protect ref counter */
+ spinlock_t ref_lock;
+ u32 ref_count;
+
+ dma_addr_t fw_mon_phys;
+ struct page *fw_mon_page;
+ u32 fw_mon_size;
+};
+
+#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+ ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
+
+static inline struct iwl_trans *
+iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
+{
+ return container_of((void *)trans_pcie, struct iwl_trans,
+ trans_specific);
+}
+
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ * Other functions: iwl_pcie_XXX
+ */
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct iwl_cfg *cfg);
+void iwl_trans_pcie_free(struct iwl_trans *trans);
+
+/*****************************************************
+* RX
+******************************************************/
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
+
+/*****************************************************
+* ICT - interrupt handling
+******************************************************/
+irqreturn_t iwl_pcie_isr(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd);
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
+void iwl_trans_pcie_ref(struct iwl_trans *trans);
+void iwl_trans_pcie_unref(struct iwl_trans *trans);
+
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+/*****************************************************
+* Error handling
+******************************************************/
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(trans, CSR_INT, 0xffffffff);
+ iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+ IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ }
+}
+
+static inline void iwl_stop_queue(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ } else
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->q.id);
+}
+
+static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+ (i >= q->read_ptr && i < q->write_ptr) :
+ !(i < q->read_ptr && i >= q->write_ptr);
+}
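
A worked example of the wrapped branch: with read_ptr == 250 and write_ptr == 10, the in-use region is indices 250..255 plus 0..9, so iwl_queue_used(q, 5) is true while iwl_queue_used(q, 100) is false.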
+
+static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
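
For the command queue, n_window is TFD_CMD_SLOTS (32), so for example a hardware index of 37 maps to software entry 37 & 31 == 5, which is the "window overlay" described in the struct iwl_queue comment above.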
+
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+ u8 cmd)
+{
+ if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
+ return "UNKNOWN";
+ return trans_pcie->command_names[cmd];
+}
+
+static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+{
+ return !(iwl_read32(trans, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+}
+
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
new file mode 100644
index 000000000..7ff69c642
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -0,0 +1,1235 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "iwl-op-mode.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rxq_alloc() Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_pcie_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_pcie_rxq_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
+ */
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
+{
+ /* Make sure RX_QUEUE_SIZE is a power of 2 */
+ BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
+}
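
For example, right after reset read == write, so (read - write - 1) & 255 == 255: the driver may restock at most 255 of the 256 slots, and the single slot kept permanently empty is what disambiguates a full queue from an empty one.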
+
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
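
The shift is lossless because every receive buffer is 256-byte aligned (enforced by a BUG_ON in iwl_pcie_rxq_alloc_rbs below), so the low 8 bits are always zero and the RBD stores the address in 256-byte units; e.g. a buffer at DMA address 0x12345600 becomes the RBD value 0x00123456.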
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
+ */
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ u32 reg;
+
+ lockdep_assert_held(&rxq->lock);
+
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ rxq->need_update = true;
+ return;
+ }
+ }
+
+ rxq->write_actual = round_down(rxq->write, 8);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
+
+ exit_unlock:
+ spin_unlock(&rxq->lock);
+}
+
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rx_mem_buffer *rxb;
+
+ /*
+ * If the device isn't enabled - no need to try to add buffers...
+ * This can happen when we stop the device and still have an interrupt
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
+ * So don't try to restock if the APM has been already stopped.
+ */
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return;
+
+ spin_lock(&rxq->lock);
+ while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock(&rxq->lock);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ schedule_work(&trans_pcie->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
+ }
+}
+
+/*
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
+ *
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
+ */
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock(&rxq->lock);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock(&rxq->lock);
+ return;
+ }
+ spin_unlock(&rxq->lock);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+ "order: %d\n",
+ trans_pcie->rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(trans, "Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock(&rxq->lock);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock(&rxq->lock);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
+ rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
+ spin_unlock(&rxq->lock);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock(&rxq->lock);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock(&rxq->lock);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
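+		/* Both constraints stem from the RBD format: the descriptor
+		 * stores the DMA address shifted right by 8 bits (see
+		 * iwl_pcie_dma_addr2rbd_ptr), so only a 36-bit, 256-byte
+		 * aligned address can be represented. */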
+
+ spin_lock(&rxq->lock);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+
+ spin_unlock(&rxq->lock);
+ }
+}
+
+static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ int i;
+
+ lockdep_assert_held(&rxq->lock);
+
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ if (!rxq->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+}
+
+/*
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
+ *
+ * When moving to rx_free, a page is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+{
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
+
+ iwl_pcie_rxq_restock(trans);
+}
+
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
+
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct device *dev = trans->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+	/* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+ if (trans_pcie->rx_buf_size_8k)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ /* reset and flush pointers */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+			   rb_size |
+			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /* W/A for interrupt coalescing bug in 7260 and 3160 */
+ if (trans->cfg->host_interrupt_operation_mode)
+ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+}
+
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+ int i;
+
+ lockdep_assert_held(&rxq->lock);
+
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ rxq->free_count = 0;
+
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add(&rxq->pool[i].list, &rxq->rx_used);
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ int i, err;
+
+ if (!rxq->bd) {
+ err = iwl_pcie_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock(&rxq->lock);
+
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_rxq_free_rbs(trans);
+ iwl_pcie_rx_init_rxb_lists(rxq);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ spin_unlock(&rxq->lock);
+
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+
+ iwl_pcie_rx_hw_init(trans, rxq);
+
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
+
+ return 0;
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+	/* if rxq->bd is NULL, it means that nothing has been allocated,
+	 * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_free_rbs(trans);
+ spin_unlock(&rxq->lock);
+
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+}
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ bool page_stolen = false;
+ int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ u32 offset = 0;
+
+ if (WARN_ON(!rxb))
+ return;
+
+ dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+
+ while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+ struct iwl_rx_packet *pkt;
+ struct iwl_device_cmd *cmd;
+ u16 sequence;
+ bool reclaim;
+ int index, cmd_index, err, len;
+ struct iwl_rx_cmd_buffer rxcb = {
+ ._offset = offset,
+ ._rx_page_order = trans_pcie->rx_page_order,
+ ._page = rxb->page,
+ ._page_stolen = false,
+ .truesize = max_len,
+ };
+
+ pkt = rxb_addr(&rxcb);
+
+ if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
+ break;
+
+ IWL_DEBUG_RX(trans,
+ "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ rxcb._offset,
+ get_cmd_string(trans_pcie, pkt->hdr.cmd),
+ pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+
+ len = iwl_rx_packet_len(pkt);
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+ trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+ if (reclaim) {
+ int i;
+
+ for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+ if (trans_pcie->no_reclaim_cmds[i] ==
+ pkt->hdr.cmd) {
+ reclaim = false;
+ break;
+ }
+ }
+ }
+
+ sequence = le16_to_cpu(pkt->hdr.sequence);
+ index = SEQ_TO_INDEX(sequence);
+ cmd_index = get_cmd_index(&txq->q, index);
+
+ if (reclaim)
+ cmd = txq->entries[cmd_index].cmd;
+ else
+ cmd = NULL;
+
+ err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+
+ if (reclaim) {
+ kzfree(txq->entries[cmd_index].free_buf);
+ txq->entries[cmd_index].free_buf = NULL;
+ }
+
+ /*
+ * After here, we should always check rxcb._page_stolen,
+ * if it is true then one of the handlers took the page.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * iwl_trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (!rxcb._page_stolen)
+ iwl_pcie_hcmd_complete(trans, &rxcb, err);
+ else
+ IWL_WARN(trans, "Claim null rxb?\n");
+ }
+
+ page_stolen |= rxcb._page_stolen;
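+		/* Advance to the next packet: frames are packed back to
+		 * back within the page, each aligned to
+		 * FH_RSCSR_FRAME_ALIGN. */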
+ offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+ }
+
+ /* page was stolen from us -- free our reference */
+ if (page_stolen) {
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ dma_map_page(trans->dev, rxb->page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+}
+
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
+ */
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ u32 r, i;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+restart:
+ spin_lock(&rxq->lock);
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
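+	/* (the 0x0FFF mask keeps the significant 12 bits of closed_rb_num;
+	 * the 256-entry queue index wraps well within that range) */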
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+
+	/* calculate how many RBDs need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ struct iwl_rx_mem_buffer *rxb;
+
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+
+ IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
+ r, i, rxb);
+ iwl_pcie_rx_handle_rb(trans, rxb);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ count = 0;
+ goto restart;
+ }
+ }
+ }
+
+	/* Update the driver's read index */
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+
+ if (fill_rx)
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+ else
+ iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
+}
+
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+ if (trans->cfg->internal_wimax_coex &&
+ (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ iwl_op_mode_wimax_active(trans->op_mode);
+ wake_up(&trans_pcie->wait_command_queue);
+ return;
+ }
+
+ iwl_pcie_dump_csr(trans);
+ iwl_dump_fh(trans, NULL);
+
+ local_bh_disable();
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
+ local_bh_enable();
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ u32 inta;
+
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
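+
+/* With ICT_SHIFT = 12 the table occupies one device page and holds
+ * ICT_SIZE / sizeof(u32) = 4096 / 4 = 1024 32-bit entries. */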
+
+/* Interrupt handler using the ICT table. With this mechanism the driver
+ * stops reading the INTA register to discover the device's interrupts,
+ * since reading that register is expensive: instead, the device writes
+ * its interrupt causes into the ICT table in DRAM, increments its index
+ * and then fires the interrupt. The driver ORs all ICT table entries
+ * from the current index up to the first entry holding 0 - the result is
+ * the interrupt cause to service - then zeroes the consumed entries and
+ * updates its index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+	/*
+	 * This is a W/A for a HW bug: the bug may cause the Rx bit (bit 15
+	 * before shifting it to 31) to clear when using interrupt
+	 * coalescing. Fortunately, bits 18 and 19 stay set when this
+	 * happens, so we use them to decide on the real state of the Rx
+	 * bit: in other words, we set bit 15 (0x8000) whenever bit 18 or
+	 * bit 19 (mask 0xC0000) is set.
+	 */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
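+	/* Reassemble the CSR_INT bit layout from the packed ICT value:
+	 * the low byte maps to CSR bits 0-7 and the second byte to bits
+	 * 24-31 (e.g. val = 0x0102 yields inta = 0x01000002). */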
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
+}
+
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+{
+ struct iwl_trans *trans = dev_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 inta = 0;
+ u32 handled = 0;
+
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ spin_lock(&trans_pcie->irq_lock);
+
+	/* Read the interrupt cause from the ICT table if it has been set
+	 * up; otherwise (DRAM interrupt table not set yet) fall back to
+	 * the legacy INTA register.
+	 */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
+	 *
+	 * There is a hardware bug in the interrupt mask function: some
+	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register. Furthermore the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to fail to be detected. We work around
+	 * both hardware bugs here by ACKing all the possible interrupts so
+	 * that interrupt coalescing can still be achieved.
+	 */
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
+
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
+ inta, iwl_read32(trans, CSR_INT_MASK));
+
+ spin_unlock(&trans_pcie->irq_lock);
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+ isr_stats->hw++;
+ iwl_pcie_irq_handle_error(trans);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ goto out;
+ }
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
+ isr_stats->sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ }
+ }
+
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ bool hw_rfkill;
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rfkill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL, &trans->status);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(trans, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
+
+ isr_stats->wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(trans, CSR_FH_INT_STATUS,
+ CSR_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(trans,
+ CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+		/* Sending an Rx interrupt requires many steps to be done
+		 * in the device:
+		 * 1- write interrupt to current index in ICT table.
+		 * 2- dma RX frame.
+		 * 3- update RX shared data to indicate last write index.
+		 * 4- send interrupt.
+		 * This could lead to an Rx race: the driver could receive
+		 * the Rx interrupt before the shared data reflects the
+		 * change; the periodic interrupt will detect any dangling
+		 * Rx activity.
+		 */
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write8(trans, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ isr_stats->unhandled++;
+ }
+
+ if (inta & ~(trans_pcie->inta_mask)) {
+ IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~trans_pcie->inta_mask);
+ }
+
+	/* Re-enable all interrupts, but only if they were disabled by the irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+
+/* Free dram table */
+void iwl_pcie_free_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->ict_tbl) {
+ dma_free_coherent(trans->dev, ICT_SIZE,
+ trans_pcie->ict_tbl,
+ trans_pcie->ict_tbl_dma);
+ trans_pcie->ict_tbl = NULL;
+ trans_pcie->ict_tbl_dma = 0;
+ }
+}
+
+/*
+ * Allocate the DRAM-shared ICT table as an aligned memory block of
+ * ICT_SIZE bytes, and reset all data related to ICT-table interrupts.
+ */
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->ict_tbl =
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->ict_tbl)
+ return -ENOMEM;
+
+ /* just an API sanity check ... it is guaranteed to be aligned */
+ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+ iwl_pcie_free_ict(trans);
+ return -EINVAL;
+ }
+
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
+
+ return 0;
+}
+
+/* The device is going up: inform it that we are using the ICT interrupt
+ * table, and tell the driver code to start using ICT interrupts.
+ */
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 val;
+
+ if (!trans_pcie->ict_tbl)
+ return;
+
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_disable_interrupts(trans);
+
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+
+ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
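+	/* val now holds the table's page-aligned DRAM address (shifted
+	 * right by ICT_SHIFT) plus the enable and wrap-check flags. */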
+
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
+
+ iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
+ trans_pcie->use_ict = true;
+ trans_pcie->ict_index = 0;
+ iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+}
+
+/* Device is going down - disable ICT interrupt usage */
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock(&trans_pcie->irq_lock);
+ trans_pcie->use_ict = false;
+ spin_unlock(&trans_pcie->irq_lock);
+}
+
+irqreturn_t iwl_pcie_isr(int irq, void *data)
+{
+ struct iwl_trans *trans = data;
+
+ if (!trans)
+ return IRQ_NONE;
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the threaded handler will
+	 * re-enable interrupts. If we *don't* have something, we'll
+	 * re-enable before leaving here.
+	 */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+ return IRQ_WAKE_THREAD;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
new file mode 100644
index 000000000..dc179094e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -0,0 +1,2604 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-scd.h"
+#include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
+#include "internal.h"
+#include "iwl-fh.h"
+
+/* extended range in FW SRAM */
+#define IWL_FW_MEM_EXTENDED_START 0x40000
+#define IWL_FW_MEM_EXTENDED_END 0x57FFF
+
+static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->fw_mon_page)
+ return;
+
+ dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
+ __free_pages(trans_pcie->fw_mon_page,
+ get_order(trans_pcie->fw_mon_size));
+ trans_pcie->fw_mon_page = NULL;
+ trans_pcie->fw_mon_phys = 0;
+ trans_pcie->fw_mon_size = 0;
+}
+
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page *page = NULL;
+ dma_addr_t phys;
+ u32 size;
+ u8 power;
+
+ if (trans_pcie->fw_mon_page) {
+ dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ return;
+ }
+
+ phys = 0;
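+	/* Try progressively smaller power-of-two buffers, from 2^26
+	 * (64 MiB) down to 2^11 (2 KiB), until one both allocates and
+	 * DMA-maps successfully. */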
+ for (power = 26; power >= 11; power--) {
+ int order;
+
+ size = BIT(power);
+ order = get_order(size);
+ page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
+ order);
+ if (!page)
+ continue;
+
+ phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, phys)) {
+ __free_pages(page, order);
+ page = NULL;
+ continue;
+ }
+ IWL_INFO(trans,
+ "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
+ size, order);
+ break;
+ }
+
+ if (WARN_ON_ONCE(!page))
+ return;
+
+ trans_pcie->fw_mon_page = page;
+ trans_pcie->fw_mon_phys = phys;
+ trans_pcie->fw_mon_size = size;
+}
+
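+/*
+ * Shared-register (SHR) access helpers: the control word carries the
+ * 16-bit register address in its low bits plus an opcode in the top
+ * nibble - judging from the code below, 2 requests a read and 3 a write.
+ */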
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
+}
+
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+{
+ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ else
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+static void iwl_pcie_apm_config(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 lctl;
+ u16 cap;
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so the device moves directly
+	 * L0->L1; this costs a negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ else
+ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+ dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+ trans->ltr_enabled ? "En" : "Dis");
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
+{
+	int ret = 0;
+
+	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwl_pcie_apm_config(trans);
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (trans->cfg->base_params->pll_cfg_val)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+ trans->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+ goto out;
+ }
+
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+		 * This is a bit of an abuse: the oscillator workaround
+		 * below is needed for 7260 / 3160 only, so we use the
+		 * host_interrupt_operation_mode flag to detect those
+		 * devices, even though enabling the oscillator is not
+		 * really related to that mode.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
+ /*
+ * Enable DMA clock and wait for it to stabilize.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+out:
+ return ret;
+}
+
+/*
+ * Enable LP XTAL to avoid a HW bug where the device may consume a lot
+ * of power if the FW is not loaded after device reset. LP XTAL is
+ * disabled by default after device HW reset. Do it only if XTAL is fed
+ * by an internal source. Configure the device's "persistence" mode to
+ * avoid resetting XTAL again when SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is possible.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (WARN_ON(ret < 0)) {
+ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ /*
+ * Reset entire device again - do controller reset (results in
+ * SHRD_HW_RST). Turn MAC off before proceeding.
+ */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
+static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret < 0)
+ IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(trans, "stop master\n");
+
+ return ret;
+}
+
+static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+ IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+ if (op_mode_leave) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ iwl_pcie_apm_init(trans);
+
+ /* inform ME that we are leaving */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_WAKE_ME);
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE |
+ CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(5);
+ }
+
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+ /* Stop device's DMA activity */
+ iwl_pcie_apm_stop_master(trans);
+
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* nic_init */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_apm_init(trans);
+
+ spin_unlock(&trans_pcie->irq_lock);
+
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
+
+ iwl_op_mode_nic_config(trans->op_mode);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ iwl_pcie_rx_init(trans);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_pcie_tx_init(trans))
+ return -ENOMEM;
+
+ if (trans->cfg->base_params->shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+ }
+
+ return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns the poll_bit return value, which is >= 0 on success */
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+{
+ int ret;
+
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+
+ if (ret >= 0)
+ iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+
+ IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+{
+ int ret;
+ int t = 0;
+ int iter;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_pcie_set_hw_ready(trans);
+ /* If the card is ready, exit 0 */
+ if (ret >= 0)
+ return 0;
+
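+	/* Worst case the loops below poll for roughly
+	 * 10 * (150 ms + 25 ms) before giving up (usleep_range() may
+	 * stretch each 200 us step further). */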
+ for (iter = 0; iter < 10; iter++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
+
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
+
+ IWL_ERR(trans, "Couldn't prepare the card\n");
+
+ return ret;
+}
+
+/*
+ * ucode
+ */
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+ dma_addr_t phy_addr, u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(trans,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(trans,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+ trans_pcie->ucode_write_complete, 5 * HZ);
+ if (!ret) {
+ IWL_ERR(trans, "Failed to load firmware chunk!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
+{
+ u8 *v_addr;
+ dma_addr_t p_addr;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
+ int ret = 0;
+
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
+
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!v_addr) {
+ IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+ chunk_sz = PAGE_SIZE;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+ &p_addr, GFP_KERNEL);
+ if (!v_addr)
+ return -ENOMEM;
+ }
+
+ for (offset = 0; offset < section->len; offset += chunk_sz) {
+ u32 copy_size, dst_addr;
+ bool extended_addr = false;
+
+ copy_size = min_t(u32, chunk_sz, section->len - offset);
+ dst_addr = section->offset + offset;
+
+ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWL_FW_MEM_EXTENDED_END)
+ extended_addr = true;
+
+ if (extended_addr)
+ iwl_set_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+ memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+ copy_size);
+
+ if (extended_addr)
+ iwl_clear_bits_prph(trans, LMPM_CHICK,
+ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+ if (ret) {
+ IWL_ERR(trans,
+ "Could not load the [%d] uCode section\n",
+ section_num);
+ break;
+ }
+ }
+
+ dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
+ return ret;
+}
+
+/*
+ * The driver takes ownership of the secure machine before FW load
+ * to prevent a race with the BT load.
+ * W/A for a ROM bug (should be removed in the next Si step).
+ */
+static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+{
+ u32 val, loop = 1000;
+
+ /*
+	 * Check that the RSA semaphore is accessible.
+	 * If the HW isn't locked and the RSA semaphore isn't accessible,
+ * we are in trouble.
+ */
+ val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ if (val & (BIT(1) | BIT(17))) {
+ IWL_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
+ return 0;
+ }
+
+	/* take ownership of the AUX IF */
+ iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
+ iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
+
+ do {
+ iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
+ val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
+ if (val == 0x1) {
+ iwl_write_prph(trans, RSA_ENABLE, 0);
+ return 0;
+ }
+
+ udelay(10);
+ loop--;
+ } while (loop > 0);
+
+ IWL_ERR(trans, "Failed to take ownership on secure machine\n");
+ return -EIO;
+}
+
+static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0, sec_num = 0x1;
+ u32 val, last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+
+ /* Notify the ucode of the loaded section number and status */
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ sec_num = (sec_num << 1) | 0x1;
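+		/* sec_num progresses 0x1 -> 0x3 -> 0x7 ...: a bitmask
+		 * gaining one bit per loaded section, reported via
+		 * FH_UCODE_LOAD_STATUS above. */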
+ }
+
+ *first_ucode_section = last_read_idx;
+
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ int i;
+
+ if (dest->version)
+ IWL_ERR(trans,
+ "DBG DEST version is %d - expect issues\n",
+ dest->version);
+
+ IWL_INFO(trans, "Applying debug destination %s\n",
+ get_fw_dbg_mode_string(dest->monitor_mode));
+
+ if (dest->monitor_mode == EXTERNAL_MODE)
+ iwl_pcie_alloc_fw_monitor(trans);
+ else
+ IWL_WARN(trans, "PCI should have external buffer debug\n");
+
+ for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+ u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+ u32 val = le32_to_cpu(dest->reg_ops[i].val);
+
+ switch (dest->reg_ops[i].op) {
+ case CSR_ASSIGN:
+ iwl_write32(trans, addr, val);
+ break;
+ case CSR_SETBIT:
+ iwl_set_bit(trans, addr, BIT(val));
+ break;
+ case CSR_CLEARBIT:
+ iwl_clear_bit(trans, addr, BIT(val));
+ break;
+ case PRPH_ASSIGN:
+ iwl_write_prph(trans, addr, val);
+ break;
+ case PRPH_SETBIT:
+ iwl_set_bits_prph(trans, addr, BIT(val));
+ break;
+ case PRPH_CLEARBIT:
+ iwl_clear_bits_prph(trans, addr, BIT(val));
+ break;
+ default:
+ IWL_ERR(trans, "FW debug - unknown OP %d\n",
+ dest->reg_ops[i].op);
+ break;
+ }
+ }
+
+ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ trans_pcie->fw_mon_phys >> dest->base_shift);
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size) >> dest->end_shift);
+ }
+}
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* load to FW the binary non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+ if (ret)
+ return ret;
+
+ if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
+ /* load to FW the binary sections of CPU2 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ /* supported for 7000 only for the moment */
+ if (iwlwifi_mod_params.fw_monitor &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_pcie_alloc_fw_monitor(trans);
+
+ if (trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
+ trans_pcie->fw_mon_phys >> 4);
+ iwl_write_prph(trans, MON_BUFF_END_ADDR,
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size) >> 4);
+ }
+ } else if (trans->dbg_dest_tlv) {
+ iwl_pcie_apply_destination(trans);
+ }
+
+ /* release CPU reset */
+ iwl_write32(trans, CSR_RESET, 0);
+
+ return 0;
+}
+
+static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans, "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ if (trans->dbg_dest_tlv)
+ iwl_pcie_apply_destination(trans);
+
+ /* TODO: remove in the next Si step */
+ ret = iwl_pcie_rsa_race_bug_wa(trans);
+ if (ret)
+ return ret;
+
+ /* configure the ucode to be ready to get the secured image */
+ /* release CPU reset */
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ /* load to FW the binary sections of CPU2 */
+ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
+{
+ int ret;
+ bool hw_rfkill;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ if (hw_rfkill && !run_in_rfkill)
+ return -ERFKILL;
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+ iwl_enable_interrupts(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Load the given image to the HW */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ return iwl_pcie_load_given_ucode_8000(trans, fw);
+ else
+ return iwl_pcie_load_given_ucode(trans, fw);
+}
+
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ iwl_pcie_reset_ict(trans);
+ iwl_pcie_tx_start(trans, scd_addr);
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_disable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+	/* device going down, stop using ICT table */
+ iwl_pcie_disable_ict(trans);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+
+ /* Power-down device's busmaster DMA clocks */
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_pcie_apm_stop(trans, false);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain versions of the hardware.
+	 * Certain devices also keep resending the HW RF kill interrupt
+	 * until it is ACKed, even when the interrupt should be masked.
+	 * Re-ACK all the interrupts here.
+ */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_disable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ /* clear all status bits */
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+	/* re-take ownership to prevent other users from stealing the device */
+ iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+ iwl_trans_pcie_stop_device(trans, true);
+}
+
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+{
+ iwl_disable_interrupts(trans);
+
+ /*
+ * in testing mode, the host stays awake and the
+ * hardware won't be reset (not even partially)
+ */
+ if (test)
+ return;
+
+ iwl_pcie_disable_ict(trans);
+
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+
+ iwl_pcie_set_pwr(trans, true);
+}
+
+static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test)
+{
+ u32 val;
+ int ret;
+
+ if (test) {
+ iwl_enable_interrupts(trans);
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+ }
+
+ /*
+	 * This also enables interrupts - none will fire yet, since the
+	 * device doesn't know we're waking it up; they start only once
+	 * the opmode actually tells the device, after this call.
+ */
+ iwl_pcie_reset_ict(trans);
+
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+ return ret;
+ }
+
+ iwl_pcie_set_pwr(trans, false);
+
+ iwl_trans_pcie_tx_reset(trans);
+
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
+ return ret;
+ }
+
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+ *status = IWL_D3_STATUS_RESET;
+ else
+ *status = IWL_D3_STATUS_ALIVE;
+
+ return 0;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+ bool hw_rfkill;
+ int err;
+
+ err = iwl_pcie_prepare_card_hw(trans);
+ if (err) {
+ IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+ return err;
+ }
+
+ /* Reset the entire device */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
+ iwl_pcie_apm_init(trans);
+
+ /* From now on, the op_mode will be kept updated about RF kill state */
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_disable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ iwl_pcie_apm_stop(trans, true);
+
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_disable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ iwl_pcie_disable_ict(trans);
+}
+
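+/* Thin MMIO accessors over the BAR0 mapping (hw_base) */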
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
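+/*
+ * Periphery registers are reached indirectly: the target address is
+ * written to HBUS_TARG_PRPH_RADDR, with the (3 << 24) control bits
+ * set in the top byte, and the value is then read back from
+ * HBUS_TARG_PRPH_RDAT. Writes mirror this through the WADDR/WDAT pair.
+ */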
+static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+{
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+ ((reg & 0x000FFFFF) | (3 << 24)));
+ return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+}
+
+static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
+ u32 val)
+{
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
+ ((addr & 0x000FFFFF) | (3 << 24)));
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+}
+
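+/*
+ * NAPI poll stub: RX completions are handled directly in the interrupt
+ * path, so this callback is never expected to run; warn if it does.
+ */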
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+ trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans_pcie->n_no_reclaim_cmds = 0;
+ else
+ trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+ if (trans_pcie->n_no_reclaim_cmds)
+ memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+ trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+ trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
+ if (trans_pcie->rx_buf_size_8k)
+ trans_pcie->rx_page_order = get_order(8 * 1024);
+ else
+ trans_pcie->rx_page_order = get_order(4 * 1024);
+
+ trans_pcie->command_names = trans_cfg->command_names;
+ trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+
+ /* init ref_count to 1 (should be cleared when ucode is loaded) */
+ trans_pcie->ref_count = 1;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
+}
+
+void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
+ iwl_pcie_tx_free(trans);
+ iwl_pcie_rx_free(trans);
+
+ free_irq(trans_pcie->pci_dev->irq, trans);
+ iwl_pcie_free_ict(trans);
+
+ pci_disable_msi(trans_pcie->pci_dev);
+ iounmap(trans_pcie->hw_base);
+ pci_release_regions(trans_pcie->pci_dev);
+ pci_disable_device(trans_pcie->pci_dev);
+ kmem_cache_destroy(trans->dev_cmd_pool);
+
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
+ iwl_pcie_free_fw_monitor(trans);
+
+ kfree(trans);
+}
+
+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+ if (state)
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
+ else
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+}
+
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
+ unsigned long *flags)
+{
+ int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+
+ if (trans_pcie->cmd_hold_nic_awake)
+ goto out;
+
+ /* this bit wakes up the NIC */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ * 5000 series and later (including 1000 series) have non-volatile SRAM,
+ * and do not save/restore SRAM when power cycling.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (unlikely(ret < 0)) {
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ if (!silent) {
+ u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ val);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+ return false;
+ }
+ }
+
+out:
+ /*
+	 * Fool sparse by faking that we release the lock - sparse will
+ * track nic_access anyway.
+ */
+ __release(&trans_pcie->reg_lock);
+ return true;
+}
+
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ unsigned long *flags)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ /*
+	 * Fool sparse by faking that we acquire the lock - sparse will
+ * track nic_access anyway.
+ */
+ __acquire(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_hold_nic_awake)
+ goto out;
+
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
+out:
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ const u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
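+/*
+ * Freeze or wake the stuck-queue watchdog for the queues in @txqs.
+ * Freezing a non-empty queue saves the time remaining on its timer
+ * and deletes it; waking re-arms the timer with the saved remainder.
+ */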
+static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs,
+ bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = &trans_pcie->txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->q.read_ptr == txq->q.write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ del_timer(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm timer with the
+ * remainder before it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
+#define IWL_FLUSH_WAIT_MS 2000
+
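+/*
+ * Poll every used TX queue selected by @txq_bm until its read pointer
+ * catches up with the write pointer or IWL_FLUSH_WAIT_MS expires; on
+ * timeout, dump the scheduler and FH state of the stuck queue.
+ */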
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ struct iwl_queue *q;
+ int cnt;
+ unsigned long now = jiffies;
+ u32 scd_sram_addr;
+ u8 buf[16];
+ int ret = 0;
+
+	/* waiting for all the tx frames to complete might take a while */
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
+ if (cnt == trans_pcie->cmd_queue)
+ continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
+ msleep(1);
+ }
+
+ if (q->read_ptr != q->write_ptr) {
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", cnt);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
+ }
+
+ if (!ret)
+ return 0;
+
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+
+ if (cnt & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ cnt, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
+ }
+
+ return ret;
+}
+
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+}
+
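+/*
+ * d0i3 reference counting: ref/unref bump a usage counter under
+ * ref_lock; both are no-ops when d0i3 is disabled via the module
+ * parameter.
+ */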
+void iwl_trans_pcie_ref(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ if (iwlwifi_mod_params.d0i3_disable)
+ return;
+
+ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+ trans_pcie->ref_count++;
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+}
+
+void iwl_trans_pcie_unref(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ if (iwlwifi_mod_params.d0i3_disable)
+ return;
+
+ spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+ IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+ if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+ return;
+ }
+ trans_pcie->ref_count--;
+ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+}
+
+static const char *get_csr_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
+
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
+{
+ int i;
+ static const u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(trans, "CSR values:\n");
+ IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(trans, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(trans, csr_tbl[i]));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ size_t bufsz;
+
+ bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
+
+ if (!trans_pcie->txq)
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ txq = &trans_pcie->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, trans_pcie->queue_used),
+ !!test_bit(cnt, trans_pcie->queue_stopped),
+ txq->need_update, txq->frozen,
+ (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
+ rxq->write_actual);
+ pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
+ rxq->need_update);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ int pos = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ isr_stats->hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ isr_stats->sw);
+ if (isr_stats->sw || isr_stats->hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ isr_stats->err_code);
+ }
+#ifdef CONFIG_IWLWIFI_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ isr_stats->sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ isr_stats->alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ isr_stats->ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ isr_stats->wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n", isr_stats->rx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ isr_stats->tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ isr_stats->unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ memset(isr_stats, 0, sizeof(*isr_stats));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ iwl_pcie_dump_csr(trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ char *buf = NULL;
+ ssize_t ret;
+
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+ return ret;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+
+/*
+ * Create the debugfs files and directories
+ */
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+ return 0;
+
+err:
+ IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+ return -ENOMEM;
+}
+#else
+static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ struct dentry *dir)
+{
+ return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < IWL_NUM_OF_TBS; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+ return cmdlen;
+}
+
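+/*
+ * Periphery register ranges captured in a firmware error dump; each
+ * range covers both its .start and .end addresses (inclusive).
+ */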
+static const struct {
+ u32 start, end;
+} iwl_prph_dump_addr[] = {
+ { .start = 0x00a00000, .end = 0x00a00000 },
+ { .start = 0x00a0000c, .end = 0x00a00024 },
+ { .start = 0x00a0002c, .end = 0x00a0003c },
+ { .start = 0x00a00410, .end = 0x00a00418 },
+ { .start = 0x00a00420, .end = 0x00a00420 },
+ { .start = 0x00a00428, .end = 0x00a00428 },
+ { .start = 0x00a00430, .end = 0x00a0043c },
+ { .start = 0x00a00444, .end = 0x00a00444 },
+ { .start = 0x00a004c0, .end = 0x00a004cc },
+ { .start = 0x00a004d8, .end = 0x00a004d8 },
+ { .start = 0x00a004e0, .end = 0x00a004f0 },
+ { .start = 0x00a00840, .end = 0x00a00840 },
+ { .start = 0x00a00850, .end = 0x00a00858 },
+ { .start = 0x00a01004, .end = 0x00a01008 },
+ { .start = 0x00a01010, .end = 0x00a01010 },
+ { .start = 0x00a01018, .end = 0x00a01018 },
+ { .start = 0x00a01024, .end = 0x00a01024 },
+ { .start = 0x00a0102c, .end = 0x00a01034 },
+ { .start = 0x00a0103c, .end = 0x00a01040 },
+ { .start = 0x00a01048, .end = 0x00a01094 },
+ { .start = 0x00a01c00, .end = 0x00a01c20 },
+ { .start = 0x00a01c58, .end = 0x00a01c58 },
+ { .start = 0x00a01c7c, .end = 0x00a01c7c },
+ { .start = 0x00a01c28, .end = 0x00a01c54 },
+ { .start = 0x00a01c5c, .end = 0x00a01c5c },
+ { .start = 0x00a01c60, .end = 0x00a01cdc },
+ { .start = 0x00a01ce0, .end = 0x00a01d0c },
+ { .start = 0x00a01d18, .end = 0x00a01d20 },
+ { .start = 0x00a01d2c, .end = 0x00a01d30 },
+ { .start = 0x00a01d40, .end = 0x00a01d5c },
+ { .start = 0x00a01d80, .end = 0x00a01d80 },
+ { .start = 0x00a01d98, .end = 0x00a01d9c },
+ { .start = 0x00a01da8, .end = 0x00a01da8 },
+ { .start = 0x00a01db8, .end = 0x00a01df4 },
+ { .start = 0x00a01dc0, .end = 0x00a01dfc },
+ { .start = 0x00a01e00, .end = 0x00a01e2c },
+ { .start = 0x00a01e40, .end = 0x00a01e60 },
+ { .start = 0x00a01e68, .end = 0x00a01e6c },
+ { .start = 0x00a01e74, .end = 0x00a01e74 },
+ { .start = 0x00a01e84, .end = 0x00a01e90 },
+ { .start = 0x00a01e9c, .end = 0x00a01ec4 },
+ { .start = 0x00a01ed0, .end = 0x00a01ee0 },
+ { .start = 0x00a01f00, .end = 0x00a01f1c },
+ { .start = 0x00a01f44, .end = 0x00a01ffc },
+ { .start = 0x00a02000, .end = 0x00a02048 },
+ { .start = 0x00a02068, .end = 0x00a020f0 },
+ { .start = 0x00a02100, .end = 0x00a02118 },
+ { .start = 0x00a02140, .end = 0x00a0214c },
+ { .start = 0x00a02168, .end = 0x00a0218c },
+ { .start = 0x00a021c0, .end = 0x00a021c0 },
+ { .start = 0x00a02400, .end = 0x00a02410 },
+ { .start = 0x00a02418, .end = 0x00a02420 },
+ { .start = 0x00a02428, .end = 0x00a0242c },
+ { .start = 0x00a02434, .end = 0x00a02434 },
+ { .start = 0x00a02440, .end = 0x00a02460 },
+ { .start = 0x00a02468, .end = 0x00a024b0 },
+ { .start = 0x00a024c8, .end = 0x00a024cc },
+ { .start = 0x00a02500, .end = 0x00a02504 },
+ { .start = 0x00a0250c, .end = 0x00a02510 },
+ { .start = 0x00a02540, .end = 0x00a02554 },
+ { .start = 0x00a02580, .end = 0x00a025f4 },
+ { .start = 0x00a02600, .end = 0x00a0260c },
+ { .start = 0x00a02648, .end = 0x00a02650 },
+ { .start = 0x00a02680, .end = 0x00a02680 },
+ { .start = 0x00a026c0, .end = 0x00a026d0 },
+ { .start = 0x00a02700, .end = 0x00a0270c },
+ { .start = 0x00a02804, .end = 0x00a02804 },
+ { .start = 0x00a02818, .end = 0x00a0281c },
+ { .start = 0x00a02c00, .end = 0x00a02db4 },
+ { .start = 0x00a02df4, .end = 0x00a02fb0 },
+ { .start = 0x00a03000, .end = 0x00a03014 },
+ { .start = 0x00a0301c, .end = 0x00a0302c },
+ { .start = 0x00a03034, .end = 0x00a03038 },
+ { .start = 0x00a03040, .end = 0x00a03048 },
+ { .start = 0x00a03060, .end = 0x00a03068 },
+ { .start = 0x00a03070, .end = 0x00a03074 },
+ { .start = 0x00a0307c, .end = 0x00a0307c },
+ { .start = 0x00a03080, .end = 0x00a03084 },
+ { .start = 0x00a0308c, .end = 0x00a03090 },
+ { .start = 0x00a03098, .end = 0x00a03098 },
+ { .start = 0x00a030a0, .end = 0x00a030a0 },
+ { .start = 0x00a030a8, .end = 0x00a030b4 },
+ { .start = 0x00a030bc, .end = 0x00a030bc },
+ { .start = 0x00a030c0, .end = 0x00a0312c },
+ { .start = 0x00a03c00, .end = 0x00a03c5c },
+ { .start = 0x00a04400, .end = 0x00a04454 },
+ { .start = 0x00a04460, .end = 0x00a04474 },
+ { .start = 0x00a044c0, .end = 0x00a044ec },
+ { .start = 0x00a04500, .end = 0x00a04504 },
+ { .start = 0x00a04510, .end = 0x00a04538 },
+ { .start = 0x00a04540, .end = 0x00a04548 },
+ { .start = 0x00a04560, .end = 0x00a0457c },
+ { .start = 0x00a04590, .end = 0x00a04598 },
+ { .start = 0x00a045c0, .end = 0x00a045f4 },
+};
+
+static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ struct iwl_fw_error_dump_prph *prph;
+ unsigned long flags;
+ u32 prph_len = 0, i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+ int reg;
+ __le32 *val;
+
+ prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+ (*data)->len = cpu_to_le32(sizeof(*prph) +
+ num_bytes_in_chunk);
+ prph = (void *)(*data)->data;
+ prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
+ val = (void *)prph->data;
+
+ for (reg = iwl_prph_dump_addr[i].start;
+ reg <= iwl_prph_dump_addr[i].end;
+ reg += 4)
+ *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
+ reg));
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ return prph_len;
+}
+
+#define IWL_CSR_TO_DUMP (0x250)
+
+static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
+ __le32 *val;
+ int i;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+ (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
+ val = (void *)(*data)->data;
+
+ for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return csr_len;
+}
+
+static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data)
+{
+ u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+ unsigned long flags;
+ __le32 *val;
+ int i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+ (*data)->len = cpu_to_le32(fh_regs_len);
+ val = (void *)(*data)->data;
+
+ for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
+ *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ *data = iwl_fw_error_next_data(*data);
+
+ return sizeof(**data) + fh_regs_len;
+}
+
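+/*
+ * Build the transport part of a firmware error dump in two passes:
+ * first compute an upper bound on the total length and vzalloc() it,
+ * then fill in the sections (host commands, PRPH, CSR, FH registers
+ * and, when available, the firmware monitor).
+ */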
+static
+struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ struct iwl_trans_dump_data *dump_data;
+ u32 len;
+ u32 monitor_len;
+ int i, ptr;
+
+ /* transport dump header */
+ len = sizeof(*dump_data);
+
+ /* host commands */
+ len += sizeof(*data) +
+ cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+ /* CSR registers */
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+ /* PRPH registers */
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
+
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+
+ /* FH registers */
+ len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+ /* FW monitor */
+ if (trans_pcie->fw_mon_page) {
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+ trans_pcie->fw_mon_size;
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv) {
+ u32 base, end;
+
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg_dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ end += (1 << trans->dbg_dest_tlv->end_shift);
+ monitor_len = end - base;
+ len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+ monitor_len;
+ } else {
+ monitor_len = 0;
+ }
+
+ dump_data = vzalloc(len);
+ if (!dump_data)
+ return NULL;
+
+ len = 0;
+ data = (void *)dump_data->data;
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->q.write_ptr;
+ for (i = 0; i < cmdq->q.n_window; i++) {
+ u8 idx = get_cmd_index(&cmdq->q, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
+
+ len += iwl_trans_pcie_dump_prph(trans, &data);
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ /* data is already pointing to the next section */
+
+ if ((trans_pcie->fw_mon_page &&
+ trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+ trans->dbg_dest_tlv) {
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->dbg_dest_tlv) {
+ write_ptr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+ fw_mon_data = (void *)data->data;
+ fw_mon_data->fw_mon_wr_ptr =
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
+ fw_mon_data->fw_mon_cycle_cnt =
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+ fw_mon_data->fw_mon_base_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base));
+
+ len += sizeof(*data) + sizeof(*fw_mon_data);
+ if (trans_pcie->fw_mon_page) {
+ data->len = cpu_to_le32(trans_pcie->fw_mon_size +
+ sizeof(*fw_mon_data));
+
+ /*
+			 * The firmware has asserted and won't write anything
+			 * more to the buffer. The CPU can take ownership to
+			 * fetch the
+ * data. The buffer will be handed back to the device
+ * before the firmware will be restarted.
+ */
+ dma_sync_single_for_cpu(trans->dev,
+ trans_pcie->fw_mon_phys,
+ trans_pcie->fw_mon_size,
+ DMA_FROM_DEVICE);
+ memcpy(fw_mon_data->data,
+ page_address(trans_pcie->fw_mon_page),
+ trans_pcie->fw_mon_size);
+
+ len += trans_pcie->fw_mon_size;
+ } else {
+ /* If we are here then the buffer is internal */
+
+ /*
+ * Update pointers to reflect actual values after
+ * shifting
+ */
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ iwl_trans_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
+ data->len = cpu_to_le32(sizeof(*fw_mon_data) +
+ monitor_len);
+ len += monitor_len;
+ }
+ }
+
+ dump_data->len = len;
+
+ return dump_data;
+}
+
+static const struct iwl_trans_ops trans_ops_pcie = {
+ .start_hw = iwl_trans_pcie_start_hw,
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
+ .fw_alive = iwl_trans_pcie_fw_alive,
+ .start_fw = iwl_trans_pcie_start_fw,
+ .stop_device = iwl_trans_pcie_stop_device,
+
+ .d3_suspend = iwl_trans_pcie_d3_suspend,
+ .d3_resume = iwl_trans_pcie_d3_resume,
+
+ .send_cmd = iwl_trans_pcie_send_hcmd,
+
+ .tx = iwl_trans_pcie_tx,
+ .reclaim = iwl_trans_pcie_reclaim,
+
+ .txq_disable = iwl_trans_pcie_txq_disable,
+ .txq_enable = iwl_trans_pcie_txq_enable,
+
+ .dbgfs_register = iwl_trans_pcie_dbgfs_register,
+
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
+ .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
+
+ .write8 = iwl_trans_pcie_write8,
+ .write32 = iwl_trans_pcie_write32,
+ .read32 = iwl_trans_pcie_read32,
+ .read_prph = iwl_trans_pcie_read_prph,
+ .write_prph = iwl_trans_pcie_write_prph,
+ .read_mem = iwl_trans_pcie_read_mem,
+ .write_mem = iwl_trans_pcie_write_mem,
+ .configure = iwl_trans_pcie_configure,
+ .set_pmi = iwl_trans_pcie_set_pmi,
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access,
+ .release_nic_access = iwl_trans_pcie_release_nic_access,
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+ .ref = iwl_trans_pcie_ref,
+ .unref = iwl_trans_pcie_unref,
+
+ .dump_data = iwl_trans_pcie_dump_data,
+};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct iwl_cfg *cfg)
+{
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ u16 pci_cmd;
+ int err;
+
+ trans = kzalloc(sizeof(struct iwl_trans) +
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+ if (!trans) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ trans->ops = &trans_ops_pcie;
+ trans->cfg = cfg;
+ trans_lockdep_init(trans);
+ trans_pcie->trans = trans;
+ spin_lock_init(&trans_pcie->irq_lock);
+ spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->ref_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto out_no_pci;
+
+ if (!cfg->base_params->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto out_pci_disable_device;
+ }
+
+ trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!trans_pcie->hw_base) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
+ err = pci_enable_msi(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+
+ trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ /*
+	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+	 * changed, and now the revision step also includes bits 0-1 (no more
+ * "dash" value). To keep hw_rev backwards compatible - we'll store it
+ * in the old format.
+ */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ unsigned long flags;
+ int ret;
+
+ trans->hw_rev = (trans->hw_rev & 0xfff0) |
+ (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+
+ /*
+		 * In order to recognize the C step, the driver should read the
+		 * chip version id located in the AUX bus MISC address space.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+ goto out_pci_disable_msi;
+ }
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ u32 hw_step;
+
+ hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
+ hw_step |= ENABLE_WFPM;
+ __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
+ hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
+ hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+ if (hw_step == 0x3)
+ trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+ (SILICON_C_STEP << 2);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
+ }
+
+ trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+ snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+ "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
+
+ snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans->dev_cmd_headroom = 0;
+ trans->dev_cmd_pool =
+ kmem_cache_create(trans->dev_cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!trans->dev_cmd_pool) {
+ err = -ENOMEM;
+ goto out_pci_disable_msi;
+ }
+
+ if (iwl_pcie_alloc_ict(trans))
+ goto out_free_cmd_pool;
+
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;
+
+ return trans;
+
+out_free_ict:
+ iwl_pcie_free_ict(trans);
+out_free_cmd_pool:
+ kmem_cache_destroy(trans->dev_cmd_pool);
+out_pci_disable_msi:
+ pci_disable_msi(pdev);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_no_pci:
+ kfree(trans);
+out:
+ return ERR_PTR(err);
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
new file mode 100644
index 000000000..5ef8044c2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -0,0 +1,1906 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-scd.h"
+#include "iwl-op-mode.h"
+#include "internal.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "dvm/commands.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
+ * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
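+/*
+ * Worked example (assuming TFD_QUEUE_SIZE_MAX is 256): with
+ * read_ptr = 250 and write_ptr = 10, used = (10 - 250) & 255 = 16,
+ * so iwl_queue_space() reports max - 16 free entries.
+ */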
+static int iwl_queue_space(const struct iwl_queue *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+ * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ max = q->n_window;
+ else
+ max = TFD_QUEUE_SIZE_MAX - 1;
+
+ /*
+ * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+ * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+{
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* slots_num must be power-of-two size, otherwise
+ * get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
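+/*
+ * Watchdog for a TX queue that made no progress within wd_timeout:
+ * dump the queue, scheduler and FH state, then force an NMI so the
+ * firmware error handling kicks in.
+ */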
+static void iwl_pcie_txq_stuck_timer(unsigned long data)
+{
+ struct iwl_txq *txq = (void *)data;
+ struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ u32 scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ u8 buf[16];
+ int i;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->q.read_ptr == txq->q.write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ jiffies_to_msecs(txq->wd_timeout));
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+ if (i & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ i, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+ }
+
+ iwl_force_nmi(trans);
+}
+
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+
+ scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ sta_id = tx_cmd->sta_id;
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
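+	/*
+	 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the
+	 * end of the table, presumably so the scheduler can read ahead
+	 * across the wrap point without special-casing it.
+	 */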
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->cmd_queue)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ lockdep_assert_held(&txq->lock);
+
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. NIC is woken up for CMD regardless of shadow outside this function
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ txq_id != trans_pcie->cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ /*
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
+ */
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
+ return;
+ }
+ }
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
+
+ spin_lock_bh(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
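+/*
+ * TFD transfer-buffer entries pack a DMA address of up to 36 bits:
+ * the low 32 bits live in tb->lo, bits 32-35 in the low nibble of
+ * tb->hi_n_len, and the remaining 12 bits of hi_n_len hold the length.
+ */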
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+ return addr;
+}
+
+static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfd *tfd)
+{
+ int i;
+ int num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* first TB is never freed - it's the scratchbuf data */
+
+ for (i = 1; i < num_tbs; i++)
+ dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
+
+ tfd->num_tbs = 0;
+}
+
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans - transport private data
+ * @txq - tx queue
+ * @dma_dir - the direction of the DMA mapping
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_tfd *tfd_tmp = txq->tfds;
+
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int rd_ptr = txq->q.read_ptr;
+ int idx = get_cmd_index(&txq->q, rd_ptr);
+
+ lockdep_assert_held(&txq->lock);
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+
+ /* free SKB */
+ if (txq->entries) {
+ struct sk_buff *skb;
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+ }
+}
+
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ dma_addr_t addr, u16 len, bool reset)
+{
+ struct iwl_queue *q;
+ struct iwl_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ IWL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
+ return -EINVAL;
+
+ iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ struct iwl_txq *txq, int slots_num,
+ u32 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t scratchbuf_sz;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
+ (unsigned long)txq);
+ txq->trans_pcie = trans_pcie;
+
+ txq->q.n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
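+	/* The scratch buffers double as the first TB of every TFD, so their
+	 * layout must mirror the start of a device command: the command
+	 * header followed by the tx_cmd bytes up to the scratch field. */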
+ BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+ BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+ sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch));
+
+ scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+
+ txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+ &txq->scratchbufs_dma,
+ GFP_KERNEL);
+ if (!txq->scratchbufs)
+ goto err_free_tfds;
+
+ txq->q.id = txq_id;
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+error:
+ if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+}
+
+static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = false;
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+	/*
+	 * Tell the NIC where to find the circular buffer of Tx Frame
+	 * Descriptors for this queue and enable its DMA channel. The CBBC
+	 * register takes the physical base address in 256-byte units,
+	 * hence the >> 8 below.
+	 */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/*
+ * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ spin_lock_bh(&txq->lock);
+ while (q->write_ptr != q->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, q->read_ptr);
+ iwl_pcie_txq_free_tfd(trans, txq);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ }
+ txq->active = false;
+ spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = trans->dev;
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++) {
+ kzfree(txq->entries[i].cmd);
+ kzfree(txq->entries[i].free_buf);
+ }
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->q.dma_addr);
+ txq->q.dma_addr = 0;
+ txq->tfds = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(*txq->scratchbufs) * txq->q.n_window,
+ txq->scratchbufs, txq->scratchbufs_dma);
+ }
+
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ del_timer_sync(&txq->stuck_timer);
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int nq = trans->cfg->base_params->num_of_queues;
+ int chan;
+ u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
+
+	/* make sure all queues are neither stopped nor marked used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+ WARN_ON(scd_base_addr != 0 &&
+ scd_base_addr != trans_pcie->scd_base_addr);
+
+ /* reset context data, TX status and translation data */
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
+
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+ trans_pcie->cmd_fifo,
+ trans_pcie->cmd_q_wdg_timeout);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_scd_activate_fifos(trans);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Enable L1-Active */
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->q.read_ptr = 0;
+ txq->q.write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ /*
+	 * Send 0 as the scd_base_addr since the device may have been reset
+	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
+ * contain garbage.
+ */
+ iwl_pcie_tx_start(trans, 0);
+}
+
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+ int ch, ret;
+ u32 mask = 0;
+
+ spin_lock(&trans_pcie->irq_lock);
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ goto out;
+
+ /* Stop each Tx DMA channel */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+ }
+
+	/* Wait for all DMA channels to be idle */
+	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+	if (ret < 0)
+		IWL_ERR(trans,
+			"Timed out waiting for Tx DMA channels to idle [0x%08x]\n",
+			iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+out:
+ spin_unlock(&trans_pcie->irq_lock);
+}
+
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
+ */
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ /* Turn off all Tx DMA fifos */
+ iwl_scd_deactivate_fifos(trans);
+
+ /* Turn off all Tx DMA channels */
+ iwl_pcie_tx_stop_fh(trans);
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
+ return 0;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++)
+ iwl_pcie_txq_unmap(trans, txq_id);
+
+ return 0;
+}
+
+/*
+ * iwl_trans_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
+{
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ iwl_pcie_txq_free(trans, txq_id);
+ }
+
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_pcie_tx_free(trans);
+
+ return ret;
+}
+
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+ int txq_id, slots_num;
+ bool alloc = false;
+
+ if (!trans_pcie->txq) {
+ ret = iwl_pcie_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_scd_deactivate_fifos(trans);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock(&trans_pcie->irq_lock);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
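+	/* The scheduler handles 20 queues by default; configurations with
+	 * more queues need the 31-queue mode enabled explicitly. */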
+ if (trans->cfg->base_params->num_of_queues > 20)
+ iwl_set_bits_prph(trans, SCD_GP_CTRL,
+ SCD_GP_CTRL_ENABLE_31_QUEUES);
+
+ return 0;
+error:
+	/* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_pcie_tx_free(trans);
+ return ret;
+}
+
+static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->wd_timeout)
+ return;
+
+ /*
+	 * the queue is frozen: the station is asleep and any data we send
+	 * must be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->q.read_ptr == txq->q.write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+ struct iwl_queue *q = &txq->q;
+ int last_to_free;
+
+	/* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ return;
+
+ spin_lock_bh(&txq->lock);
+
+ if (!txq->active) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
+ if (txq->q.read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+ txq_id, txq->q.read_ptr, tfd_num, ssn);
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. That one must be a used entry */
+ last_to_free = iwl_queue_dec_wrap(tfd_num);
+
+ if (!iwl_queue_used(q, last_to_free)) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+ q->write_ptr, q->read_ptr);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ q->read_ptr != tfd_num;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+
+ if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+ continue;
+
+ __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+
+ txq->entries[txq->q.read_ptr].skb = NULL;
+
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+
+ iwl_pcie_txq_free_tfd(trans, txq);
+ }
+
+ iwl_pcie_txq_progress(txq);
+
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
+
+ if (q->read_ptr == q->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ iwl_trans_pcie_unref(trans);
+ }
+
+out:
+ spin_unlock_bh(&txq->lock);
+}
+
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
+ const struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+ !trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = true;
+ IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+ iwl_trans_pcie_ref(trans);
+ }
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+	 * command - we will let the NIC sleep once all the host commands have
+	 * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
+ */
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_hold_nic_awake) {
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
+ return -EIO;
+ }
+ trans_pcie->cmd_hold_nic_awake = true;
+ }
+
+ return 0;
+}
+
+static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+ iwl_trans_pcie_unref(trans);
+ }
+
+ if (trans->cfg->base_params->apmg_wake_up_wa) {
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+ return 0;
+
+ trans_pcie->cmd_hold_nic_awake = false;
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ }
+ return 0;
+}
+
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ unsigned long flags;
+ int nfreed = 0;
+
+ lockdep_assert_held(&txq->lock);
+
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
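+	/* Host commands complete one at a time, so reclaiming more than one
+	 * entry here means the firmware skipped a command; force an NMI to
+	 * collect its debug data. */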
+ for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, q->write_ptr, q->read_ptr);
+ iwl_force_nmi(trans);
+ }
+ }
+
+ if (q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ iwl_pcie_clear_cmd_in_flight(trans);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
+ iwl_pcie_txq_progress(txq);
+}
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+ u16 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
+
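+	/* Each dword of the translation table maps two queues: odd queue
+	 * ids in the upper 16 bits, even ones in the lower 16 bits. */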
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+/* Receiver address (actually, the Rx station's index into the station
+ * table), combined with Traffic ID (QOS priority), in the format used
+ * by the Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
+
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ int fifo = -1;
+
+ if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+ WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+
+ txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
+
+ if (cfg) {
+ fifo = cfg->fifo;
+
+		/* Disable the scheduler prior to configuring the cmd queue */
+ if (txq_id == trans_pcie->cmd_queue &&
+ trans_pcie->scd_set_active)
+ iwl_scd_enable_set_active(trans, 0);
+
+ /* Stop this Tx queue before configuring it */
+ iwl_scd_txq_set_inactive(trans, txq_id);
+
+ /* Set this queue as a chain-building queue unless it is CMD */
+ if (txq_id != trans_pcie->cmd_queue)
+ iwl_scd_txq_set_chain(trans, txq_id);
+
+ if (cfg->aggregate) {
+ u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+ /* enable aggregations for the queue */
+ iwl_scd_txq_enable_agg(trans, txq_id);
+ txq->ampdu = true;
+ } else {
+ /*
+ * disable aggregations for the queue, this will also
+ * make the ra_tid mapping configuration irrelevant
+ * since it is now a non-AGG queue.
+ */
+ iwl_scd_txq_disable_agg(trans, txq_id);
+
+ ssn = txq->q.read_ptr;
+ }
+ }
+
+	/* Place first TFD at index corresponding to start sequence number.
+	 * Assumes that ssn is valid (!= 0xFFF) */
+ txq->q.read_ptr = (ssn & 0xff);
+ txq->q.write_ptr = (ssn & 0xff);
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
+
+ if (cfg) {
+ u8 frame_limit = cfg->frame_limit;
+
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_trans_write_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+
+ /* enable the scheduler for this queue (only) */
+ if (txq_id == trans_pcie->cmd_queue &&
+ trans_pcie->scd_set_active)
+ iwl_scd_enable_set_active(trans, BIT(txq_id));
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d on FIFO %d WrPtr: %d\n",
+ txq_id, fifo, ssn & 0xff);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Activate queue %d WrPtr: %d\n",
+ txq_id, ssn & 0xff);
+ }
+
+ txq->active = true;
+}
+
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
+ bool configure_scd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 stts_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+ static const u32 zero_val[4] = {};
+
+ trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
+ trans_pcie->txq[txq_id].frozen = false;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+	 * in the op_mode. To keep the op_mode simple, allow it to call
+	 * txq_disable even after it has already called stop_device.
+ */
+ if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
+ return;
+ }
+
+ if (configure_scd) {
+ iwl_scd_txq_set_inactive(trans, txq_id);
+
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
+ }
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+ trans_pcie->txq[txq_id].ampdu = false;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
+ * @trans: the transport (device private data)
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in
+ * the command queue.
+ */
+static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int idx;
+ u16 copy_size, cmd_size, scratch_size;
+ bool had_nocopy = false;
+ int i, ret;
+ u32 cmd_pos;
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+
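+	/* copy_size counts only the bytes copied into the command buffer
+	 * (scratchbuf plus TFD payload); cmd_size is the logical command
+	 * length, including NOCOPY/DUP chunks. */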
+ copy_size = sizeof(out_cmd->hdr);
+ cmd_size = sizeof(out_cmd->hdr);
+
+ /* need one for the header if the first is NOCOPY */
+ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
+
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
+ if (!cmd->len[i])
+ continue;
+
+ /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+
+ /*
+ * If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+ * allocated into separate TFDs, then we will need to
+ * increase the size of the buffers.
+ */
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ spin_lock_bh(&txq->lock);
+
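+	/* An async command needs two free slots, a sync command only one:
+	 * the last slot stays in reserve for synchronous commands. */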
+ if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_bh(&txq->lock);
+
+ IWL_ERR(trans, "No space in command queue\n");
+ iwl_op_mode_cmd_queue_full(trans->op_mode);
+ idx = -ENOSPC;
+ goto free_dup_buf;
+ }
+
+ idx = get_cmd_index(q, q->write_ptr);
+ out_cmd = txq->entries[idx].cmd;
+ out_meta = &txq->entries[idx].meta;
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+
+ /* set up the header */
+
+ out_cmd->hdr.cmd = cmd->id;
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+
+ /* and copy the data that needs to be copied */
+ cmd_pos = offsetof(struct iwl_device_cmd, payload);
+ copy_size = sizeof(out_cmd->hdr);
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
+ if (!cmd->len[i])
+ continue;
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+ * in total (for the scratchbuf handling), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+		/* However, keep copy_size accurate - we need it below */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
+ }
+ }
+
+ IWL_DEBUG_HC(trans,
+ "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+
+ /* start the TFD with the scratchbuf */
+ scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+ memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+ iwl_pcie_txq_build_tfd(trans, txq,
+ iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+ scratch_size, true);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > scratch_size) {
+ phys_addr = dma_map_single(trans->dev,
+ ((u8 *)&out_cmd->hdr) + scratch_size,
+ copy_size - scratch_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+ copy_size - scratch_size, false);
+ }
+
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ const void *data = cmddata[i];
+
+ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ continue;
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
+ cmdlen[i], DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ idx = -ENOMEM;
+ goto out;
+ }
+
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
+ }
+
+ out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kzfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
+
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+
+ /* start timer if queue currently empty */
+ if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
+ if (ret < 0) {
+ idx = ret;
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ goto out;
+ }
+
+ /* Increment and update queue's write index */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+ out:
+ spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
+ return idx;
+}
+
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ * @handler_status: return value of the handler of the command
+ * (put in setup_rx_handlers)
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index;
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue then a command routing bug has been introduced
+	 * in the queue management code. */
+ if (WARN(txq_id != trans_pcie->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ iwl_print_hex_error(trans, pkt, 32);
+ return;
+ }
+
+ spin_lock_bh(&txq->lock);
+
+ cmd_index = get_cmd_index(&txq->q, index);
+ cmd = txq->entries[cmd_index].cmd;
+ meta = &txq->entries[cmd_index].meta;
+
+ iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+
+	/* Input error checking is done when commands are added to the queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ struct page *p = rxb_steal_page(rxb);
+
+ meta->source->resp_pkt = pkt;
+ meta->source->_rx_page_addr = (unsigned long)page_address(p);
+ meta->source->_rx_page_order = trans_pcie->rx_page_order;
+ meta->source->handler_status = handler_status;
+ }
+
+ iwl_pcie_cmdq_reclaim(trans, txq_id, index);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
+ IWL_WARN(trans,
+ "HCMD_ACTIVE already clear for command %s\n",
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
+ }
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
+ wake_up(&trans_pcie->wait_command_queue);
+ }
+
+ meta->flags = 0;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+	/* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(trans_pcie, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n",
+ get_cmd_string(trans_pcie, cmd->id)))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(trans_pcie, cmd->id), ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ get_cmd_string(trans_pcie, cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -ETIMEDOUT;
+
+ iwl_force_nmi(trans);
+ iwl_trans_fw_error(trans);
+
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ dump_stack();
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise, if the response comes
+		 * in later, the completion path could write through
+		 * an invalid address (cmd->meta.source).
+ */
+ trans_pcie->txq[trans_pcie->cmd_queue].
+ entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (cmd->flags & CMD_ASYNC)
+ return iwl_pcie_send_hcmd_async(trans, cmd);
+
+	/* We can still fail with -ERFKILL if rfkill is asserted while we wait */
+ return iwl_pcie_send_hcmd_sync(trans, cmd);
+}
+
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq;
+ struct iwl_queue *q;
+ dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+ void *tb1_addr;
+ u16 len, tb1_len, tb2_len;
+ bool wait_write_ptr;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 wifi_seq;
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ spin_lock(&txq->lock);
+
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement that helps the SCD parse
+	 * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(txq->ampdu &&
+ (wifi_seq & 0xff) != q->write_ptr,
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[q->write_ptr].skb = skb;
+ txq->entries[q->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+ scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[q->write_ptr].meta;
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+ hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+ tb1_len = ALIGN(len, 4);
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (tb1_len != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* The first TB points to the scratchbuf data - min_copy bytes */
+ memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+ IWL_HCMD_SCRATCHBUF_SIZE);
+ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+ IWL_HCMD_SCRATCHBUF_SIZE, true);
+
+ /* there must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+ tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+ goto out_err;
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+
+ /*
+ * Set up TFD's third entry to point directly to remainder
+ * of skb, if any (802.11 null frames have no payload).
+ */
+ tb2_len = skb->len - hdr_len;
+ if (tb2_len > 0) {
+ dma_addr_t tb2_phys = dma_map_single(trans->dev,
+ skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ goto out_err;
+ }
+ iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ &txq->tfds[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+ skb->data + hdr_len, tb2_len);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb,
+ skb->data + hdr_len, tb2_len);
+
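+	/* If more fragments follow this frame, hold off on updating the HW
+	 * write pointer until the last fragment is queued. */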
+ wait_write_ptr = ieee80211_has_morefrags(fc);
+
+ /* start timer if queue currently empty */
+ if (q->read_ptr == q->write_ptr) {
+ if (txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ iwl_trans_pcie_ref(trans);
+ }
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ else
+ iwl_stop_queue(trans, txq);
+ }
+ spin_unlock(&txq->lock);
+ return 0;
+out_err:
+ spin_unlock(&txq->lock);
+ return -1;
+}