Diffstat (limited to 'drivers/net/wireless/ath')
-rw-r--r--drivers/net/wireless/ath/Kconfig62
-rw-r--r--drivers/net/wireless/ath/Makefile24
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ar5523/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1800
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.h151
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523_hw.h431
-rw-r--r--drivers/net/wireless/ath/ath.h328
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig47
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c318
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h225
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c1165
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h438
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c1446
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h706
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2155
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h166
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c243
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h225
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c874
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h359
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c114
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h1431
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2065
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c585
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c58
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h489
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c5628
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h72
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2773
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h269
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h1032
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c548
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h90
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h453
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c385
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.h46
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h70
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c244
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h58
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h533
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c228
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h38
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h1063
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2796
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h1459
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c5513
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h4948
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig75
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile22
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c233
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c754
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h119
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1717
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c359
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3205
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h121
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c154
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c1139
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h165
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c786
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h367
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c928
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c1796
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h495
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c213
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c1605
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c203
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c834
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c343
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1010
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c3965
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c731
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2604
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c1380
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h853
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h534
-rw-r--r--drivers/net/wireless/ath/ath5k/rfkill.c116
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c122
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h106
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig65
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile49
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c548
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h271
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c3882
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h66
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h85
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c362
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h990
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c1863
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h143
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h187
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c702
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h282
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc-ops.h113
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h677
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2935
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c1737
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1916
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1313
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c160
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1440
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h356
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c102
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h332
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c1877
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c1228
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4166
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2731
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig178
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile76
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c196
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c531
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h125
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c849
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h674
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1364
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1089
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c990
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c456
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h3180
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c428
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c730
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h617
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c599
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.h61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h126
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c1719
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c5510
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h359
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c1184
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c616
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h123
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1573
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h392
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c1013
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2251
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h1326
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c258
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.h65
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c454
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h1020
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h507
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h1298
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h1250
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h291
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1240
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h1355
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h760
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h1168
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar956x_initvals.h1046
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h1361
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1104
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c686
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c425
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h137
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c444
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h123
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c1606
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c180
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c253
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h72
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c620
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h144
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c415
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h91
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c1395
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h325
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c268
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c207
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h45
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c151
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h70
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c351
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.h103
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c581
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h717
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c1123
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c1035
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c1362
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c514
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1383
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h116
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h645
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c505
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c524
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c332
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1024
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1877
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c1177
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c511
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h230
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h290
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3258
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1215
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c1061
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c540
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c976
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h750
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2657
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c767
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1073
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c1176
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h2051
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h168
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_mci.h310
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_wow.h136
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c278
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c352
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h207
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c348
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2978
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig56
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h670
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c222
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h174
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c884
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.h134
-rw-r--r--drivers/net/wireless/ath/carl9170/eeprom.h216
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c447
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h326
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h277
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h817
-rw-r--r--drivers/net/wireless/ath/carl9170/led.c190
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c538
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2113
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c1729
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h564
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c1012
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1711
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c1201
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h435
-rw-r--r--drivers/net/wireless/ath/debug.c47
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c363
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.h112
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c429
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.h77
-rw-r--r--drivers/net/wireless/ath/hw.c190
-rw-r--r--drivers/net/wireless/ath/key.c609
-rw-r--r--drivers/net/wireless/ath/main.c92
-rw-r--r--drivers/net/wireless/ath/reg.h65
-rw-r--r--drivers/net/wireless/ath/regd.c805
-rw-r--r--drivers/net/wireless/ath/regd.h263
-rw-r--r--drivers/net/wireless/ath/regd_common.h478
-rw-r--r--drivers/net/wireless/ath/spectral_common.h113
-rw-r--r--drivers/net/wireless/ath/trace.c20
-rw-r--r--drivers/net/wireless/ath/trace.h71
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c181
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c816
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h284
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4659
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1114
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c62
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.h33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2235
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h130
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c330
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h167
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h266
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig41
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile22
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c1047
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c78
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c1462
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c117
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.c44
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h149
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c493
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c625
-rw-r--r--drivers/net/wireless/ath/wil6210/ioctl.c173
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c927
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c235
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c304
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c476
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h239
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c1453
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h493
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h780
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c39
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h34
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c1357
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1352
299 files changed, 227886 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
new file mode 100644
index 000000000..ce7826009
--- /dev/null
+++ b/drivers/net/wireless/ath/Kconfig
@@ -0,0 +1,62 @@
+config ATH_COMMON
+ tristate
+
+menuconfig ATH_CARDS
+ tristate "Atheros Wireless Cards"
+ depends on CFG80211 && (!UML || BROKEN)
+ ---help---
+ This will enable support for the Atheros wireless drivers.
+ The ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code;
+ this option enables the common ath.ko module which provides those shared helpers.
+
+ For more information and documentation on this module you can visit:
+
+ http://wireless.kernel.org/en/users/Drivers/ath
+
+ For information on all Atheros wireless drivers visit:
+
+ http://wireless.kernel.org/en/users/Drivers/Atheros
+
+if ATH_CARDS
+
+config ATH_DEBUG
+ bool "Atheros wireless debugging"
+ ---help---
+ Say Y if you want to debug Atheros wireless drivers.
+ Right now only ath9k makes use of this.
+
+config ATH_TRACEPOINTS
+ bool "Atheros wireless tracing"
+ depends on ATH_DEBUG
+ depends on EVENT_TRACING
+ ---help---
+ This option enables tracepoints for Atheros wireless drivers.
+ Currently, ath9k makes use of this facility.
+
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+ bool "Atheros dynamic user regulatory hints"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled in countries where
+ this feature is explicitly allowed and only on cards that
+ specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+ bool "Atheros dynamic user regulatory testing"
+ depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems
+ undergoing certification testing.
+
+source "drivers/net/wireless/ath/ath5k/Kconfig"
+source "drivers/net/wireless/ath/ath9k/Kconfig"
+source "drivers/net/wireless/ath/carl9170/Kconfig"
+source "drivers/net/wireless/ath/ath6kl/Kconfig"
+source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
+source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
+
+endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
new file mode 100644
index 000000000..89f8d5979
--- /dev/null
+++ b/drivers/net/wireless/ath/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_ATH5K) += ath5k/
+obj-$(CONFIG_ATH9K_HW) += ath9k/
+obj-$(CONFIG_CARL9170) += carl9170/
+obj-$(CONFIG_ATH6KL) += ath6kl/
+obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
+obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
+
+obj-$(CONFIG_ATH_COMMON) += ath.o
+
+ath-objs := main.o \
+ regd.o \
+ hw.o \
+ key.o \
+ dfs_pattern_detector.o \
+ dfs_pri_detector.o
+
+ath-$(CONFIG_ATH_DEBUG) += debug.o
+ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
+
+ccflags-y += -D__CHECK_ENDIAN__
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 000000000..0d320cc77
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,8 @@
+config AR5523
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select ATH_COMMON
+ select FW_LOADER
+ ---help---
+ This module adds support for AR5523-based USB dongles such as the D-Link
+ DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 000000000..ebf7f3bf0
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 000000000..9dc8c7abf
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1800 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This driver is based on the uath driver written by Damien Bergamini for
+ * OpenBSD, who did black-box analysis of the Windows binary driver to find
+ * out how the hardware works. It contains a lot of magic numbers because of
+ * that and only has minimal functionality.
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "ar5523.h"
+#include "ar5523_hw.h"
+
+/*
+ * Various supported device vendors/products.
+ * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
+ */
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar);
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
+
+static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
+ struct ar5523_tx_cmd *cmd)
+{
+ int dlen, olen;
+ __be32 *rp;
+
+ dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);
+
+ if (dlen < 0) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
+ dlen);
+
+ rp = (__be32 *)(hdr + 1);
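+ /* The reply payload follows the command header: word 0 holds the output length, the output data starts at word 1. */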
+ if (dlen >= sizeof(u32)) {
+ olen = be32_to_cpu(rp[0]);
+ dlen -= sizeof(u32);
+ if (olen == 0) {
+ /* by convention, 0 means one word */
+ olen = sizeof(u32);
+ }
+ } else
+ olen = 0;
+
+ if (cmd->odata) {
+ if (cmd->olen < olen) {
+ ar5523_err(ar, "olen to small %d < %d\n",
+ cmd->olen, olen);
+ cmd->olen = 0;
+ cmd->res = -EOVERFLOW;
+ } else {
+ cmd->olen = olen;
+ memcpy(cmd->odata, &rp[1], olen);
+ cmd->res = 0;
+ }
+ }
+
+out:
+ complete(&cmd->done);
+}
+
+static void ar5523_cmd_rx_cb(struct urb *urb)
+{
+ struct ar5523 *ar = urb->context;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
+ int dlen;
+ u32 code, hdrlen;
+
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "RX USB error %d.\n", urb->status);
+ goto skip;
+ }
+
+ if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
+ ar5523_err(ar, "RX USB to short.\n");
+ goto skip;
+ }
+
+ ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
+ be32_to_cpu(hdr->code) & 0xff, hdr->priv);
+
+ code = be32_to_cpu(hdr->code);
+ hdrlen = be32_to_cpu(hdr->len);
+
+ switch (code & 0xff) {
+ default:
+ /* reply to a read command */
+ if (hdr->priv != AR5523_CMD_ID) {
+ ar5523_err(ar, "Unexpected command id: %02x\n",
+ code & 0xff);
+ goto skip;
+ }
+ ar5523_read_reply(ar, hdr, cmd);
+ break;
+
+ case WDCMSG_DEVICE_AVAIL:
+ ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
+ cmd->res = 0;
+ cmd->olen = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_SEND_COMPLETE:
+ ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
+ atomic_read(&ar->tx_nr_pending));
+ if (!test_bit(AR5523_HW_UP, &ar->flags))
+ ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
+ else {
+ mod_timer(&ar->tx_wd_timer,
+ jiffies + AR5523_TX_WD_TIMEOUT);
+ ar5523_data_tx_pkt_put(ar);
+
+ }
+ break;
+
+ case WDCMSG_TARGET_START:
+ /* This command returns a bogus id so it needs special
+ handling */
+ dlen = hdrlen - sizeof(*hdr);
+ if (dlen != (int)sizeof(u32)) {
+ ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
+ return;
+ }
+ memcpy(cmd->odata, hdr + 1, sizeof(u32));
+ cmd->olen = sizeof(u32);
+ cmd->res = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_STATS_UPDATE:
+ ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
+ break;
+ }
+
+skip:
+ ar5523_submit_rx_cmd(ar);
+}
+
+static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
+{
+ ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ar->rx_cmd_urb)
+ return -ENOMEM;
+
+ ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ GFP_KERNEL,
+ &ar->rx_cmd_urb->transfer_dma);
+ if (!ar->rx_cmd_buf) {
+ usb_free_urb(ar->rx_cmd_urb);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->rx_cmd_urb);
+}
+
+static void ar5523_free_rx_cmd(struct ar5523 *ar)
+{
+ usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
+ usb_free_urb(ar->rx_cmd_urb);
+}
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar)
+{
+ int error;
+
+ usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
+ ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
+ AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
+ ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
+ if (error) {
+ if (error != -ENODEV)
+ ar5523_err(ar, "error %d when submitting rx urb\n",
+ error);
+ return error;
+ }
+ return 0;
+}
+
+/*
+ * Command submitted cb
+ */
+static void ar5523_cmd_tx_cb(struct urb *urb)
+{
+ struct ar5523_tx_cmd *cmd = urb->context;
+ struct ar5523 *ar = cmd->ar;
+
+ if (urb->status) {
+ ar5523_err(ar, "Failed to TX command. Status = %d\n",
+ urb->status);
+ cmd->res = urb->status;
+ complete(&cmd->done);
+ return;
+ }
+
+ if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
+ cmd->res = 0;
+ complete(&cmd->done);
+ }
+}
+
+static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ struct ar5523_cmd_hdr *hdr;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ int xferlen, error;
+
+ /* always bulk-out a multiple of 4 bytes */
+ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
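+ /* e.g. if the header plus idata comes to 17 bytes, xferlen rounds up to 20 */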
+
+ hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
+ memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
+ hdr->len = cpu_to_be32(xferlen);
+ hdr->code = cpu_to_be32(code);
+ hdr->priv = AR5523_CMD_ID;
+
+ if (flags & AR5523_CMD_FLAG_MAGIC)
+ hdr->magic = cpu_to_be32(1 << 24);
+ memcpy(hdr + 1, idata, ilen);
+
+ cmd->odata = odata;
+ cmd->olen = olen;
+ cmd->flags = flags;
+
+ ar5523_dbg(ar, "do cmd %02x\n", code);
+
+ usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
+ cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
+ cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "could not send command 0x%x, error=%d\n",
+ code, error);
+ return error;
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+ cmd->res = -ETIMEDOUT;
+ }
+ return cmd->res;
+}
+
+static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
+ int len, int flags)
+{
+ flags &= ~AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
+}
+
+static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ flags |= AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
+}
+
+static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(0); /* 0 = single write */
+ *(__be32 *)write.data = cpu_to_be32(val);
+
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ 3 * sizeof(u32), 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write register 0x%02x\n", reg);
+ return error;
+}
+
+static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
+ int len)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(len);
+ memcpy(write.data, data, len);
+
+ /* properly handle the case where len is zero (reset) */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
+ len, reg);
+ return error;
+}
+
+static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
+ int olen)
+{
+ int error;
+ __be32 which_be;
+
+ which_be = cpu_to_be32(which);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
+ &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
+ if (error != 0)
+ ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which);
+ return error;
+}
+
+static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
+{
+ int error;
+ __be32 cap_be, val_be;
+
+ cap_be = cpu_to_be32(cap);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be,
+ sizeof(cap_be), &val_be, sizeof(__be32),
+ AR5523_CMD_FLAG_MAGIC);
+ if (error != 0) {
+ ar5523_err(ar, "could not read capability %u\n", cap);
+ return error;
+ }
+ *val = be32_to_cpu(val_be);
+ return error;
+}
+
+static int ar5523_get_devcap(struct ar5523 *ar)
+{
+#define GETCAP(x) do { \
+ error = ar5523_get_capability(ar, x, &cap); \
+ if (error != 0) \
+ return error; \
+ ar5523_info(ar, "Cap: " \
+ "%s=0x%08x\n", #x, cap); \
+} while (0)
+ int error;
+ u32 cap;
+
+ /* collect device capabilities */
+ GETCAP(CAP_TARGET_VERSION);
+ GETCAP(CAP_TARGET_REVISION);
+ GETCAP(CAP_MAC_VERSION);
+ GETCAP(CAP_MAC_REVISION);
+ GETCAP(CAP_PHY_REVISION);
+ GETCAP(CAP_ANALOG_5GHz_REVISION);
+ GETCAP(CAP_ANALOG_2GHz_REVISION);
+
+ GETCAP(CAP_REG_DOMAIN);
+ GETCAP(CAP_REG_CAP_BITS);
+ GETCAP(CAP_WIRELESS_MODES);
+ GETCAP(CAP_CHAN_SPREAD_SUPPORT);
+ GETCAP(CAP_COMPRESS_SUPPORT);
+ GETCAP(CAP_BURST_SUPPORT);
+ GETCAP(CAP_FAST_FRAMES_SUPPORT);
+ GETCAP(CAP_CHAP_TUNING_SUPPORT);
+ GETCAP(CAP_TURBOG_SUPPORT);
+ GETCAP(CAP_TURBO_PRIME_SUPPORT);
+ GETCAP(CAP_DEVICE_TYPE);
+ GETCAP(CAP_WME_SUPPORT);
+ GETCAP(CAP_TOTAL_QUEUES);
+ GETCAP(CAP_CONNECTION_ID_MAX);
+
+ GETCAP(CAP_LOW_5GHZ_CHAN);
+ GETCAP(CAP_HIGH_5GHZ_CHAN);
+ GETCAP(CAP_LOW_2GHZ_CHAN);
+ GETCAP(CAP_HIGH_2GHZ_CHAN);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
+
+ GETCAP(CAP_CIPHER_AES_CCM);
+ GETCAP(CAP_CIPHER_TKIP);
+ GETCAP(CAP_MIC_TKIP);
+ return 0;
+}
+
+static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
+{
+ struct ar5523_cmd_ledsteady led;
+
+ led.lednum = cpu_to_be32(lednum);
+ led.ledmode = cpu_to_be32(ledmode);
+
+ ar5523_dbg(ar, "set %s led %s (steady)\n",
+ (lednum == UATH_LED_LINK) ? "link" : "activity",
+ ledmode ? "on" : "off");
+ return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
+ 0);
+}
+
+static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
+{
+ struct ar5523_cmd_rx_filter rxfilter;
+
+ rxfilter.bits = cpu_to_be32(bits);
+ rxfilter.op = cpu_to_be32(op);
+
+ ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
+ return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
+ sizeof(rxfilter), 0);
+}
+
+static int ar5523_reset_tx_queues(struct ar5523 *ar)
+{
+ __be32 qid = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "resetting Tx queue\n");
+ return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
+ &qid, sizeof(qid), 0);
+}
+
+static int ar5523_set_chan(struct ar5523 *ar)
+{
+ struct ieee80211_conf *conf = &ar->hw->conf;
+
+ struct ar5523_cmd_reset reset;
+
+ memset(&reset, 0, sizeof(reset));
+ reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
+ reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
+ reset.freq = cpu_to_be32(conf->chandef.chan->center_freq);
+ reset.maxrdpower = cpu_to_be32(50); /* XXX */
+ reset.channelchange = cpu_to_be32(1);
+ reset.keeprccontent = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
+ be32_to_cpu(reset.flags),
+ conf->chandef.chan->center_freq);
+ return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
+}
+
+static int ar5523_queue_init(struct ar5523 *ar)
+{
+ struct ar5523_cmd_txq_setup qinfo;
+
+ ar5523_dbg(ar, "setting up Tx queue\n");
+ qinfo.qid = cpu_to_be32(0);
+ qinfo.len = cpu_to_be32(sizeof(qinfo.attr));
+ qinfo.attr.priority = cpu_to_be32(0); /* XXX */
+ qinfo.attr.aifs = cpu_to_be32(3);
+ qinfo.attr.logcwmin = cpu_to_be32(4);
+ qinfo.attr.logcwmax = cpu_to_be32(10);
+ qinfo.attr.bursttime = cpu_to_be32(0);
+ qinfo.attr.mode = cpu_to_be32(0);
+ qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */
+ return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
+ sizeof(qinfo), 0);
+}
+
+static int ar5523_switch_chan(struct ar5523 *ar)
+{
+ int error;
+
+ error = ar5523_set_chan(ar);
+ if (error) {
+ ar5523_err(ar, "could not set chan, error %d\n", error);
+ goto out_err;
+ }
+
+ /* reset Tx rings */
+ error = ar5523_reset_tx_queues(ar);
+ if (error) {
+ ar5523_err(ar, "could not reset Tx queues, error %d\n",
+ error);
+ goto out_err;
+ }
+ /* set Tx rings WME properties */
+ error = ar5523_queue_init(ar);
+ if (error)
+ ar5523_err(ar, "could not init wme, error %d\n", error);
+
+out_err:
+ return error;
+}
+
+static void ar5523_rx_data_put(struct ar5523 *ar,
+ struct ar5523_rx_data *data)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_free);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+}
+
+static void ar5523_data_rx_cb(struct urb *urb)
+{
+ struct ar5523_rx_data *data = urb->context;
+ struct ar5523 *ar = data->ar;
+ struct ar5523_rx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_rx_status *rx_status;
+ u32 rxlen;
+ int usblen = urb->actual_length;
+ int hdrlen, pad;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "%s: USB err: %d\n", __func__,
+ urb->status);
+ goto skip;
+ }
+
+ if (usblen < AR5523_MIN_RXBUFSZ) {
+ ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
+ goto skip;
+ }
+
+ chunk = (struct ar5523_chunk *) data->skb->data;
+
+ if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
+ chunk->seqnum != 0) {
+ ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
+ chunk->seqnum, chunk->flags,
+ be16_to_cpu(chunk->length));
+ goto skip;
+ }
+
+ /* Rx descriptor is located at the end, 32-bit aligned */
+ desc = (struct ar5523_rx_desc *)
+ (data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
+
+ rxlen = be32_to_cpu(desc->len);
+ if (rxlen > ar->rxbufsz) {
+ ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
+ be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ if (!rxlen) {
+ ar5523_dbg(ar, "RX: rxlen is 0\n");
+ goto skip;
+ }
+
+ if (be32_to_cpu(desc->status) != 0) {
+ ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
+ be32_to_cpu(desc->status), be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ skb_reserve(data->skb, sizeof(*chunk));
+ skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
+ if (!IS_ALIGNED(hdrlen, 4)) {
+ ar5523_dbg(ar, "eek, alignment workaround activated\n");
+ pad = ALIGN(hdrlen, 4) - hdrlen;
+ memmove(data->skb->data + pad, data->skb->data, hdrlen);
+ skb_pull(data->skb, pad);
+ skb_put(data->skb, pad);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(data->skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = be32_to_cpu(desc->channel);
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->signal = -95 + be32_to_cpu(desc->rssi);
+
+ ieee80211_rx_irqsafe(hw, data->skb);
+ data->skb = NULL;
+
+skip:
+ if (data->skb) {
+ dev_kfree_skb_irq(data->skb);
+ data->skb = NULL;
+ }
+
+ ar5523_rx_data_put(ar, data);
+ if (atomic_inc_return(&ar->rx_data_free_cnt) >=
+ AR5523_RX_DATA_REFILL_COUNT &&
+ test_bit(AR5523_HW_UP, &ar->flags))
+ queue_work(ar->wq, &ar->rx_refill_work);
+}
+
+static void ar5523_rx_refill_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+
+ if (!list_empty(&ar->rx_data_free))
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ goto done;
+
+ data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
+ if (!data->skb) {
+ ar5523_err(ar, "could not allocate rx skbuff\n");
+ return;
+ }
+
+ usb_fill_bulk_urb(data->urb, ar->dev,
+ ar5523_data_rx_pipe(ar->dev), data->skb->data,
+ ar->rxbufsz, ar5523_data_rx_cb, data);
+
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_used);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+ atomic_dec(&ar->rx_data_free_cnt);
+
+ error = usb_submit_urb(data->urb, GFP_KERNEL);
+ if (error) {
+ kfree_skb(data->skb);
+ if (error != -ENODEV)
+ ar5523_err(ar, "Err sending rx data urb %d\n",
+ error);
+ ar5523_rx_data_put(ar, data);
+ atomic_inc(&ar->rx_data_free_cnt);
+ return;
+ }
+
+ } while (true);
+done:
+ return;
+}
+
+static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ if (!list_empty(&ar->rx_data_used))
+ data = (struct ar5523_rx_data *) ar->rx_data_used.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ usb_kill_urb(data->urb);
+ list_move(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ } while (data);
+}
+
+static void ar5523_free_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+
+ ar5523_cancel_rx_bufs(ar);
+ while (!list_empty(&ar->rx_data_free)) {
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ list_del(&data->list);
+ usb_free_urb(data->urb);
+ }
+}
+
+static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
+{
+ int i;
+
+ for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
+ struct ar5523_rx_data *data = &ar->rx_data[i];
+
+ data->ar = ar;
+ data->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!data->urb) {
+ ar5523_err(ar, "could not allocate rx data urb\n");
+ goto err;
+ }
+ list_add_tail(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ }
+ return 0;
+
+err:
+ ar5523_free_rx_bufs(ar);
+ return -ENOMEM;
+}
+
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
+{
+ atomic_dec(&ar->tx_nr_total);
+ if (!atomic_dec_return(&ar->tx_nr_pending)) {
+ del_timer(&ar->tx_wd_timer);
+ wake_up(&ar->tx_flush_waitq);
+ }
+
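+ /* Wake the mac80211 queues again once enough frames have drained; they are stopped in ar5523_tx() when AR5523_TX_DATA_COUNT frames are outstanding. */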
+ if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
+ ar5523_dbg(ar, "restart tx queue\n");
+ ieee80211_wake_queues(ar->hw);
+ }
+}
+
+static void ar5523_data_tx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = data->ar;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (urb->status) {
+ ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
+ ar5523_data_tx_pkt_put(ar);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+ }
+ usb_free_urb(urb);
+}
+
+static void ar5523_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = hw->priv;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "tx called\n");
+ if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
+ ar5523_dbg(ar, "tx queue full\n");
+ ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+ ieee80211_stop_queues(hw);
+ }
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_pending);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ ieee80211_queue_work(ar->hw, &ar->tx_work);
+}
+
+static void ar5523_tx_work_locked(struct ar5523 *ar)
+{
+ struct ar5523_tx_data *data;
+ struct ar5523_tx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_tx_info *txi;
+ struct urb *urb;
+ struct sk_buff *skb;
+ int error = 0, paylen;
+ u32 txqid;
+ unsigned long flags;
+
+ BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ if (!list_empty(&ar->tx_queue_pending)) {
+ data = (struct ar5523_tx_data *)
+ ar->tx_queue_pending.next;
+ list_del(&data->list);
+ } else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ txi = container_of((void *)data, struct ieee80211_tx_info,
+ driver_data);
+ txqid = 0;
+
+ skb = container_of((void *)txi, struct sk_buff, cb);
+ paylen = skb->len;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ar5523_err(ar, "Failed to allocate TX urb\n");
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ data->ar = ar;
+ data->urb = urb;
+
+ desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
+ chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
+
+ chunk->seqnum = 0;
+ chunk->flags = UATH_CFLAGS_FINAL;
+ chunk->length = cpu_to_be16(skb->len);
+
+ desc->msglen = cpu_to_be32(skb->len);
+ desc->msgid = AR5523_DATA_ID;
+ desc->buflen = cpu_to_be32(paylen);
+ desc->type = cpu_to_be32(WDCMSG_SEND);
+ desc->flags = cpu_to_be32(UATH_TX_NOTIFY);
+
+ if (test_bit(AR5523_CONNECTED, &ar->flags))
+ desc->connid = cpu_to_be32(AR5523_ID_BSS);
+ else
+ desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
+
+ if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ txqid |= UATH_TXQID_MINRATE;
+
+ desc->txqid = cpu_to_be32(txqid);
+
+ urb->transfer_flags = URB_ZERO_PACKET;
+ usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
+ skb->data, skb->len, ar5523_data_tx_cb, skb);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_submitted);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
+ atomic_inc(&ar->tx_nr_pending);
+
+ ar5523_dbg(ar, "TX Frame (%d pending)\n",
+ atomic_read(&ar->tx_nr_pending));
+ error = usb_submit_urb(urb, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "error %d when submitting tx urb\n",
+ error);
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ atomic_dec(&ar->tx_nr_pending);
+ ar5523_data_tx_pkt_put(ar);
+ usb_free_urb(urb);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } while (true);
+}
+
+static void ar5523_tx_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+ ar5523_tx_work_locked(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_tx_wd_timer(unsigned long arg)
+{
+ struct ar5523 *ar = (struct ar5523 *) arg;
+
+ ar5523_dbg(ar, "TX watchdog timer triggered\n");
+ ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
+}
+
+static void ar5523_tx_wd_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
+
+ /* Occasionally the TX queues stop responding. The only way to
+ * recover seems to be to reset the dongle.
+ */
+
+ mutex_lock(&ar->mutex);
+ ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+
+ ar5523_err(ar, "Will restart dongle.\n");
+ ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_flush_tx(struct ar5523 *ar)
+{
+ ar5523_tx_work_locked(ar);
+
+ /* Don't waste time trying to flush if USB is disconnected */
+ if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
+ return;
+ if (!wait_event_timeout(ar->tx_flush_waitq,
+ !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
+ ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+}
+
+static void ar5523_free_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, cmd->buf_tx,
+ cmd->urb_tx->transfer_dma);
+ usb_free_urb(cmd->urb_tx);
+}
+
+static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ cmd->ar = ar;
+ init_completion(&cmd->done);
+
+ cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+ if (!cmd->urb_tx) {
+ ar5523_err(ar, "could not allocate urb\n");
+ return -ENOMEM;
+ }
+ cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
+ GFP_KERNEL,
+ &cmd->urb_tx->transfer_dma);
+ if (!cmd->buf_tx) {
+ usb_free_urb(cmd->urb_tx);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * This function is called periodically (every second) when associated to
+ * query device statistics.
+ */
+static void ar5523_stat_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+
+ /*
+ * Send request for statistics asynchronously once a second. This
+ * seems to be important. Throughput is a lot better if this is done.
+ */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
+ if (error)
+ ar5523_err(ar, "could not query stats, error %d\n", error);
+ mutex_unlock(&ar->mutex);
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
+}
+
+/*
+ * Interface routines to the mac80211 stack.
+ */
+static int ar5523_start(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+ __be32 val;
+
+ ar5523_dbg(ar, "start called\n");
+
+ mutex_lock(&ar->mutex);
+ val = cpu_to_be32(0);
+ ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);
+
+ /* set MAC address */
+ ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
+ ETH_ALEN);
+
+ /* XXX honor net80211 state */
+ ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
+ ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
+ ar5523_config(ar, CFG_ABOLT, 0x0000003f);
+ ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);
+
+ ar5523_config(ar, CFG_SERVICE_TYPE, 1);
+ ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
+ ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
+ ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
+ ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
+ ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
+ ar5523_config(ar, CFG_MODE_CTS, 0x00000002);
+
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
+ &val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
+ if (error) {
+ ar5523_dbg(ar, "could not start target, error %d\n", error);
+ goto err;
+ }
+ ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
+ be32_to_cpu(val));
+
+ ar5523_switch_chan(ar);
+
+ val = cpu_to_be32(TARGET_DEVICE_AWAKE);
+ ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
+ /* XXX? check */
+ ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
+
+ set_bit(AR5523_HW_UP, &ar->flags);
+ queue_work(ar->wq, &ar->rx_refill_work);
+
+ /* enable Rx */
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar,
+ UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
+ UATH_FILTER_OP_SET);
+
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
+ ar5523_dbg(ar, "start OK\n");
+
+err:
+ mutex_unlock(&ar->mutex);
+ return error;
+}
+
+static void ar5523_stop(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "stop called\n");
+
+ cancel_delayed_work_sync(&ar->stat_work);
+ mutex_lock(&ar->mutex);
+ clear_bit(AR5523_HW_UP, &ar->flags);
+
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);
+
+ ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
+
+ del_timer_sync(&ar->tx_wd_timer);
+ cancel_work_sync(&ar->tx_wd_work);
+ cancel_work_sync(&ar->rx_refill_work);
+ ar5523_cancel_rx_bufs(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ar5523 *ar = hw->priv;
+ int ret;
+
+ ar5523_dbg(ar, "set_rts_threshold called\n");
+ mutex_lock(&ar->mutex);
+
+ ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "flush called\n");
+ ar5523_flush_tx(ar);
+}
+
+static int ar5523_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "add interface called\n");
+
+ if (ar->vif) {
+ ar5523_dbg(ar, "invalid add_interface\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ar->vif = vif;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void ar5523_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "remove interface called\n");
+ ar->vif = NULL;
+}
+
+static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "config called\n");
+ mutex_lock(&ar->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ar5523_dbg(ar, "Do channel switch\n");
+ ar5523_flush_tx(ar);
+ ar5523_switch_chan(ar);
+ }
+ mutex_unlock(&ar->mutex);
+ return 0;
+}
+
+static int ar5523_get_wlan_mode(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_supported_band *band;
+ int bit;
+ struct ieee80211_sta *sta;
+ u32 sta_rate_set;
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ if (!sta) {
+ ar5523_info(ar, "STA not found!\n");
+ return WLAN_MODE_11b;
+ }
+ sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (sta_rate_set & 1) {
+ int rate = band->bitrates[bit].bitrate;
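+ /* bitrate is in 100 kbps units, so 60..540 are the OFDM rates 6-54 Mbps; seeing any of them means the peer supports 802.11g. */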
+ switch (rate) {
+ case 60:
+ case 90:
+ case 120:
+ case 180:
+ case 240:
+ case 360:
+ case 480:
+ case 540:
+ return WLAN_MODE_11g;
+ }
+ }
+ sta_rate_set >>= 1;
+ }
+ return WLAN_MODE_11b;
+}
+
+static void ar5523_create_rateset(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ar5523_cmd_rateset *rs,
+ bool basic)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta *sta;
+ int bit, i = 0;
+ u32 sta_rate_set, basic_rate_set;
+
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ basic_rate_set = bss_conf->basic_rates;
+ if (!sta) {
+ ar5523_info(ar, "STA not found. Cannot set rates\n");
+ sta_rate_set = bss_conf->basic_rates;
+ } else
+ sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+
+ ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ BUG_ON(i >= AR5523_MAX_NRATES);
+ ar5523_dbg(ar, "Considering rate %d : %d\n",
+ band->bitrates[bit].hw_value, sta_rate_set & 1);
+ if (sta_rate_set & 1) {
+ rs->set[i] = band->bitrates[bit].hw_value;
+ if (basic_rate_set & 1 && basic)
+ rs->set[i] |= 0x80;
+ i++;
+ }
+ sta_rate_set >>= 1;
+ basic_rate_set >>= 1;
+ }
+
+ rs->length = i;
+}
+
+static int ar5523_set_basic_rates(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_rates rates;
+
+ memset(&rates, 0, sizeof(rates));
+ rates.connid = cpu_to_be32(2); /* XXX */
+ rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+ ar5523_create_rateset(ar, bss, &rates.rateset, true);
+
+ return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
+ sizeof(rates), 0);
+}
+
+static int ar5523_create_connection(struct ar5523 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_create_connection create;
+ int wlan_mode;
+
+ memset(&create, 0, sizeof(create));
+ create.connid = cpu_to_be32(2);
+ create.bssid = cpu_to_be32(0);
+ /* XXX packed or not? */
+ create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+
+ ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
+
+ wlan_mode = ar5523_get_wlan_mode(ar, bss);
+ create.connattr.wlanmode = cpu_to_be32(wlan_mode);
+
+ return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
+ sizeof(create), 0);
+}
+
+static int ar5523_write_associd(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_set_associd associd;
+
+ memset(&associd, 0, sizeof(associd));
+ associd.defaultrateix = cpu_to_be32(0); /* XXX */
+ associd.associd = cpu_to_be32(bss->aid);
+ associd.timoffset = cpu_to_be32(0x3b); /* XXX */
+ memcpy(associd.bssid, bss->bssid, ETH_ALEN);
+ return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
+ sizeof(associd), 0);
+}
+
+static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss,
+ u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+
+ ar5523_dbg(ar, "bss_info_changed called\n");
+ mutex_lock(&ar->mutex);
+
+ if (!(changed & BSS_CHANGED_ASSOC))
+ goto out_unlock;
+
+ if (bss->assoc) {
+ error = ar5523_create_connection(ar, vif, bss);
+ if (error) {
+ ar5523_err(ar, "could not create connection\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_set_basic_rates(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set negotiated rate set\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_write_associd(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set association\n");
+ goto out_unlock;
+ }
+
+ /* turn link LED on */
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
+ set_bit(AR5523_CONNECTED, &ar->flags);
+ ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
+
+ } else {
+ cancel_delayed_work(&ar->stat_work);
+ clear_bit(AR5523_CONNECTED, &ar->flags);
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ }
+
+out_unlock:
+ mutex_unlock(&ar->mutex);
+
+}
+
+#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_OTHER_BSS)
+
+static void ar5523_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ar5523 *ar = hw->priv;
+ u32 filter = 0;
+
+ ar5523_dbg(ar, "configure_filter called\n");
+ mutex_lock(&ar->mutex);
+ ar5523_flush_tx(ar);
+
+ *total_flags &= AR5523_SUPPORTED_FILTERS;
+
+ /* The filters seem strange. UATH_FILTER_RX_BCAST and
+ * UATH_FILTER_RX_MCAST do not result in those frames being RXed.
+ * The only way I have found to get [mb]cast frames seems to be
+ * to set UATH_FILTER_RX_PROM. */
+ filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
+ UATH_FILTER_RX_PROM;
+
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);
+
+ mutex_unlock(&ar->mutex);
+}
+
+static const struct ieee80211_ops ar5523_ops = {
+ .start = ar5523_start,
+ .stop = ar5523_stop,
+ .tx = ar5523_tx,
+ .set_rts_threshold = ar5523_set_rts_threshold,
+ .add_interface = ar5523_add_interface,
+ .remove_interface = ar5523_remove_interface,
+ .config = ar5523_hwconfig,
+ .bss_info_changed = ar5523_bss_info_changed,
+ .configure_filter = ar5523_configure_filter,
+ .flush = ar5523_flush,
+};
+
+static int ar5523_host_available(struct ar5523 *ar)
+{
+ struct ar5523_cmd_host_available setup;
+
+ /* inform target the host is available */
+ setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
+ setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
+ setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
+ setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
+ return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
+ &setup, sizeof(setup), NULL, 0, 0);
+}
+
+static int ar5523_get_devstatus(struct ar5523 *ar)
+{
+ u8 macaddr[ETH_ALEN];
+ int error;
+
+ /* retrieve MAC address */
+ error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
+ if (error) {
+ ar5523_err(ar, "could not read MAC address\n");
+ return error;
+ }
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
+
+ error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
+ &ar->serial[0], sizeof(ar->serial));
+ if (error) {
+ ar5523_err(ar, "could not read device serial number\n");
+ return error;
+ }
+ return 0;
+}
+
+#define AR5523_SANE_RXBUFSZ 2000
+
+static int ar5523_get_max_rxsz(struct ar5523 *ar)
+{
+ int error;
+ __be32 rxsize;
+
+ /* Get max rx size */
+ error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
+ sizeof(rxsize));
+ if (error != 0) {
+ ar5523_err(ar, "could not read max RX size\n");
+ return error;
+ }
+
+ ar->rxbufsz = be32_to_cpu(rxsize);
+
+ if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
+ ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
+ AR5523_SANE_RXBUFSZ);
+ ar->rxbufsz = AR5523_SANE_RXBUFSZ;
+ }
+
+ ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
+ return 0;
+}
+
+/*
+ * This is copied from rtl818x, but we should probably move this
+ * to common code as in OpenBSD.
+ */
+static const struct ieee80211_rate ar5523_rates[] = {
+ { .bitrate = 10, .hw_value = 2, },
+ { .bitrate = 20, .hw_value = 4 },
+ { .bitrate = 55, .hw_value = 11, },
+ { .bitrate = 110, .hw_value = 22, },
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+};
+
+static const struct ieee80211_channel ar5523_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+
+static int ar5523_init_modes(struct ar5523 *ar)
+{
+ BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
+ BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
+
+ memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
+ memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
+
+ ar->band.band = IEEE80211_BAND_2GHZ;
+ ar->band.channels = ar->channels;
+ ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
+ ar->band.bitrates = ar->rates;
+ ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+ return 0;
+}
+
+/*
+ * Load the MIPS R4000 microcode into the device. Once the image is loaded,
+ * the device will detach itself from the bus and reattach later with a new
+ * product ID (a la ezusb).
+ */
+static int ar5523_load_firmware(struct usb_device *dev)
+{
+ struct ar5523_fwblock *txblock, *rxblock;
+ const struct firmware *fw;
+ void *fwbuf;
+ int len, offset;
+ int foolen; /* XXX(hch): handle short transfers */
+ int error = -ENXIO;
+
+ if (reject_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
+ dev_err(&dev->dev, "no firmware found: %s\n",
+ AR5523_FIRMWARE_FILE);
+ return -ENOENT;
+ }
+
+ txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ if (!txblock)
+ goto out;
+
+ rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
+ if (!rxblock)
+ goto out_free_txblock;
+
+ fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
+ if (!fwbuf)
+ goto out_free_rxblock;
+
+ memset(txblock, 0, sizeof(struct ar5523_fwblock));
+ txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
+ txblock->total = cpu_to_be32(fw->size);
+
+ offset = 0;
+ len = fw->size;
+ while (len > 0) {
+ int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);
+
+ txblock->remain = cpu_to_be32(len - mlen);
+ txblock->len = cpu_to_be32(mlen);
+
+ /* send firmware block meta-data */
+ error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
+ txblock, sizeof(*txblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block info\n");
+ goto out_free_fwbuf;
+ }
+
+ /* send firmware block data */
+ memcpy(fwbuf, fw->data + offset, mlen);
+ error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
+ fwbuf, mlen, &foolen,
+ AR5523_DATA_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block data\n");
+ goto out_free_fwbuf;
+ }
+
+ /* wait for ack from firmware */
+ error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
+ rxblock, sizeof(*rxblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not read firmware answer\n");
+ goto out_free_fwbuf;
+ }
+
+ len -= mlen;
+ offset += mlen;
+ }
+
+ /*
+ * Set the error to -ENXIO to make sure we continue probing for
+ * a driver.
+ */
+ error = -ENXIO;
+
+ out_free_fwbuf:
+ kfree(fwbuf);
+ out_free_rxblock:
+ kfree(rxblock);
+ out_free_txblock:
+ kfree(txblock);
+ out:
+ release_firmware(fw);
+ return error;
+}
+
+static int ar5523_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ieee80211_hw *hw;
+ struct ar5523 *ar;
+ int error = -ENOMEM;
+
+ /*
+ * Load firmware if the device requires it. This will return
+ * -ENXIO on success and we'll get called back after the USB
+ * id changes to indicate that the firmware is present.
+ */
+ if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
+ return ar5523_load_firmware(dev);
+
+ hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
+ if (!hw)
+ goto out;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->dev = dev;
+ mutex_init(&ar->mutex);
+
+ INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
+ setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
+ INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
+ INIT_WORK(&ar->tx_work, ar5523_tx_work);
+ INIT_LIST_HEAD(&ar->tx_queue_pending);
+ INIT_LIST_HEAD(&ar->tx_queue_submitted);
+ spin_lock_init(&ar->tx_data_list_lock);
+ atomic_set(&ar->tx_nr_total, 0);
+ atomic_set(&ar->tx_nr_pending, 0);
+ init_waitqueue_head(&ar->tx_flush_waitq);
+
+ atomic_set(&ar->rx_data_free_cnt, 0);
+ INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
+ INIT_LIST_HEAD(&ar->rx_data_free);
+ INIT_LIST_HEAD(&ar->rx_data_used);
+ spin_lock_init(&ar->rx_data_list_lock);
+
+ ar->wq = create_singlethread_workqueue("ar5523");
+ if (!ar->wq) {
+ ar5523_err(ar, "Could not create wq\n");
+ goto out_free_ar;
+ }
+
+ error = ar5523_alloc_rx_bufs(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx buffers\n");
+ goto out_free_wq;
+ }
+
+ error = ar5523_alloc_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx command buffers\n");
+ goto out_free_rx_bufs;
+ }
+
+ error = ar5523_alloc_tx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate tx command buffers\n");
+ goto out_free_rx_cmd;
+ }
+
+ error = ar5523_submit_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Failed to submit rx cmd\n");
+ goto out_free_tx_cmd;
+ }
+
+ /*
+ * We're now ready to send/receive firmware commands.
+ */
+ error = ar5523_host_available(ar);
+ if (error) {
+ ar5523_err(ar, "could not initialize adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_max_rxsz(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devcap(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devstatus(ar);
+ if (error != 0) {
+ ar5523_err(ar, "could not get device status\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
+ (id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
+
+ ar->vif = NULL;
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL;
+ hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
+ sizeof(struct ar5523_chunk);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->queues = 1;
+
+ error = ar5523_init_modes(ar);
+ if (error)
+ goto out_cancel_rx_cmd;
+
+ usb_set_intfdata(intf, hw);
+
+ error = ieee80211_register_hw(hw);
+ if (error) {
+ ar5523_err(ar, "could not register device\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "Found and initialized AR5523 device\n");
+ return 0;
+
+out_cancel_rx_cmd:
+ ar5523_cancel_rx_cmd(ar);
+out_free_tx_cmd:
+ ar5523_free_tx_cmd(ar);
+out_free_rx_cmd:
+ ar5523_free_rx_cmd(ar);
+out_free_rx_bufs:
+ ar5523_free_rx_bufs(ar);
+out_free_wq:
+ destroy_workqueue(ar->wq);
+out_free_ar:
+ ieee80211_free_hw(hw);
+out:
+ return error;
+}
+
+static void ar5523_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "detaching\n");
+ set_bit(AR5523_USB_DISCONNECTED, &ar->flags);
+
+ ieee80211_unregister_hw(hw);
+
+ ar5523_cancel_rx_cmd(ar);
+ ar5523_free_tx_cmd(ar);
+ ar5523_free_rx_cmd(ar);
+ ar5523_free_rx_bufs(ar);
+
+ destroy_workqueue(ar->wq);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+}
+
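+/*
+ * Each supported device is listed twice: once with the product id the
+ * adapter reports after the microcode has been loaded, and once with
+ * product id + 1, the pre-firmware id that makes ar5523_probe()
+ * download the firmware first.
+ */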
+#define AR5523_DEVICE_UG(vendor, device) \
+ { USB_DEVICE((vendor), (device)) }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_PRE_FIRMWARE }
+#define AR5523_DEVICE_UX(vendor, device) \
+ { USB_DEVICE((vendor), (device)), \
+ .driver_info = AR5523_FLAG_ABG }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }
+
+static struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */
+ AR5523_DEVICE_UX(0x0cf3, 0x0005), /* Atheros2 / AR5523_3 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7801), /* Conceptronic / AR5523_1 */
+ AR5523_DEVICE_UX(0x0d8e, 0x7811), /* Conceptronic / AR5523_2 */
+ AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
+ AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
+ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
+ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
+ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
+ (CyberTAN Technology) */
+ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
+ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7802), /* Globalsun / AR5523_3 */
+ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
+ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
+ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
+ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
+ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
+ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
+ AR5523_DEVICE_UG(0x1385, 0x4250), /* Netgear3 / WG111T (2) */
+ AR5523_DEVICE_UG(0x1385, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x1385, 0x5f02), /* Netgear / WPN111 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ar5523_id_table);
+
+static struct usb_driver ar5523_driver = {
+ .name = "ar5523",
+ .id_table = ar5523_id_table,
+ .probe = ar5523_probe,
+ .disconnect = ar5523_disconnect,
+};
+
+module_usb_driver(ar5523_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 000000000..dc893ac75
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define AR5523_FLAG_PRE_FIRMWARE (1 << 0)
+#define AR5523_FLAG_ABG (1 << 1)
+
+#define AR5523_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+
+#define AR5523_CMD_TX_PIPE 0x01
+#define AR5523_DATA_TX_PIPE 0x02
+#define AR5523_CMD_RX_PIPE 0x81
+#define AR5523_DATA_RX_PIPE 0x82
+
+#define ar5523_cmd_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
+#define ar5523_data_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
+#define ar5523_cmd_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
+#define ar5523_data_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
+
+#define AR5523_DATA_TIMEOUT 10000
+#define AR5523_CMD_TIMEOUT 1000
+
+#define AR5523_TX_DATA_COUNT 8
+#define AR5523_TX_DATA_RESTART_COUNT 2
+#define AR5523_RX_DATA_COUNT 16
+#define AR5523_RX_DATA_REFILL_COUNT 8
+
+#define AR5523_CMD_ID 1
+#define AR5523_DATA_ID 2
+
+#define AR5523_TX_WD_TIMEOUT (HZ * 2)
+#define AR5523_FLUSH_TIMEOUT (HZ * 3)
+
+enum AR5523_flags {
+ AR5523_HW_UP,
+ AR5523_USB_DISCONNECTED,
+ AR5523_CONNECTED
+};
+
+struct ar5523_tx_cmd {
+ struct ar5523 *ar;
+ struct urb *urb_tx;
+ void *buf_tx;
+ void *odata;
+ int olen;
+ int flags;
+ int res;
+ struct completion done;
+};
+
+/* This struct is placed in tx_info->driver_data. It must not be larger
+ * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */
+struct ar5523_tx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+};
+
+struct ar5523_rx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+ struct sk_buff *skb;
+};
+
+struct ar5523 {
+ struct usb_device *dev;
+ struct ieee80211_hw *hw;
+
+ unsigned long flags;
+ struct mutex mutex;
+ struct workqueue_struct *wq;
+
+ struct ar5523_tx_cmd tx_cmd;
+
+ struct delayed_work stat_work;
+
+ struct timer_list tx_wd_timer;
+ struct work_struct tx_wd_work;
+ struct work_struct tx_work;
+ struct list_head tx_queue_pending;
+ struct list_head tx_queue_submitted;
+ spinlock_t tx_data_list_lock;
+ wait_queue_head_t tx_flush_waitq;
+
+ /* Queued + Submitted TX frames */
+ atomic_t tx_nr_total;
+
+ /* Submitted TX frames */
+ atomic_t tx_nr_pending;
+
+ void *rx_cmd_buf;
+ struct urb *rx_cmd_urb;
+
+ struct ar5523_rx_data rx_data[AR5523_RX_DATA_COUNT];
+ spinlock_t rx_data_list_lock;
+ struct list_head rx_data_free;
+ struct list_head rx_data_used;
+ atomic_t rx_data_free_cnt;
+
+ struct work_struct rx_refill_work;
+
+ unsigned int rxbufsz;
+ u8 serial[16];
+
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct ieee80211_vif *vif;
+};
+
+/* flags for sending firmware commands */
+#define AR5523_CMD_FLAG_READ (1 << 1)
+#define AR5523_CMD_FLAG_MAGIC (1 << 2)
+
+#define ar5523_dbg(ar, format, arg...) \
+ dev_dbg(&(ar)->dev->dev, format, ## arg)
+
+/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
+ * fail. Instead of dealing with them in every possible place just suppress
+ * any messages on USB disconnect.
+ */
+#define ar5523_err(ar, format, arg...) \
+do { \
+ if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
+ dev_err(&(ar)->dev->dev, format, ## arg); \
+ } \
+} while (0)
+#define ar5523_info(ar, format, arg...) \
+ dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 000000000..0fe2c803f
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* all fields are big endian */
+struct ar5523_fwblock {
+ __be32 flags;
+#define AR5523_WRITE_BLOCK (1 << 4)
+
+ __be32 len;
+#define AR5523_MAX_FWBLOCK_SIZE 2048
+
+ __be32 total;
+ __be32 remain;
+ __be32 rxtotal;
+ __be32 pad[123];
+} __packed;
+
+#define AR5523_MAX_RXCMDSZ 1024
+#define AR5523_MAX_TXCMDSZ 1024
+
+struct ar5523_cmd_hdr {
+ __be32 len;
+ __be32 code;
+/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
+/* messages from Host -> Target */
+#define WDCMSG_HOST_AVAILABLE 0x01
+#define WDCMSG_BIND 0x02
+#define WDCMSG_TARGET_RESET 0x03
+#define WDCMSG_TARGET_GET_CAPABILITY 0x04
+#define WDCMSG_TARGET_SET_CONFIG 0x05
+#define WDCMSG_TARGET_GET_STATUS 0x06
+#define WDCMSG_TARGET_GET_STATS 0x07
+#define WDCMSG_TARGET_START 0x08
+#define WDCMSG_TARGET_STOP 0x09
+#define WDCMSG_TARGET_ENABLE 0x0a
+#define WDCMSG_TARGET_DISABLE 0x0b
+#define WDCMSG_CREATE_CONNECTION 0x0c
+#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
+#define WDCMSG_DELETE_CONNECT 0x0e
+#define WDCMSG_SEND 0x0f
+#define WDCMSG_FLUSH 0x10
+/* messages from Target -> Host */
+#define WDCMSG_STATS_UPDATE 0x11
+#define WDCMSG_BMISS 0x12
+#define WDCMSG_DEVICE_AVAIL 0x13
+#define WDCMSG_SEND_COMPLETE 0x14
+#define WDCMSG_DATA_AVAIL 0x15
+#define WDCMSG_SET_PWR_MODE 0x16
+#define WDCMSG_BMISS_ACK 0x17
+#define WDCMSG_SET_LED_STEADY 0x18
+#define WDCMSG_SET_LED_BLINK 0x19
+/* more messages */
+#define WDCMSG_SETUP_BEACON_DESC 0x1a
+#define WDCMSG_BEACON_INIT 0x1b
+#define WDCMSG_RESET_KEY_CACHE 0x1c
+#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
+#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
+#define WDCMSG_SET_DECOMP_MASK 0x1f
+#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
+#define WDCMSG_SET_LED_STATE 0x21
+#define WDCMSG_WRITE_ASSOCID 0x22
+#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
+#define WDCMSG_GET_TSF 0x24
+#define WDCMSG_RESET_TSF 0x25
+#define WDCMSG_SET_ADHOC_MODE 0x26
+#define WDCMSG_SET_BASIC_RATE 0x27
+#define WDCMSG_MIB_CONTROL 0x28
+#define WDCMSG_GET_CHANNEL_DATA 0x29
+#define WDCMSG_GET_CUR_RSSI 0x2a
+#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
+#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
+#define WDCMSG_SET_POWER_MODE 0x30
+#define WDCMSG_SETUP_PSPOLL_DESC 0x31
+#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
+#define WDCMSG_RX_FILTER 0x33
+#define WDCMSG_PER_CALIBRATION 0x34
+#define WDCMSG_RESET 0x35
+#define WDCMSG_DISABLE 0x36
+#define WDCMSG_PHY_DISABLE 0x37
+#define WDCMSG_SET_TX_POWER_LIMIT 0x38
+#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
+#define WDCMSG_SETUP_TX_QUEUE 0x3a
+#define WDCMSG_RELEASE_TX_QUEUE 0x3b
+#define WDCMSG_SET_DEFAULT_KEY 0x43
+
+ __u32 priv; /* driver private data,
+ don't care about endianness */
+ __be32 magic;
+ __be32 reserved2[4];
+};
+
+struct ar5523_cmd_host_available {
+ __be32 sw_ver_major;
+ __be32 sw_ver_minor;
+ __be32 sw_ver_patch;
+ __be32 sw_ver_build;
+} __packed;
+
+#define ATH_SW_VER_MAJOR 1
+#define ATH_SW_VER_MINOR 5
+#define ATH_SW_VER_PATCH 0
+#define ATH_SW_VER_BUILD 9999
+
+struct ar5523_chunk {
+ u8 seqnum; /* sequence number for ordering */
+ u8 flags;
+#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
+#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
+#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
+ __be16 length; /* chunk size in bytes */
+ /* chunk data follows */
+} __packed;
+
+/*
+ * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
+ */
+struct ar5523_rx_desc {
+ __be32 len; /* msg length including header */
+ __be32 code; /* WDCMSG_DATA_AVAIL */
+ __be32 gennum; /* generation number */
+ __be32 status; /* start of RECEIVE_INFO */
+#define UATH_STATUS_OK 0
+#define UATH_STATUS_STOP_IN_PROGRESS 1
+#define UATH_STATUS_CRC_ERR 2
+#define UATH_STATUS_PHY_ERR 3
+#define UATH_STATUS_DECRYPT_CRC_ERR 4
+#define UATH_STATUS_DECRYPT_MIC_ERR 5
+#define UATH_STATUS_DECOMP_ERR 6
+#define UATH_STATUS_KEY_ERR 7
+#define UATH_STATUS_ERR 8
+ __be32 tstamp_low; /* low-order 32-bits of rx timestamp */
+ __be32 tstamp_high; /* high-order 32-bits of rx timestamp */
+ __be32 framelen; /* frame length */
+ __be32 rate; /* rx rate code */
+ __be32 antenna;
+ __be32 rssi;
+ __be32 channel;
+ __be32 phyerror;
+ __be32 connix; /* key table ix for bss traffic */
+ __be32 decrypterror;
+ __be32 keycachemiss;
+ __be32 pad; /* XXX? */
+} __packed;
+
+struct ar5523_tx_desc {
+ __be32 msglen;
+ u32 msgid; /* msg id (supplied by host) */
+ __be32 type; /* opcode: WDCMSG_SEND or WDCMSG_FLUSH */
+ __be32 txqid; /* tx queue id and flags */
+#define UATH_TXQID_MASK 0x0f
+#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
+#define UATH_TXQID_FF 0x20 /* content is fast frame */
+ __be32 connid; /* tx connection id */
+#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
+ __be32 flags; /* non-zero if response desired */
+#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
+ __be32 buflen; /* payload length */
+} __packed;
+
+#define AR5523_ID_BSS 2
+#define AR5523_ID_BROADCAST 0xffffffff
+
+/* structure for command UATH_CMD_WRITE_MAC */
+struct ar5523_write_mac {
+ __be32 reg;
+ __be32 len;
+ u8 data[32];
+} __packed;
+
+struct ar5523_cmd_rateset {
+ __u8 length;
+#define AR5523_MAX_NRATES 32
+ __u8 set[AR5523_MAX_NRATES];
+};
+
+struct ar5523_cmd_set_associd { /* WDCMSG_WRITE_ASSOCID */
+ __be32 defaultrateix;
+ __be32 associd;
+ __be32 timoffset;
+ __be32 turboprime;
+ __u8 bssid[6];
+} __packed;
+
+/* structure for command WDCMSG_RESET */
+struct ar5523_cmd_reset {
+ __be32 flags; /* channel flags */
+#define UATH_CHAN_TURBO 0x0100
+#define UATH_CHAN_CCK 0x0200
+#define UATH_CHAN_OFDM 0x0400
+#define UATH_CHAN_2GHZ 0x1000
+#define UATH_CHAN_5GHZ 0x2000
+ __be32 freq; /* channel frequency */
+ __be32 maxrdpower;
+ __be32 cfgctl;
+ __be32 twiceantennareduction;
+ __be32 channelchange;
+ __be32 keeprccontent;
+} __packed;
+
+/* structure for command WDCMSG_SET_BASIC_RATE */
+struct ar5523_cmd_rates {
+ __be32 connid;
+ __be32 keeprccontent;
+ __be32 size;
+ struct ar5523_cmd_rateset rateset;
+} __packed;
+
+enum {
+ WLAN_MODE_NONE = 0,
+ WLAN_MODE_11b,
+ WLAN_MODE_11a,
+ WLAN_MODE_11g,
+ WLAN_MODE_11a_TURBO,
+ WLAN_MODE_11g_TURBO,
+ WLAN_MODE_11a_TURBO_PRIME,
+ WLAN_MODE_11g_TURBO_PRIME,
+ WLAN_MODE_11a_XR,
+ WLAN_MODE_11g_XR,
+};
+
+struct ar5523_cmd_connection_attr {
+ __be32 longpreambleonly;
+ struct ar5523_cmd_rateset rateset;
+ __be32 wlanmode;
+} __packed;
+
+/* structure for command WDCMSG_CREATE_CONNECTION */
+struct ar5523_cmd_create_connection {
+ __be32 connid;
+ __be32 bssid;
+ __be32 size;
+ struct ar5523_cmd_connection_attr connattr;
+} __packed;
+
+struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
+ __be32 lednum;
+#define UATH_LED_LINK 0
+#define UATH_LED_ACTIVITY 1
+ __be32 ledmode;
+#define UATH_LED_OFF 0
+#define UATH_LED_ON 1
+} __packed;
+
+struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
+ __be32 lednum;
+ __be32 ledmode;
+ __be32 blinkrate;
+ __be32 slowmode;
+} __packed;
+
+struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
+ __be32 connected;
+} __packed;
+
+struct ar5523_cmd_txq_attr {
+ __be32 priority;
+ __be32 aifs;
+ __be32 logcwmin;
+ __be32 logcwmax;
+ __be32 bursttime;
+ __be32 mode;
+ __be32 qflags;
+} __packed;
+
+struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
+ __be32 qid;
+ __be32 len;
+ struct ar5523_cmd_txq_attr attr;
+} __packed;
+
+struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
+ __be32 bits;
+#define UATH_FILTER_RX_UCAST 0x00000001
+#define UATH_FILTER_RX_MCAST 0x00000002
+#define UATH_FILTER_RX_BCAST 0x00000004
+#define UATH_FILTER_RX_CONTROL 0x00000008
+#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
+#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
+#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
+#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
+#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
+#define UATH_FILTER_RX_PROBE_REQ 0x00000800
+ __be32 op;
+#define UATH_FILTER_OP_INIT 0x0
+#define UATH_FILTER_OP_SET 0x1
+#define UATH_FILTER_OP_CLEAR 0x2
+#define UATH_FILTER_OP_TEMP 0x3
+#define UATH_FILTER_OP_RESTORE 0x4
+} __packed;
+
+enum {
+ CFG_NONE, /* Sentinel to indicate "no config" */
+ CFG_REG_DOMAIN, /* Regulatory Domain */
+ CFG_RATE_CONTROL_ENABLE,
+ CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
+ CFG_HW_TX_RETRIES,
+ CFG_SW_TX_RETRIES,
+ CFG_SLOW_CLOCK_ENABLE,
+ CFG_COMP_PROC,
+ CFG_USER_RTS_THRESHOLD,
+ CFG_XR2NORM_RATE_THRESHOLD,
+ CFG_XRMODE_SWITCH_COUNT,
+ CFG_PROTECTION_TYPE,
+ CFG_BURST_SEQ_THRESHOLD,
+ CFG_ABOLT,
+ CFG_IQ_LOG_COUNT_MAX,
+ CFG_MODE_CTS,
+ CFG_WME_ENABLED,
+ CFG_GPRS_CBR_PERIOD,
+ CFG_SERVICE_TYPE,
+ /* MAC Address to use. Overrides EEPROM */
+ CFG_MAC_ADDR,
+ CFG_DEBUG_EAR,
+ CFG_INIT_REGS,
+ /* An ID for use in error & debug messages */
+ CFG_DEBUG_ID,
+ CFG_COMP_WIN_SZ,
+ CFG_DIVERSITY_CTL,
+ CFG_TP_SCALE,
+ CFG_TPC_HALF_DBM5,
+ CFG_TPC_HALF_DBM2,
+ CFG_OVERRD_TX_POWER,
+ CFG_USE_32KHZ_CLOCK,
+ CFG_GMODE_PROTECTION,
+ CFG_GMODE_PROTECT_RATE_INDEX,
+ CFG_GMODE_NON_ERP_PREAMBLE,
+ CFG_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ /* Sentinel to indicate "no capability" */
+ CAP_NONE,
+ CAP_ALL, /* ALL capabilities */
+ CAP_TARGET_VERSION,
+ CAP_TARGET_REVISION,
+ CAP_MAC_VERSION,
+ CAP_MAC_REVISION,
+ CAP_PHY_REVISION,
+ CAP_ANALOG_5GHz_REVISION,
+ CAP_ANALOG_2GHz_REVISION,
+ /* Target supports WDC message debug features */
+ CAP_DEBUG_WDCMSG_SUPPORT,
+
+ CAP_REG_DOMAIN,
+ CAP_COUNTRY_CODE,
+ CAP_REG_CAP_BITS,
+
+ CAP_WIRELESS_MODES,
+ CAP_CHAN_SPREAD_SUPPORT,
+ CAP_SLEEP_AFTER_BEACON_BROKEN,
+ CAP_COMPRESS_SUPPORT,
+ CAP_BURST_SUPPORT,
+ CAP_FAST_FRAMES_SUPPORT,
+ CAP_CHAP_TUNING_SUPPORT,
+ CAP_TURBOG_SUPPORT,
+ CAP_TURBO_PRIME_SUPPORT,
+ CAP_DEVICE_TYPE,
+ CAP_XR_SUPPORT,
+ CAP_WME_SUPPORT,
+ CAP_TOTAL_QUEUES,
+ CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
+
+ CAP_LOW_5GHZ_CHAN,
+ CAP_HIGH_5GHZ_CHAN,
+ CAP_LOW_2GHZ_CHAN,
+ CAP_HIGH_2GHZ_CHAN,
+
+ CAP_MIC_AES_CCM,
+ CAP_MIC_CKIP,
+ CAP_MIC_TKIP,
+ CAP_MIC_TKIP_WME,
+ CAP_CIPHER_AES_CCM,
+ CAP_CIPHER_CKIP,
+ CAP_CIPHER_TKIP,
+
+ CAP_TWICE_ANTENNAGAIN_5G,
+ CAP_TWICE_ANTENNAGAIN_2G,
+};
+
+enum {
+ ST_NONE, /* Sentinel to indicate "no status" */
+ ST_ALL,
+ ST_SERVICE_TYPE,
+ ST_WLAN_MODE,
+ ST_FREQ,
+ ST_BAND,
+ ST_LAST_RSSI,
+ ST_PS_FRAMES_DROPPED,
+ ST_CACHED_DEF_ANT,
+ ST_COUNT_OTHER_RX_ANT,
+ ST_USE_FAST_DIVERSITY,
+ ST_MAC_ADDR,
+ ST_RX_GENERATION_NUM,
+ ST_TX_QUEUE_DEPTH,
+ ST_SERIAL_NUMBER,
+ ST_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ TARGET_DEVICE_AWAKE,
+ TARGET_DEVICE_SLEEP,
+ TARGET_DEVICE_PWRDN,
+ TARGET_DEVICE_PWRSAVE,
+ TARGET_DEVICE_SUSPEND,
+ TARGET_DEVICE_RESUME,
+};
+
+/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
+#define IEEE80211_2ADDR_LEN 16
+
+#define AR5523_MIN_RXBUFSZ \
+ (((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
+ sizeof(struct ar5523_rx_desc)) + 3) & ~3)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
new file mode 100644
index 000000000..7e9481099
--- /dev/null
+++ b/drivers/net/wireless/ath/ath.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH_H
+#define ATH_H
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+/*
+ * The key cache is used for h/w cipher state and also for
+ * tracking station state such as the current tx antenna.
+ * We also set up a mapping table between key cache slot indices
+ * and station state to short-circuit node lookups on rx.
+ * Different parts have different-sized key caches. We handle
+ * up to ATH_KEYMAX entries (could dynamically allocate state).
+ */
+#define ATH_KEYMAX 128 /* max key cache size we handle */
+
+static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+struct ath_ani {
+ bool caldone;
+ unsigned int longcal_timer;
+ unsigned int shortcal_timer;
+ unsigned int resetcal_timer;
+ unsigned int checkani_timer;
+ struct timer_list timer;
+};
+
+struct ath_cycle_counters {
+ u32 cycles;
+ u32 rx_busy;
+ u32 rx_frame;
+ u32 tx_frame;
+};
+
+enum ath_device_state {
+ ATH_HW_UNAVAILABLE,
+ ATH_HW_INITIALIZED,
+};
+
+enum ath_op_flags {
+ ATH_OP_INVALID,
+ ATH_OP_BEACONS,
+ ATH_OP_ANI_RUN,
+ ATH_OP_PRIM_STA_VIF,
+ ATH_OP_HW_RESET,
+ ATH_OP_SCANNING,
+ ATH_OP_MULTI_CHANNEL,
+ ATH_OP_WOW_ENABLED,
+};
+
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
+struct reg_dmn_pair_mapping {
+ u16 reg_domain;
+ u16 reg_5ghz_ctl;
+ u16 reg_2ghz_ctl;
+};
+
+struct ath_regulatory {
+ char alpha2[2];
+ enum nl80211_dfs_regions region;
+ u16 country_code;
+ u16 max_power_level;
+ u16 current_rd;
+ int16_t power_limit;
+ struct reg_dmn_pair_mapping *regpair;
+};
+
+enum ath_crypt_caps {
+ ATH_CRYPT_CAP_CIPHER_AESCCM = BIT(0),
+ ATH_CRYPT_CAP_MIC_COMBINED = BIT(1),
+};
+
+struct ath_keyval {
+ u8 kv_type;
+ u8 kv_pad;
+ u16 kv_len;
+ u8 kv_val[16]; /* TK */
+ u8 kv_mic[8]; /* Michael MIC key */
+ u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
+ * supports both MIC keys in the same key cache entry;
+ * in that case, kv_mic is the RX key) */
+};
+
+enum ath_cipher {
+ ATH_CIPHER_WEP = 0,
+ ATH_CIPHER_AES_OCB = 1,
+ ATH_CIPHER_AES_CCM = 2,
+ ATH_CIPHER_CKIP = 3,
+ ATH_CIPHER_TKIP = 4,
+ ATH_CIPHER_CLR = 5,
+ ATH_CIPHER_MIC = 127
+};
+
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @multi_read: Multiple register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @write_flush: Flush buffered register writes and disable buffering
+ * @rmw: Register read-modify-write
+ * @enable_rmw_buffer: Enable buffering of read-modify-write operations
+ * @rmw_flush: Flush buffered read-modify-write operations
+ */
+struct ath_ops {
+ unsigned int (*read)(void *, u32 reg_offset);
+ void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*write_flush) (void *);
+ u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+ void (*enable_rmw_buffer)(void *);
+ void (*rmw_flush) (void *);
+};
+
+struct ath_common;
+struct ath_bus_ops;
+
+struct ath_ps_ops {
+ void (*wakeup)(struct ath_common *common);
+ void (*restore)(struct ath_common *common);
+};
+
+struct ath_common {
+ void *ah;
+ void *priv;
+ struct ieee80211_hw *hw;
+ int debug_mask;
+ enum ath_device_state state;
+ unsigned long op_flags;
+
+ struct ath_ani ani;
+
+ u16 cachelsz;
+ u16 curaid;
+ u8 macaddr[ETH_ALEN];
+ u8 curbssid[ETH_ALEN] __aligned(2);
+ u8 bssidmask[ETH_ALEN];
+
+ u32 rx_bufsize;
+
+ u32 keymax;
+ DECLARE_BITMAP(keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
+ enum ath_crypt_caps crypt_caps;
+
+ unsigned int clockrate;
+
+ spinlock_t cc_lock;
+ struct ath_cycle_counters cc_ani;
+ struct ath_cycle_counters cc_survey;
+
+ struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
+ const struct ath_ops *ops;
+ const struct ath_bus_ops *bus_ops;
+ const struct ath_ps_ops *ps_ops;
+
+ bool btcoex_enabled;
+ bool disable_ani;
+ bool bt_ant_diversity;
+
+ int last_rssi;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+};
+
+static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
+{
+ return common->ps_ops;
+}
+
+struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
+ u32 len,
+ gfp_t gfp_mask);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
+
+void ath_hw_setbssidmask(struct ath_common *common);
+void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
+int ath_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+bool ath_hw_keyreset(struct ath_common *common, u16 entry);
+void ath_hw_cycle_counters_update(struct ath_common *common);
+int32_t ath_hw_get_listen_time(struct ath_common *common);
+
+__printf(3, 4)
+void ath_printk(const char *level, const struct ath_common *common,
+ const char *fmt, ...);
+
+#define ath_emerg(common, fmt, ...) \
+ ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+#define ath_alert(common, fmt, ...) \
+ ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+#define ath_crit(common, fmt, ...) \
+ ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+#define ath_err(common, fmt, ...) \
+ ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+#define ath_warn(common, fmt, ...) \
+ ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+#define ath_notice(common, fmt, ...) \
+ ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+#define ath_info(common, fmt, ...) \
+ ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
+
+/**
+ * enum ath_debug_level - atheros wireless debug level
+ *
+ * @ATH_DBG_RESET: reset processing
+ * @ATH_DBG_QUEUE: hardware queue management
+ * @ATH_DBG_EEPROM: eeprom processing
+ * @ATH_DBG_CALIBRATE: periodic calibration
+ * @ATH_DBG_INTERRUPT: interrupt processing
+ * @ATH_DBG_REGULATORY: regulatory processing
+ * @ATH_DBG_ANI: adaptive noise immunity processing
+ * @ATH_DBG_XMIT: basic xmit operation
+ * @ATH_DBG_BEACON: beacon handling
+ * @ATH_DBG_CONFIG: configuration of the hardware
+ * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
+ * @ATH_DBG_PS: power save processing
+ * @ATH_DBG_HWTIMER: hardware timer handling
+ * @ATH_DBG_BTCOEX: bluetooth coexistence
+ * @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection
+ * @ATH_DBG_WOW: Wake on Wireless
+ * @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_ANY: enable all debugging
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. Each driver has its own method for enabling debugging and
+ * modifying debug level states -- but this is typically done through a
+ * module parameter 'debug' along with a respective 'debug' debugfs file
+ * entry.
+ */
+enum ATH_DEBUG {
+ ATH_DBG_RESET = 0x00000001,
+ ATH_DBG_QUEUE = 0x00000002,
+ ATH_DBG_EEPROM = 0x00000004,
+ ATH_DBG_CALIBRATE = 0x00000008,
+ ATH_DBG_INTERRUPT = 0x00000010,
+ ATH_DBG_REGULATORY = 0x00000020,
+ ATH_DBG_ANI = 0x00000040,
+ ATH_DBG_XMIT = 0x00000080,
+ ATH_DBG_BEACON = 0x00000100,
+ ATH_DBG_CONFIG = 0x00000200,
+ ATH_DBG_FATAL = 0x00000400,
+ ATH_DBG_PS = 0x00000800,
+ ATH_DBG_BTCOEX = 0x00001000,
+ ATH_DBG_WMI = 0x00002000,
+ ATH_DBG_BSTUCK = 0x00004000,
+ ATH_DBG_MCI = 0x00008000,
+ ATH_DBG_DFS = 0x00010000,
+ ATH_DBG_WOW = 0x00020000,
+ ATH_DBG_CHAN_CTX = 0x00040000,
+ ATH_DBG_DYNACK = 0x00080000,
+ ATH_DBG_ANY = 0xffffffff
+};
+
+#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
+#define ATH_DBG_MAX_LEN 512
+
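+/*
+ * ath_dbg() takes the mask name without the ATH_DBG_ prefix, e.g.
+ *
+ *	ath_dbg(common, RESET, "resetting hardware\n");
+ *
+ * and only emits the message when the corresponding bit is set in
+ * common->debug_mask. Without CONFIG_ATH_DEBUG it compiles away to a
+ * no-op while still type-checking its arguments.
+ */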
+#ifdef CONFIG_ATH_DEBUG
+
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+do { \
+ if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
+ ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
+#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
+
+#else
+
+static inline __attribute__ ((format (printf, 3, 4)))
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+ const char *fmt, ...)
+{
+}
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+ _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
+#define ATH_DBG_WARN(foo, arg...) do {} while (0)
+#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
+ int __ret_warn_once = !!(foo); \
+ unlikely(__ret_warn_once); \
+})
+
+#endif /* CONFIG_ATH_DEBUG */
+
+/** Returns string describing opmode, or NULL if unknown mode. */
+#ifdef CONFIG_ATH_DEBUG
+const char *ath_opmode_to_string(enum nl80211_iftype opmode);
+#else
+static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
+{
+ return "UNKNOWN";
+}
+#endif
+
+#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000..72acb822b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,47 @@
+config ATH10K
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
+ select ATH_COMMON
+ ---help---
+ This module adds support for wireless adapters based on the
+ Atheros IEEE 802.11ac family of chipsets.
+
+ If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_PCI
+ tristate "Atheros ath10k PCI support"
+ depends on ATH10K && PCI
+ ---help---
+ This module adds support for the PCIe bus.
+
+config ATH10K_DEBUG
+ bool "Atheros ath10k debugging"
+ depends on ATH10K
+ ---help---
+ Enable debug support.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+ bool "Atheros ath10k debugfs support"
+ depends on ATH10K && DEBUG_FS
+ select RELAY
+ ---help---
+ Enable debugfs support.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_TRACING
+ bool "Atheros ath10k tracing support"
+ depends on ATH10K
+ depends on EVENT_TRACING
+ ---help---
+ Select this to enable the ath10k tracing infrastructure.
+
+config ATH10K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000..f4dbb3e93
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,26 @@
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+ debug.o \
+ core.o \
+ htc.o \
+ htt.o \
+ htt_rx.o \
+ htt_tx.o \
+ txrx.o \
+ wmi.o \
+ wmi-tlv.o \
+ bmi.o \
+ hw.o
+
+ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o \
+ ce.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000..3d29b0875
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+
+void ath10k_bmi_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
+
+ ar->bmi.done_sent = false;
+}
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+ cmd.id = __cpu_to_le32(BMI_DONE);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen = sizeof(resp.get_target_info);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to get target info from device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+ u32 address, void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+ u32 rxlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+ cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
+ cmd.read_mem.addr = __cpu_to_le32(address);
+ cmd.read_mem.len = __cpu_to_le32(rxlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+ &resp, &rxlen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(buffer, resp.read_mem.payload, rxlen);
+ address += rxlen;
+ buffer += rxlen;
+ length -= rxlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ /* copy before roundup to avoid reading beyond buffer */
+ memcpy(cmd.write_mem.payload, buffer, txlen);
+ txlen = roundup(txlen, 4);
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
+ cmd.write_mem.addr = __cpu_to_le32(address);
+ cmd.write_mem.len = __cpu_to_le32(txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* fixup roundup() so `length` zeroes out for last chunk */
+ txlen = min(txlen, length);
+
+ address += txlen;
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+ u32 resplen = sizeof(resp.execute);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, param);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_EXECUTE);
+ cmd.execute.addr = __cpu_to_le32(address);
+ cmd.execute.param = __cpu_to_le32(param);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.execute)) {
+ ath10k_warn(ar, "invalid execute response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
+ return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd.id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd.lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd.lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
+ cmd.lz_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
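+/*
+ * Download an image using the LZ stream commands. The data is sent in
+ * 4-byte units: head_len is the length rounded down to a multiple of
+ * four, and any remaining 1-3 bytes go out in a zero-padded trailer
+ * word (e.g. length = 10 gives head_len = 8 plus a 4-byte trailer
+ * carrying the last 2 bytes).
+ */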
+int ath10k_bmi_fast_download(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ u8 trailer[4] = {};
+ u32 head_len = rounddown(length, 4);
+ u32 trailer_len = length - head_len;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
+ ret = ath10k_bmi_lz_stream_start(ar, address);
+ if (ret)
+ return ret;
+
+ /* copy the last word into a zero padded buffer */
+ if (trailer_len > 0)
+ memcpy(trailer, buffer + head_len, trailer_len);
+
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ret)
+ return ret;
+
+ if (trailer_len > 0)
+ ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches.
+ */
+ ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000..31a990635
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE 256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+ BMI_NO_COMMAND = 0,
+ BMI_DONE = 1,
+ BMI_READ_MEMORY = 2,
+ BMI_WRITE_MEMORY = 3,
+ BMI_EXECUTE = 4,
+ BMI_SET_APP_START = 5,
+ BMI_READ_SOC_REGISTER = 6,
+ BMI_READ_SOC_WORD = 6,
+ BMI_WRITE_SOC_REGISTER = 7,
+ BMI_WRITE_SOC_WORD = 7,
+ BMI_GET_TARGET_ID = 8,
+ BMI_GET_TARGET_INFO = 8,
+ BMI_ROMPATCH_INSTALL = 9,
+ BMI_ROMPATCH_UNINSTALL = 10,
+ BMI_ROMPATCH_ACTIVATE = 11,
+ BMI_ROMPATCH_DEACTIVATE = 12,
+ BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
+ BMI_LZ_DATA = 14,
+ BMI_NVRAM_PROCESS = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+struct bmi_cmd {
+ __le32 id; /* enum bmi_cmd_id */
+ union {
+ struct {
+ } done;
+ struct {
+ __le32 addr;
+ __le32 len;
+ } read_mem;
+ struct {
+ __le32 addr;
+ __le32 len;
+ u8 payload[0];
+ } write_mem;
+ struct {
+ __le32 addr;
+ __le32 param;
+ } execute;
+ struct {
+ __le32 addr;
+ } set_app_start;
+ struct {
+ __le32 addr;
+ } read_soc_reg;
+ struct {
+ __le32 addr;
+ __le32 value;
+ } write_soc_reg;
+ struct {
+ } get_target_info;
+ struct {
+ __le32 rom_addr;
+ __le32 ram_addr; /* or value */
+ __le32 size;
+ __le32 activate; /* 0=install, but don't activate */
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_activate;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_deactivate;
+ struct {
+ __le32 addr;
+ } lz_start;
+ struct {
+ __le32 len; /* max BMI_MAX_DATA_SIZE */
+ u8 payload[0]; /* length of @len */
+ } lz_data;
+ struct {
+ u8 name[BMI_NVRAM_SEG_NAME_SZ];
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+ };
+} __packed;
+
+union bmi_resp {
+ struct {
+ u8 payload[0];
+ } read_mem;
+ struct {
+ __le32 result;
+ } execute;
+ struct {
+ __le32 value;
+ } read_soc_reg;
+ struct {
+ __le32 len;
+ __le32 version;
+ __le32 type;
+ } get_target_info;
+ struct {
+ __le32 patch_id;
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ /* 0 = nothing executed
+ * otherwise = NVRAM segment return value */
+ __le32 result;
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+ u32 version;
+ u32 type;
+};
+
+/* in msec */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+void ath10k_bmi_start(struct ath10k *ar);
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+ void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 addr; \
+ __le32 tmp; \
+ \
+ addr = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+ if (!ret) \
+ *val = __le32_to_cpu(tmp); \
+ ret; \
+ })
+
+#define ath10k_bmi_write32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 address; \
+ __le32 v = __cpu_to_le32(val); \
+ \
+ address = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_write_memory(ar, address, \
+ (u8 *)&v, sizeof(v)); \
+ ret; \
+ })
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
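+
+/*
+ * Typical boot-time use of this interface looks roughly like the
+ * following (error handling omitted; "address" is the target RAM
+ * location the image is downloaded to and "fw" a struct firmware
+ * holding it):
+ *
+ *	struct bmi_target_info target_info;
+ *
+ *	ath10k_bmi_start(ar);
+ *	ath10k_bmi_get_target_info(ar, &target_info);
+ *	ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+ *	ath10k_bmi_done(ar);
+ *
+ * Once BMI_DONE has been sent the other helpers refuse to run and
+ * return -EBUSY.
+ */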
+#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000..e508c65b6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1165 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "pci.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ * a source ring
+ * a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers. When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
+
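+/*
+ * Rough host-side flow through this file's API (a sketch only; every
+ * function named here is defined below in this file):
+ *
+ *   ath10k_ce_alloc_pipe()  - allocate source/destination rings for a CE
+ *   ath10k_ce_init_pipe()   - program ring base, size and watermarks
+ *   ath10k_ce_rx_post_buf() - keep the destination ring supplied with
+ *                             receive buffers
+ *   ath10k_ce_send()        - queue a source buffer towards the target
+ *   ath10k_ce_per_engine_service() - reap completions via the registered
+ *                             send_cb/recv_cb, which typically use
+ *                             ath10k_ce_completed_send_next() and
+ *                             ath10k_ce_completed_recv_next()
+ */
+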
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int addr)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32(ar,
+  ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
+ CE_CTRL1_DMAX_LENGTH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+ (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
+ CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u32 addr)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+ (addr & ~SRC_WATERMARK_HIGH_MASK) |
+ SRC_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+ (addr & ~SRC_WATERMARK_LOW_MASK) |
+ SRC_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+ (addr & ~DST_WATERMARK_HIGH_MASK) |
+ DST_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+ (addr & ~DST_WATERMARK_LOW_MASK) |
+ DST_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 host_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + HOST_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+ host_ie_addr & ~CE_WATERMARK_MASK);
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 misc_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + MISC_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+ misc_ie_addr | CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ u32 misc_ie_addr = ath10k_pci_read32(ar,
+ ce_ctrl_addr + MISC_IE_ADDRESS);
+
+ ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+ misc_ie_addr & ~CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int mask)
+{
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+}
+
+/*
+ * Guts of ath10k_ce_send.
+ * The caller takes responsibility for any needed locking.
+ */
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc *desc, *sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (nbytes > ce_state->src_sz_max)
+  ath10k_warn(ar, "%s: sending more than max allowed (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ write_index);
+ sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ sdesc->addr = __cpu_to_le32(buffer);
+ sdesc->nbytes = __cpu_to_le16(nbytes);
+ sdesc->flags = __cpu_to_le16(desc_flags);
+
+ *desc = *sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ /* WORKAROUND: only advertise the new write index for the final
+  * (non-gather) fragment, so a partial gather list is never processed.
+  */
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *src_ring = pipe->src_ring;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ /*
+ * This function must be called only if there is an incomplete
+ * scatter-gather transfer (before index register is updated)
+ * that needs to be cleaned up.
+ */
+ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+ return;
+
+ if (WARN_ON_ONCE(src_ring->write_index ==
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+ return;
+
+ src_ring->write_index--;
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int delta;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return delta;
+}
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}
+
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -EIO;
+
+ desc->addr = __cpu_to_le32(paddr);
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
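+/*
+ * Illustrative destination-ring replenish pattern (a sketch only; the real
+ * refill logic lives in the HIF/PCI layer, and "skb"/"paddr" stand in for
+ * whatever DMA-mapped buffer the caller provides):
+ *
+ *     spin_lock_bh(&ar_pci->ce_lock);
+ *     while (__ath10k_ce_rx_num_free_bufs(pipe) > 0) {
+ *             if (__ath10k_ce_rx_post_buf(pipe, skb, paddr))
+ *                     break;
+ *     }
+ *     spin_unlock_bh(&ar_pci->ce_lock);
+ */
+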
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+ struct ce_desc sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /*
+ * This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(sdesc.addr);
+ *nbytesp = nbytes;
+ *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
+
+ if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
+ *flagsp = CE_RECV_FLAG_SWAPPED;
+ else
+ *flagsp = 0;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+ per_transfer_contextp,
+ bufferp, nbytesp,
+ transfer_idp, flagsp);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
+ unsigned int read_index;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ sbase = src_ring->shadow_base;
+ sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(sdesc->addr);
+ *nbytesp = __le16_to_cpu(sdesc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k_ce_ring *src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+
+ src_ring = ce_state->src_ring;
+
+ if (!src_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (write_index != sw_index) {
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = ath10k_ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp,
+ bufferp, nbytesp,
+ transfer_idp);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ u32 ctrl_addr = ce_state->ctrl_addr;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ /* Clear the copy-complete interrupts that will be handled here. */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+ HOST_IS_COPY_COMPLETE_MASK);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ /*
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+ int ce_id;
+ u32 intr_summary;
+
+ intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
+ if (intr_summary & (1 << ce_id))
+ intr_summary &= ~(1 << ce_id);
+ else
+ /* no intr pending on this CE */
+ continue;
+
+ ath10k_ce_per_engine_service(ar, ce_id);
+ }
+}
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
+{
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
+
+ if ((!disable_copy_compl_intr) &&
+ (ce_state->send_cb || ce_state->recv_cb))
+  ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
+ else
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+ }
+
+ return 0;
+}
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id;
+
+ /* Skip the last copy engine, CE7 (the diagnostic window), as it
+ * uses polling and isn't initialized for interrupts.
+ */
+ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
+ ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
+}
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->src_nentries);
+
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
+ src_ring->hw_index = src_ring->sw_index;
+
+ src_ring->write_index =
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space = PTR_ALIGN(
+ src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space = ALIGN(
+ src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ /*
+ * Also allocate a shadow src ring in regular
+ * mem to use for faster access.
+ */
+ src_ring->shadow_base_unaligned =
+ kmalloc((nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN), GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->shadow_base = PTR_ALIGN(
+ src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /*
+  * Initialize the memory to 0 to prevent garbage data from
+  * crashing the system during firmware download
+ */
+ memset(dest_ring->base_addr_owner_space_unaligned, 0,
+ nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
+
+ dest_ring->base_addr_owner_space = PTR_ALIGN(
+ dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space = ALIGN(
+ dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ int ret;
+
+ if (attr->src_nentries) {
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *))
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+ /*
+  * Make sure there are enough CE ringbuffer entries for HTT TX to
+  * avoid additional TX locking checks.
+  *
+  * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = recv_cb;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+
+ if (ce_state->src_ring) {
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 000000000..0eddb204d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 8
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN 8
+#define CE_SEND_FLAG_GATHER 0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ath10k_ce_pipe;
+
+#define CE_DESC_FLAGS_GATHER (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
+#define CE_DESC_FLAGS_META_DATA_LSB 2
+
+struct ce_desc {
+ __le32 addr;
+ __le16 nbytes;
+ __le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+struct ath10k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /*
+ * For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+ /*
+ * For src ring, this is the next index not yet processed by HW.
+ * This is a cached copy of the real HW index (read index), used
+ * for avoiding reading the HW index register more often than
+ * necessary.
+ * This extends the invariant:
+ * write index >= read index >= hw_index >= sw_index
+ *
+ * For dest ring, this is currently unused.
+ */
+ /* cached copy */
+ unsigned int hw_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /*
+ * Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+ /*
+ * Start of shadow copy of descriptors, within regular memory.
+ * Aligned to descriptor-size boundary.
+ */
+ void *shadow_base_unaligned;
+ struct ce_desc *shadow_base;
+
+ /* keep last */
+ void *per_transfer_context[0];
+};
+
+struct ath10k_ce_pipe {
+ struct ath10k *ar;
+ unsigned int id;
+
+ unsigned int attr_flags;
+
+ u32 ctrl_addr;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
+
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ * ce - which copy engine to use
+ * buffer - address of buffer
+ * nbytes - number of bytes to send
+ * transfer_id - arbitrary ID; reflected to destination
+ * flags - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_send_context,
+ u32 buffer,
+ unsigned int nbytes,
+ /* 14 bits */
+ unsigned int transfer_id,
+ unsigned int flags);
+
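+/*
+ * Illustrative call (a sketch only; real users live in the HTC/HIF layers,
+ * and skb/paddr/transfer_id are placeholders supplied by the caller):
+ *
+ *     ret = ath10k_ce_send(ce_pipe, skb, paddr, skb->len, transfer_id, 0);
+ */
+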
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
+
+/*==================Recv=======================*/
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED 1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+/*==================CE Engine Initialization=======================*/
+
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *));
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp);
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp,
+ unsigned int *flagsp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupts(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP 1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+};
+
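+/*
+ * Example attribute set (illustrative values only; the real per-pipe
+ * tables are defined by the bus layer, e.g. the PCI code):
+ *
+ *     static const struct ce_attr example_attr = {
+ *             .flags = 0,
+ *             .src_nentries = 16,
+ *             .src_sz_max = 2048,
+ *             .dest_nentries = 0,     (a host-to-target only pipe)
+ *     };
+ */
+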
+#define SR_BA_ADDRESS 0x0000
+#define SR_SIZE_ADDRESS 0x0004
+#define DR_BA_ADDRESS 0x0008
+#define DR_SIZE_ADDRESS 0x000c
+#define CE_CMD_ADDRESS 0x0018
+
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
+ (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
+ CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
+ (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
+ (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
+ CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_DMAX_LENGTH_MSB 15
+#define CE_CTRL1_DMAX_LENGTH_LSB 0
+#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
+#define CE_CTRL1_DMAX_LENGTH_GET(x) \
+ (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
+#define CE_CTRL1_DMAX_LENGTH_SET(x) \
+ (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
+
+#define CE_CTRL1_ADDRESS 0x0010
+#define CE_CTRL1_HW_MASK 0x0007ffff
+#define CE_CTRL1_SW_MASK 0x0007ffff
+#define CE_CTRL1_HW_WRITE_MASK 0x00000000
+#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
+#define CE_CTRL1_RSTMASK 0xffffffff
+#define CE_CTRL1_RESET 0x00000080
+
+#define CE_CMD_HALT_STATUS_MSB 3
+#define CE_CMD_HALT_STATUS_LSB 3
+#define CE_CMD_HALT_STATUS_MASK 0x00000008
+#define CE_CMD_HALT_STATUS_GET(x) \
+ (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
+#define CE_CMD_HALT_STATUS_SET(x) \
+ (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
+#define CE_CMD_HALT_STATUS_RESET 0
+#define CE_CMD_HALT_MSB 0
+#define CE_CMD_HALT_MASK 0x00000001
+
+#define HOST_IE_COPY_COMPLETE_MSB 0
+#define HOST_IE_COPY_COMPLETE_LSB 0
+#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
+#define HOST_IE_COPY_COMPLETE_GET(x) \
+ (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
+#define HOST_IE_COPY_COMPLETE_SET(x) \
+ (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
+#define HOST_IE_COPY_COMPLETE_RESET 0
+#define HOST_IE_ADDRESS 0x002c
+
+#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
+#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
+#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
+#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
+#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
+#define HOST_IS_ADDRESS 0x0030
+
+#define MISC_IE_ADDRESS 0x0034
+
+#define MISC_IS_AXI_ERR_MASK 0x00000400
+
+#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
+#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
+#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
+#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
+#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
+
+#define MISC_IS_ADDRESS 0x0038
+
+#define SR_WR_INDEX_ADDRESS 0x003c
+
+#define DST_WR_INDEX_ADDRESS 0x0040
+
+#define CURRENT_SRRI_ADDRESS 0x0044
+
+#define CURRENT_DRRI_ADDRESS 0x0048
+
+#define SRC_WATERMARK_LOW_MSB 31
+#define SRC_WATERMARK_LOW_LSB 16
+#define SRC_WATERMARK_LOW_MASK 0xffff0000
+#define SRC_WATERMARK_LOW_GET(x) \
+ (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
+#define SRC_WATERMARK_LOW_SET(x) \
+ (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
+#define SRC_WATERMARK_LOW_RESET 0
+#define SRC_WATERMARK_HIGH_MSB 15
+#define SRC_WATERMARK_HIGH_LSB 0
+#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
+#define SRC_WATERMARK_HIGH_GET(x) \
+ (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
+#define SRC_WATERMARK_HIGH_SET(x) \
+ (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
+#define SRC_WATERMARK_HIGH_RESET 0
+#define SRC_WATERMARK_ADDRESS 0x004c
+
+#define DST_WATERMARK_LOW_LSB 16
+#define DST_WATERMARK_LOW_MASK 0xffff0000
+#define DST_WATERMARK_LOW_SET(x) \
+ (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
+#define DST_WATERMARK_LOW_RESET 0
+#define DST_WATERMARK_HIGH_MSB 15
+#define DST_WATERMARK_HIGH_LSB 0
+#define DST_WATERMARK_HIGH_MASK 0x0000ffff
+#define DST_WATERMARK_HIGH_GET(x) \
+ (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
+#define DST_WATERMARK_HIGH_SET(x) \
+ (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
+#define DST_WATERMARK_HIGH_RESET 0
+#define DST_WATERMARK_ADDRESS 0x0050
+
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+{
+ return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
+ HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
+ HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
+ HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
+
+#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
+ MISC_IS_DST_ADDR_ERR_MASK | \
+ MISC_IS_SRC_LEN_ERR_MASK | \
+ MISC_IS_DST_MAX_LEN_VIO_MASK | \
+ MISC_IS_DST_RING_OVERFLOW_MASK | \
+ MISC_IS_SRC_RING_OVERFLOW_MASK)
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+ (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
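+/*
+ * Worked example of the ring arithmetic above (illustrative only): with
+ * nentries = 8 (mask = 7), sw_index = 2 and write_index = 5 there are
+ * three descriptors in flight, and the free-slot count computed in
+ * ath10k_ce_send_nolock() is
+ *
+ *     CE_RING_DELTA(7, 5, 2 - 1) = (1 - 5) & 7 = 4
+ *
+ * i.e. nentries - in_flight - 1; one slot is always kept empty to
+ * distinguish a full ring from an empty one.
+ */
+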
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
+
+#define CE_INTERRUPT_SUMMARY(ar) \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
+ ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 000000000..75f536160
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,1446 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+
+unsigned int ath10k_debug_mask;
+static bool uart_print;
+static bool skip_otp;
+
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param(skip_otp, bool, 0644);
+
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "Uart target debugging");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .name = "qca988x hw2.0",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .fw = QCA988X_HW_2_0_FW_FILE,
+ .otp = QCA988X_HW_2_0_OTP_FILE,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .name = "qca6174 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .fw = QCA6174_HW_2_1_FW_FILE,
+ .otp = QCA6174_HW_2_1_OTP_FILE,
+ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_3_0_VERSION,
+ .name = "qca6174 hw3.0",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .fw = QCA6174_HW_3_0_FW_FILE,
+ .otp = QCA6174_HW_3_0_OTP_FILE,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .name = "qca6174 hw3.2",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .fw = {
+ /* uses same binaries as hw3.0 */
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .fw = QCA6174_HW_3_0_FW_FILE,
+ .otp = QCA6174_HW_3_0_OTP_FILE,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+};
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
+
+ complete(&ar->target_suspend);
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+ u32 param_host;
+ int ret;
+
+ /* tell target which HTC version is used */
+ ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION);
+ if (ret) {
+ ath10k_err(ar, "settings HTC version failed\n");
+ return ret;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
+ return ret;
+ }
+
+ /* TODO following parameters need to be re-visited. */
+ /* num_device */
+ param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+ /* Firmware mode */
+ /* FIXME: why FW_MODE_AP? */
+ param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+ /* mac_addr_method */
+ param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ /* firmware_bridge */
+ param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+ /* fwsubmode */
+ param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
+ return ret;
+ }
+
+ /* We do all byte-swapping on the host */
+ ret = ath10k_bmi_write32(ar, hi_be, 0);
+ if (ret) {
+ ath10k_err(ar, "setting host CPU BE mode failed\n");
+ return ret;
+ }
+
+ /* FW descriptor/Data swap flags */
+ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+ if (ret) {
+ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = reject_firmware(&fw, filename, ar->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw;
+}
+
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
+ u32 board_ext_data_addr;
+ int ret;
+
+ ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+ if (ret) {
+ ath10k_err(ar, "could not read board ext data addr (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
+ board_ext_data_addr);
+
+ if (board_ext_data_addr == 0)
+ return 0;
+
+ if (data_len != (board_data_size + board_ext_data_size)) {
+ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
+ data_len, board_data_size, board_ext_data_size);
+ return -EINVAL;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+ data + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
+ return ret;
+ }
+
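+ /* Assumed encoding of hi_board_ext_data_config: the upper 16 bits
+  * carry the extended board data size and bit 0 marks it as valid.
+  */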
+ ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+ (board_ext_data_size << 16) | 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data bit (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 address;
+ int ret;
+
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+ if (ret) {
+ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, address, data,
+ min_t(u32, board_data_size,
+ data_len));
+ if (ret) {
+ ath10k_err(ar, "could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (!ar->cal_file)
+ return -ENOENT;
+
+ if (IS_ERR(ar->cal_file))
+ return PTR_ERR(ar->cal_file);
+
+ ret = ath10k_download_board_data(ar, ar->cal_file->data,
+ ar->cal_file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ int data_len;
+ void *data;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ /* Device Tree is optional, don't print any warnings if
+ * there's no node for ath10k.
+ */
+ return -ENOENT;
+
+ if (!of_get_property(node, "qcom,ath10k-calibration-data",
+ &data_len)) {
+ /* The calibration data node is optional */
+ return -ENOENT;
+ }
+
+ if (data_len != QCA988X_CAL_DATA_LEN) {
+ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+ data_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
+ data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+out:
+ return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+ u32 result, address = ar->hw_params.patch_load_addr;
+ int ret;
+
+ ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
+ /* OTP is optional */
+
+ if (!ar->otp_data || !ar->otp_len) {
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ar->otp_data, ar->otp_len);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_execute(ar, address, 0, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (!skip_otp && result != 0) {
+ ath10k_err(ar, "otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+{
+ u32 address, data_len;
+ const char *mode_name;
+ const void *data;
+ int ret;
+
+ address = ar->hw_params.patch_load_addr;
+
+ switch (mode) {
+ case ATH10K_FIRMWARE_MODE_NORMAL:
+ data = ar->firmware_data;
+ data_len = ar->firmware_len;
+ mode_name = "normal";
+ break;
+ case ATH10K_FIRMWARE_MODE_UTF:
+ data = ar->testmode.utf->data;
+ data_len = ar->testmode.utf->size;
+ mode_name = "utf";
+ break;
+ default:
+ ath10k_err(ar, "unknown firmware mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot uploading firmware image %p len %d mode %s\n",
+ data, data_len, mode_name);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download %s firmware: %d\n",
+ mode_name, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+ if (!IS_ERR(ar->board))
+ release_firmware(ar->board);
+
+ if (!IS_ERR(ar->otp))
+ release_firmware(ar->otp);
+
+ if (!IS_ERR(ar->firmware))
+ release_firmware(ar->firmware);
+
+ if (!IS_ERR(ar->cal_file))
+ release_firmware(ar->cal_file);
+
+ ar->board = NULL;
+ ar->board_data = NULL;
+ ar->board_len = 0;
+
+ ar->otp = NULL;
+ ar->otp_data = NULL;
+ ar->otp_len = 0;
+
+ ar->firmware = NULL;
+ ar->firmware_data = NULL;
+ ar->firmware_len = 0;
+
+ ar->cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+ char filename[100];
+
+ /*(DEBLOBBED)*/
+ scnprintf(filename, sizeof(filename), "/*(DEBLOBBED)*/",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (IS_ERR(ar->cal_file))
+ /* calibration file is optional, don't print any warnings */
+ return PTR_ERR(ar->cal_file);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+ ATH10K_FW_DIR, filename);
+
+ return 0;
+}
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err(ar, "firmware file not defined\n");
+ return -EINVAL;
+ }
+
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err(ar, "board data file not defined");
+ return -EINVAL;
+ }
+
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
+ ath10k_err(ar, "could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
+ ar->firmware = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.fw);
+ if (IS_ERR(ar->firmware)) {
+ ret = PTR_ERR(ar->firmware);
+ ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
+ goto err;
+ }
+
+ ar->firmware_data = ar->firmware->data;
+ ar->firmware_len = ar->firmware->size;
+
+ /* OTP may be undefined. If so, don't fetch it at all */
+ if (ar->hw_params.fw.otp == NULL)
+ return 0;
+
+ ar->otp = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.otp);
+ if (IS_ERR(ar->otp)) {
+ ret = PTR_ERR(ar->otp);
+ ath10k_err(ar, "could not fetch otp (%d)\n", ret);
+ goto err;
+ }
+
+ ar->otp_data = ar->otp->data;
+ ar->otp_len = ar->otp->size;
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp, *version;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+ if (IS_ERR(ar->firmware)) {
+ ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
+ ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
+ return PTR_ERR(ar->firmware);
+ }
+
+ data = ar->firmware->data;
+ len = ar->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err(ar, "invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
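+ /* What follows is a sequence of IEs: each starts with a struct
+ * ath10k_fw_ie header (32-bit little-endian id and length) followed by
+ * the payload, padded to a 4-byte boundary.
+ */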
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ break;
+
+ memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+ ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ ar->hw->wiphy->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
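+ /* The feature IE payload is a bitmap: feature flag i lives in bit
+ * (i % 8) of byte (i / 8). Stop once the index runs past the bytes
+ * actually present in the IE.
+ */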
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Enabling feature bit: %i\n",
+ i);
+ __set_bit(i, ar->fw_features);
+ }
+ }
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
+ ar->fw_features,
+ sizeof(ar->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ar->firmware_data = data;
+ ar->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->otp_data = data;
+ ar->otp_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_WMI_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ ar->wmi.op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
+ ar->wmi.op_version);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!ar->firmware_data || !ar->firmware_len) {
+ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ /* now fetch the board file */
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err(ar, "board data file not defined");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ar->board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board)) {
+ ret = PTR_ERR(ar->board);
+ ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
+ ar->hw_params.fw.dir, ar->hw_params.fw.board,
+ ret);
+ goto err;
+ }
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret;
+
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+
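+ /* Try the newest firmware API container first and fall back one
+ * version at a time; API 1 is the legacy scheme with separate
+ * firmware, otp and board files.
+ */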
+ ar->fw_api = 4;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 3;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 2;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 1;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_1(ar);
+ if (ret)
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath10k_download_cal_data(struct ath10k *ar)
+{
+ int ret;
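+ /* Calibration sources in order of preference: cal file, Device Tree,
+ * then OTP. Only a failure to run OTP is treated as fatal.
+ */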
+
+ ret = ath10k_download_cal_file(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_FILE;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_DT;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find DT entry, try OTP next: %d\n",
+ ret);
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ar->cal_mode = ATH10K_CAL_MODE_OTP;
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+ return 0;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+ int ret;
+
+ /*
+ * Explicitly setting UART prints to zero as target turns it on
+ * based on scratch registers.
+ */
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ if (!uart_print)
+ return 0;
+
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_info(ar, "UART prints enabled\n");
+ return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+ const struct ath10k_hw_params *uninitialized_var(hw_params);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+ hw_params = &ath10k_hw_params_list[i];
+
+ if (hw_params->id == ar->target_version)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
+ ar->target_version);
+ return -EINVAL;
+ }
+
+ ar->hw_params = *hw_params;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
+
+ return 0;
+}
+
+static void ath10k_core_restart(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ /* Place a barrier to make sure the compiler doesn't reorder
+ * CRASH_FLUSH and calling other functions.
+ */
+ barrier();
+
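+ /* Stop tx and wake up everything that may be waiting on firmware
+ * responses so the waiters notice ATH10K_FLAG_CRASH_FLUSH instead of
+ * timing out.
+ */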
+ ieee80211_stop_queues(ar->hw);
+ ath10k_drain_tx(ar);
+ complete_all(&ar->scan.started);
+ complete_all(&ar->scan.completed);
+ complete_all(&ar->scan.on_channel);
+ complete_all(&ar->offchan_tx_completed);
+ complete_all(&ar->install_key_done);
+ complete_all(&ar->vdev_setup_done);
+ complete_all(&ar->thermal.wmi_sync);
+ wake_up(&ar->htt.empty_tx_wq);
+ wake_up(&ar->wmi.tx_credits_wq);
+ wake_up(&ar->peer_mapping_wq);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ath10k_hif_stop(ar);
+ ath10k_scan_finish(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+ /* this can happen if the driver is being unloaded
+ * or if the crash happens during FW probing */
+ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
+ break;
+ case ATH10K_STATE_RESTARTING:
+ /* hw restart might be requested from multiple places */
+ break;
+ case ATH10K_STATE_RESTARTED:
+ ar->state = ATH10K_STATE_WEDGED;
+ /* fall through */
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "device is wedged, will not restart\n");
+ break;
+ case ATH10K_STATE_UTF:
+ ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_core_init_firmware_features(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+ return -EINVAL;
+ }
+
+ if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
+ ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+ return -EINVAL;
+ }
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_WMI_OP_VERSION.
+ */
+ if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
+ ar->fw_features))
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ else
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ } else {
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ }
+ }
+
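+ /* Pick the target resource limits (peers, stations, vdevs, pending
+ * HTT tx descriptors) matching the WMI interface generation.
+ */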
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->max_num_peers = TARGET_NUM_PEERS;
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+{
+ int status;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ ath10k_bmi_start(ar);
+
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ status = ath10k_download_cal_data(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_download_fw(ar, mode);
+ if (status)
+ goto err;
+
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
+
+ ar->htc.htc_ops.target_send_suspend_complete =
+ ath10k_send_suspend_complete;
+
+ status = ath10k_htc_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not init HTC (%d)\n", status);
+ goto err;
+ }
+
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_wmi_attach(ar);
+ if (status) {
+ ath10k_err(ar, "WMI attach failed: %d\n", status);
+ goto err;
+ }
+
+ status = ath10k_htt_init(ar);
+ if (status) {
+ ath10k_err(ar, "failed to init htt: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_tx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+ goto err_htt_tx_detach;
+ }
+
+ status = ath10k_hif_start(ar);
+ if (status) {
+ ath10k_err(ar, "could not start HIF: %d\n", status);
+ goto err_htt_rx_detach;
+ }
+
+ status = ath10k_htc_wait_target(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_connect(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to connect htt (%d)\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_wmi_connect(ar);
+ if (status) {
+ ath10k_err(ar, "could not connect wmi: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_htc_start(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to start htc: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status <= 0) {
+ ath10k_warn(ar, "wmi service ready event not received");
+ status = -ETIMEDOUT;
+ goto err_hif_stop;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ar->hw->wiphy->fw_version);
+
+ status = ath10k_wmi_cmd_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not send WMI init command (%d)\n",
+ status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_wait_for_unified_ready(ar);
+ if (status <= 0) {
+ ath10k_err(ar, "wmi unified ready event not received\n");
+ status = -ETIMEDOUT;
+ goto err_hif_stop;
+ }
+
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
+ status = ath10k_htt_rx_ring_refill(ar);
+ if (status) {
+ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ /* we don't care about HTT in UTF mode */
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_setup(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to setup htt: %d\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_hif_stop;
+
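+ /* one bit per vdev id; all ids start out free */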
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+
+ INIT_LIST_HEAD(&ar->arvifs);
+
+ return 0;
+
+err_hif_stop:
+ ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ ath10k_htt_tx_free(&ar->htt);
+err_wmi_detach:
+ ath10k_wmi_detach(ar);
+err:
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_start);
+
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ int ret;
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+ if (ret) {
+ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
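+ /* wait_for_completion_timeout() returns 0 on timeout */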
+ if (ret == 0) {
+ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+void ath10k_core_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* try to suspend target */
+ if (ar->state != ATH10K_STATE_RESTARTING &&
+ ar->state != ATH10K_STATE_UTF)
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
+ ath10k_debug_stop(ar);
+ ath10k_hif_stop(ar);
+ ath10k_htt_tx_free(&ar->htt);
+ ath10k_htt_rx_free(&ar->htt);
+ ath10k_wmi_detach(ar);
+}
+EXPORT_SYMBOL(ath10k_core_stop);
+
+/* mac80211 manages fw/hw initialization through start/stop hooks. However, in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately, since the
+ * start hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
+{
+ struct bmi_target_info target_info;
+ int ret = 0;
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "could not start pci hif (%d)\n", ret);
+ return ret;
+ }
+
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+
+ ret = ath10k_init_hw_params(ar);
+ if (ret) {
+ ath10k_err(ar, "could not get hw params (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ath10k_core_fetch_firmware_files(ar);
+ if (ret) {
+ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ath10k_core_init_firmware_features(ar);
+ if (ret) {
+ ath10k_err(ar, "fatal problem with firmware features: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath10k_err(ar, "could not init core (%d)\n", ret);
+ goto err_unlock;
+ }
+
+ ath10k_print_driver_info(ar);
+ ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ath10k_hif_power_down(ar);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+err_free_firmware_files:
+ ath10k_core_free_firmware_files(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+ return ret;
+}
+
+static void ath10k_core_register_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, register_work);
+ int status;
+
+ status = ath10k_core_probe_fw(ar);
+ if (status) {
+ ath10k_err(ar, "could not probe fw (%d)\n", status);
+ goto err;
+ }
+
+ status = ath10k_mac_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
+ goto err_release_fw;
+ }
+
+ status = ath10k_debug_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to initialize debugfs\n");
+ goto err_unregister_mac;
+ }
+
+ status = ath10k_spectral_create(ar);
+ if (status) {
+ ath10k_err(ar, "failed to initialize spectral\n");
+ goto err_debug_destroy;
+ }
+
+ status = ath10k_thermal_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register thermal device: %d\n",
+ status);
+ goto err_spectral_destroy;
+ }
+
+ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+ return;
+
+err_spectral_destroy:
+ ath10k_spectral_destroy(ar);
+err_debug_destroy:
+ ath10k_debug_destroy(ar);
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_release_fw:
+ ath10k_core_free_firmware_files(ar);
+err:
+ /* TODO: It's probably a good idea to release device from the driver
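+ /* of_get_property() only tells us whether the property exists and how
+ * long it is; the data itself is read below with
+ * of_property_read_u8_array().
+ */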
+ * but calling device_release_driver() here will cause a deadlock.
+ */
+ return;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+ ar->chip_id = chip_id;
+ queue_work(ar->workqueue, &ar->register_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ cancel_work_sync(&ar->register_work);
+
+ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ return;
+
+ ath10k_thermal_unregister(ar);
+ /* Stop spectral before unregistering from mac80211 to remove the
+ * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
+ * would already be freed recursively, leading to a double free.
+ */
+ ath10k_spectral_destroy(ar);
+
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures. */
+ ath10k_mac_unregister(ar);
+
+ ath10k_testmode_destroy(ar);
+
+ ath10k_core_free_firmware_files(ar);
+
+ ath10k_debug_unregister(ar);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+ int ret;
+
+ ar = ath10k_mac_create(priv_size);
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+ ar->dev = dev;
+ ar->hw_rev = hw_rev;
+ ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
+
+ switch (hw_rev) {
+ case ATH10K_HW_QCA988X:
+ ar->regs = &qca988x_regs;
+ break;
+ case ATH10K_HW_QCA6174:
+ ar->regs = &qca6174_regs;
+ break;
+ default:
+ ath10k_err(ar, "unsupported core hardware revision %d\n",
+ hw_rev);
+ ret = -ENOTSUPP;
+ goto err_free_mac;
+ }
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->thermal.wmi_sync);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_free_mac;
+
+ mutex_init(&ar->conf_mutex);
+ spin_lock_init(&ar->data_lock);
+
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+ init_waitqueue_head(&ar->htt.empty_tx_wq);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ INIT_WORK(&ar->register_work, ath10k_core_register_work);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_wq;
+
+ return ar;
+
+err_free_wq:
+ destroy_workqueue(ar->workqueue);
+
+err_free_mac:
+ ath10k_mac_destroy(ar);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ flush_workqueue(ar->workqueue);
+ destroy_workqueue(ar->workqueue);
+
+ ath10k_debug_destroy(ar);
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 000000000..f65310c3b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,706 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+
+#include "htt.h"
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+#include "../dfs_pattern_detector.h"
+#include "spectral.h"
+#include "thermal.h"
+
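+/* register field helpers: MS() extracts a field using the _f##_MASK and
+ * _f##_LSB definitions, SM() shifts a value into field position, and WO()
+ * converts a byte offset into a 32-bit word offset.
+ */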
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f) ((_f##_OFFSET) >> 2)
+
+#define ATH10K_SCAN_ID 0
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_NUM_CHANS 38
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
+
+/* number of failed packets */
+#define ATH10K_KICKOUT_THRESHOLD 50
+
+/*
+ * Use insanely high numbers to make sure that the firmware's keepalive
+ * implementation never kicks in; the same functionality is already
+ * available in hostapd. Unit is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+struct ath10k;
+
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+};
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+ switch (bus) {
+ case ATH10K_BUS_PCI:
+ return "pci";
+ }
+
+ return "unknown";
+}
+
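+/* Per-skb tx state kept in IEEE80211_SKB_CB()->driver_data; it must fit in
+ * IEEE80211_TX_INFO_DRIVER_DATA_SIZE (enforced by the BUILD_BUG_ON in
+ * ATH10K_SKB_CB() below).
+ */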
+struct ath10k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ u8 vdev_id;
+
+ struct {
+ u8 tid;
+ u16 freq;
+ bool is_offchan;
+ struct ath10k_htt_txbuf *txbuf;
+ u32 txbuf_paddr;
+ } __packed htt;
+
+ struct {
+ bool dtim_zero;
+ bool deliver_cab;
+ } bcn;
+} __packed;
+
+struct ath10k_skb_rxcb {
+ dma_addr_t paddr;
+ struct hlist_node hlist;
+};
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+ container_of((void *)rxcb, struct sk_buff, cb)
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+ return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+
+struct ath10k_bmi {
+ bool done_sent;
+};
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct ath10k_wmi {
+ enum ath10k_fw_wmi_op_version op_version;
+ enum ath10k_htc_ep_id eid;
+ struct completion service_ready;
+ struct completion unified_ready;
+ wait_queue_head_t tx_credits_wq;
+ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
+ const struct wmi_ops *ops;
+
+ u32 num_mem_chunks;
+ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+};
+
+struct ath10k_fw_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 peer_rssi;
+ u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
+};
+
+struct ath10k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[4];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[4];
+ u32 num_tx_frames_failures[4];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[10];
+ u32 beacon_rssi_history[10];
+};
+
+struct ath10k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count;
+ u32 rx_frame_count;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+};
+
+struct ath10k_fw_stats {
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head peers;
+};
+
+struct ath10k_dfs_stats {
+ u32 phy_errors;
+ u32 pulses_total;
+ u32 pulses_detected;
+ u32 pulses_discarded;
+ u32 radar_detected;
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+ struct list_head list;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+ /* protected by ar->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+struct ath10k_sta {
+ struct ath10k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+
+enum ath10k_beacon_state {
+ ATH10K_BEACON_SCHEDULED = 0,
+ ATH10K_BEACON_SENDING,
+ ATH10K_BEACON_SENT,
+};
+
+struct ath10k_vif {
+ struct list_head list;
+
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ struct sk_buff *beacon;
+ /* protected by data_lock */
+ enum ath10k_beacon_state beacon_state;
+ void *beacon_buf;
+ dma_addr_t beacon_paddr;
+
+ struct ath10k *ar;
+ struct ieee80211_vif *vif;
+
+ bool is_started;
+ bool is_up;
+ bool spectral_enabled;
+ bool ps;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+
+ struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+ s8 def_wep_key_idx;
+
+ u16 tx_seq_no;
+
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ u8 fixed_rate;
+ u8 fixed_nss;
+ u8 force_sgi;
+ bool use_cts_prot;
+ int num_legacy_stations;
+ int txpower;
+ struct wmi_wmm_params_all_arg wmm_params;
+};
+
+struct ath10k_vif_iter {
+ u32 vdev_id;
+ struct ath10k_vif *arvif;
+};
+
+/* used for crash-dump storage, protected by data-lock */
+struct ath10k_fw_crash_data {
+ bool crashed_since_read;
+
+ uuid_le uuid;
+ struct timespec timestamp;
+ __le32 registers[REG_DUMP_COUNT_QCA988X];
+};
+
+struct ath10k_debug {
+ struct dentry *debugfs_phy;
+
+ struct ath10k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
+ struct ath10k_dfs_stats dfs_stats;
+ struct ath_dfs_pool_stats dfs_pool_stats;
+
+ /* protected by conf_mutex */
+ u32 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+ u32 pktlog_filter;
+ u32 reg_addr;
+ u32 nf_cal_period;
+
+ u8 htt_max_amsdu;
+ u8 htt_max_ampdu;
+
+ struct ath10k_fw_crash_data *fw_crash_data;
+};
+
+enum ath10k_state {
+ ATH10K_STATE_OFF = 0,
+ ATH10K_STATE_ON,
+
+ /* When doing firmware recovery the device is first powered down.
+ * mac80211 is supposed to call in to start() hook later on. It is
+ * however possible that driver unloading and firmware crash overlap.
+ * mac80211 can wait on conf_mutex in stop() while the device is
+ * stopped in ath10k_core_restart() work holding conf_mutex. The state
+ * RESTARTED means that the device is up and mac80211 has started hw
+ * reconfiguration. Once mac80211 is done with the reconfiguration we
+ * set the state to STATE_ON in reconfig_complete(). */
+ ATH10K_STATE_RESTARTING,
+ ATH10K_STATE_RESTARTED,
+
+ /* The device has crashed while restarting hw. This state is like ON
+ * but commands are blocked in HTC and -ECOMM response is given. This
+ * prevents completion timeouts and makes the driver more responsive to
+ * userspace commands. This also prevents recursive recovery. */
+ ATH10K_STATE_WEDGED,
+
+ /* factory tests */
+ ATH10K_STATE_UTF,
+};
+
+enum ath10k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH10K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH10K_FIRMWARE_MODE_UTF,
+};
+
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* Firmware from 10X branch. Deprecated, don't use in new code. */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+ /* firmware supports management frame tx over WMI; otherwise it goes over HTT */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* Firmware does not support P2P */
+ ATH10K_FW_FEATURE_NO_P2P = 3,
+
+ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
+ * bit is required to be set as well. Deprecated, don't use in new
+ * code.
+ */
+ ATH10K_FW_FEATURE_WMI_10_2 = 4,
+
+ /* Some firmware revisions lack proper multi-interface client powersave
+ * implementation. Enabling PS could result in connection drops,
+ * traffic stalls, etc.
+ */
+ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
+enum ath10k_dev_flags {
+ /* Indicates that the ath10k device is in the CAC phase of DFS */
+ ATH10K_CAC_RUNNING,
+ ATH10K_FLAG_CORE_REGISTERED,
+
+ /* Device has crashed and needs to restart. This indicates any pending
+ * waiters should immediately cancel instead of waiting for a time out.
+ */
+ ATH10K_FLAG_CRASH_FLUSH,
+};
+
+enum ath10k_cal_mode {
+ ATH10K_CAL_MODE_FILE,
+ ATH10K_CAL_MODE_OTP,
+ ATH10K_CAL_MODE_DT,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+ switch (mode) {
+ case ATH10K_CAL_MODE_FILE:
+ return "file";
+ case ATH10K_CAL_MODE_OTP:
+ return "otp";
+ case ATH10K_CAL_MODE_DT:
+ return "dt";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_scan_state {
+ ATH10K_SCAN_IDLE,
+ ATH10K_SCAN_STARTING,
+ ATH10K_SCAN_RUNNING,
+ ATH10K_SCAN_ABORTING,
+};
+
+static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
+{
+ switch (state) {
+ case ATH10K_SCAN_IDLE:
+ return "idle";
+ case ATH10K_SCAN_STARTING:
+ return "starting";
+ case ATH10K_SCAN_RUNNING:
+ return "running";
+ case ATH10K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+struct ath10k {
+ struct ath_common ath_common;
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ u8 mac_addr[ETH_ALEN];
+
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id;
+ u32 target_version;
+ u8 fw_version_major;
+ u32 fw_version_minor;
+ u16 fw_version_release;
+ u16 fw_version_build;
+ u32 phy_capability;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 num_rf_chains;
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ bool p2p;
+
+ struct {
+ enum ath10k_bus bus;
+ const struct ath10k_hif_ops *ops;
+ } hif;
+
+ struct completion target_suspend;
+
+ const struct ath10k_hw_regs *regs;
+ struct ath10k_bmi bmi;
+ struct ath10k_wmi wmi;
+ struct ath10k_htc htc;
+ struct ath10k_htt htt;
+
+ struct ath10k_hw_params {
+ u32 id;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *fw;
+ const char *otp;
+ const char *board;
+ size_t board_size;
+ size_t board_ext_size;
+ } fw;
+ } hw_params;
+
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
+ const struct firmware *otp;
+ const void *otp_data;
+ size_t otp_len;
+
+ const struct firmware *firmware;
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const struct firmware *cal_file;
+
+ int fw_api;
+ enum ath10k_cal_mode cal_mode;
+
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath10k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ } mac;
+
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ /* current operating channel definition */
+ struct cfg80211_chan_def chandef;
+
+ unsigned long long free_vdev_map;
+ bool monitor;
+ int monitor_vdev_id;
+ bool monitor_started;
+ unsigned int filter_flags;
+ unsigned long dev_flags;
+ u32 dfs_block_radar_events;
+
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
+ /* protected by conf_mutex */
+ u8 supp_tx_chainmask;
+ u8 supp_rx_chainmask;
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+
+ struct completion install_key_done;
+
+ struct completion vdev_setup_done;
+
+ struct workqueue_struct *workqueue;
+
+ /* prevents concurrent FW reconfiguration */
+ struct mutex conf_mutex;
+
+ /* protects shared structure data */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+
+ /* protected by conf_mutex */
+ int num_peers;
+ int num_stations;
+
+ int max_num_peers;
+ int max_num_stations;
+ int max_num_vdevs;
+
+ struct work_struct offchan_tx_work;
+ struct sk_buff_head offchan_tx_queue;
+ struct completion offchan_tx_completed;
+ struct sk_buff *offchan_tx_skb;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ enum ath10k_state state;
+
+ struct work_struct register_work;
+ struct work_struct restart_work;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+ struct survey_info survey[ATH10K_NUM_CHANS];
+
+ struct dfs_pattern_detector *dfs_detector;
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+ struct ath10k_debug debug;
+#endif
+
+ struct {
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+
+ /* spectral_mode and spec_config are protected by conf_mutex */
+ enum ath10k_spectral_mode mode;
+ struct ath10k_spec_scan config;
+ } spectral;
+
+ struct {
+ /* protected by conf_mutex */
+ const struct firmware *utf;
+ DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
+ enum ath10k_fw_wmi_op_version orig_wmi_op_version;
+
+ /* protected by data_lock */
+ bool utf_monitor;
+ } testmode;
+
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ u32 fw_warm_reset_counter;
+ u32 fw_cold_reset_counter;
+ } stats;
+
+ struct ath10k_thermal thermal;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
+void ath10k_core_stop(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
+void ath10k_core_unregister(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 000000000..301081db1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,2155 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/utsname.h>
+
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include "wmi-ops.h"
+
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ u8 uuid[16];
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* VERMAGIC_STRING */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[0];
+} __packed;
+
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_info(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_info);
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->chip_id,
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
+ ar->htt.target_version_major,
+ ar->htt.target_version_minor,
+ ar->wmi.op_version,
+ ath10k_cal_mode_str(ar->cal_mode),
+ ar->max_num_stations);
+ ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+ config_enabled(CONFIG_ATH10K_DEBUG),
+ config_enabled(CONFIG_ATH10K_DEBUGFS),
+ config_enabled(CONFIG_ATH10K_TRACING),
+ config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
+ config_enabled(CONFIG_NL80211_TESTMODE));
+}
+EXPORT_SYMBOL(ath10k_print_driver_info);
+
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_err(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_err);
+
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_warn(ar, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 4096;
+ const char *name;
+ ssize_t ret_cnt;
+ bool enabled;
+ int i;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < WMI_SERVICE_MAX; i++) {
+ enabled = test_bit(i, ar->wmi.svc_map);
+ name = wmi_service_name(i);
+
+ if (!name) {
+ if (enabled)
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s (bit %d)\n",
+ "unknown", "enabled", i);
+
+ continue;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s\n",
+ name, enabled ? "enabled" : "-");
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+ .read = ath10k_read_wmi_services,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_fw_stats stats = {};
+ bool is_start, is_started, is_end;
+ size_t num_peers;
+ size_t num_vdevs;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.peers);
+
+ spin_lock_bh(&ar->data_lock);
+ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+ if (ret) {
+ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+ goto unlock;
+ }
+
+ /* Stat data may exceed htc-wmi buffer limit. In such case firmware
+ * splits the stats data and delivers it in a ping-pong fashion of
+ * request cmd-update event.
+ *
+ * However there is no explicit end-of-data. Instead start-of-data is
+ * used as an implicit one. This works as follows:
+ * a) discard stat update events until one with pdev stats is
+ * delivered - this skips session started at end of (b)
+ * b) consume stat update events until another one with pdev stats is
+ * delivered which is treated as end-of-data and is itself discarded
+ */
+
+ if (ar->debug.fw_stats_done) {
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+ goto free;
+ }
+
+ num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+
+ if (is_start)
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+ if (is_end)
+ ar->debug.fw_stats_done = true;
+
+ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+ if (is_started && !is_end) {
+ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+ /* Although this is unlikely, impose a sane limit to
+ * prevent firmware from DoS-ing the host.
+ */
+ ath10k_warn(ar, "dropping fw peer stats\n");
+ goto free;
+ }
+
+ if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_warn(ar, "dropping fw vdev stats\n");
+ goto free;
+ }
+
+ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+ }
+
+ complete(&ar->debug.fw_stats_complete);
+
+free:
+ /* In some cases lists have been spliced and cleared. Free up
+ * resources if that is not the case.
+ */
+ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
+ ath10k_debug_fw_stats_peers_free(&stats.peers);
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ unsigned long timeout;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ timeout = jiffies + msecs_to_jiffies(1*HZ);
+
+ ath10k_debug_fw_stats_reset(ar);
+
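+ /* Keep requesting stats until the exchange described in
+ * ath10k_debug_fw_stats_process() sets fw_stats_done, or until the
+ * timeout set above expires.
+ */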
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath10k_wmi_request_stats(ar,
+ WMI_STAT_PDEV |
+ WMI_STAT_VDEV |
+ WMI_STAT_PEER);
+ if (ret) {
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1*HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
+static void ath10k_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+ int i;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", pdev->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", pdev->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", pdev->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", pdev->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", pdev->mib_int_count);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requed", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Sched self tiggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Pdev continous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RSSI", peer->peer_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer TX rate", peer->peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate", peer->peer_rx_rate);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
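+ /* make sure the formatted stats string is always null terminated */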
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ goto err_free;
+ }
+
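+ /* format the collected stats into buf; it is freed in the release handler */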
+ ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ unsigned int len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_fw_stats = {
+ .open = ath10k_fw_stats_open,
+ .release = ath10k_fw_stats_release,
+ .read = ath10k_fw_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret, len, buf_len;
+ char *buf;
+
+ buf_len = 500;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_warm_reset_counter\t\t%d\n",
+ ar->stats.fw_warm_reset_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_cold_reset_counter\t\t%d\n",
+ ar->stats.fw_cold_reset_counter);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
+ .open = simple_open,
+ .read = ath10k_debug_fw_reset_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* This is a clean assert crash in firmware. */
+static int ath10k_debug_fw_assert(struct ath10k *ar)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ /* big enough number so that firmware asserts */
+ cmd->vdev_id = __cpu_to_le32(0x7ffe);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
+ "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
+ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'soft': Send a WMI command that causes a firmware hang. This hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
+ * vdev id. This is a hard firmware crash because it is recoverable only by a
+ * cold firmware reset.
+ */
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n') {
+ buf[count - 1] = 0;
+ count--;
+ }
+
+ if (!strcmp(buf, "soft")) {
+ ath10k_info(ar, "simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info(ar, "simulating hard firmware crash\n");
+ /* 0x7fff is used as the vdev id; it is out of range for all
+ * firmware variants and therefore forces a firmware crash.
+ */
+ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
+ ar->wmi.vdev_param->rts_threshold,
+ 0);
+ } else if (!strcmp(buf, "assert")) {
+ ath10k_info(ar, "simulating firmware assert crash\n");
+ ret = ath10k_debug_fw_assert(ar);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath10k_info(ar, "user requested hw restart\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath10k_read_simulate_fw_crash,
+ .write = ath10k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ crash_data->crashed_since_read = true;
+ uuid_le_gen(&crash_data->uuid);
+ getnstimeofday(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
+
+static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ int hdr_len = sizeof(*dump_data);
+ unsigned int len, sofar = 0;
+ unsigned char *buf;
+
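+ /* total dump size: file header plus a single TLV carrying the register dump */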
+ len = hdr_len;
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ sofar += hdr_len;
+
+ /* This is going to get big when we start dumping FW RAM and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!crash_data->crashed_since_read) {
+ spin_unlock_bh(&ar->data_lock);
+ vfree(buf);
+ return NULL;
+ }
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
+ dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ /* Gather crash-dump */
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ ar->debug.fw_crash_data->crashed_since_read = false;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return dump_data;
+}
+
+static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ struct ath10k_dump_file_data *dump;
+
+ dump = ath10k_build_dump_file(ar);
+ if (!dump)
+ return -ENODATA;
+
+ file->private_data = dump;
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_crash_dump_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k_dump_file_data *dump_file = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ dump_file,
+ le32_to_cpu(dump_file->len));
+}
+
+static int ath10k_fw_crash_dump_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations fops_fw_crash_dump = {
+ .open = ath10k_fw_crash_dump_open,
+ .read = ath10k_fw_crash_dump_read,
+ .release = ath10k_fw_crash_dump_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_addr_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+ u32 reg_addr;
+
+ mutex_lock(&ar->conf_mutex);
+ reg_addr = ar->debug.reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(reg_addr, 4))
+ return -EFAULT;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.reg_addr = reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+ .read = ath10k_reg_addr_read,
+ .write = ath10k_reg_addr_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[48];
+ unsigned int len;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
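+ /* read the register selected earlier via the reg_addr debugfs file */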
+ reg_addr = ar->debug.reg_addr;
+
+ reg_val = ath10k_hif_read32(ar, reg_addr);
+ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+ if (ret)
+ goto exit;
+
+ ath10k_hif_write32(ar, reg_addr, reg_val);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+ .read = ath10k_reg_value_read,
+ .write = ath10k_reg_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
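+ /* the file offset is used as the target memory address */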
+ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ ret = copy_to_user(user_buf, buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ count -= ret;
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+ .read = ath10k_mem_value_read,
+ .write = ath10k_mem_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
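+ /* use the current jiffies value as an opaque cookie for this request */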
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[64];
+ u8 amsdu = 3, ampdu = 64;
+ unsigned int len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->debug.htt_max_amsdu)
+ amsdu = ar->debug.htt_max_amsdu;
+
+ if (ar->debug.htt_max_ampdu)
+ ampdu = ar->debug.htt_max_ampdu;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int res;
+ char buf[64];
+ unsigned int amsdu, ampdu;
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
+
+ res = sscanf(buf, "%u %u", &amsdu, &ampdu);
+
+ if (res != 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
+ if (res)
+ goto out;
+
+ res = count;
+ ar->debug.htt_max_amsdu = amsdu;
+ ar->debug.htt_max_ampdu = ampdu;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return res;
+}
+
+static const struct file_operations fops_htt_max_amsdu_ampdu = {
+ .read = ath10k_read_htt_max_amsdu_ampdu,
+ .write = ath10k_write_htt_max_amsdu_ampdu,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[64];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ char buf[64];
+ unsigned int log_level, mask;
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
+
+ ret = sscanf(buf, "%x %u", &mask, &log_level);
+
+ if (!ret)
+ return -EINVAL;
+
+ if (ret == 1)
+ /* default if user did not specify */
+ log_level = ATH10K_DBGLOG_LEVEL_WARN;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.fw_dbglog_mask = mask;
+ ar->debug.fw_dbglog_level = log_level;
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ar->debug.fw_dbglog_level);
+ if (ret) {
+ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+/* TODO: It would be nice to always support ethtool stats; that would require
+ * moving the stats storage out of ath10k_debug, or always having the
+ * ath10k_debug struct available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_noise_floor",
+ "d_cycle_count",
+ "d_phy_error",
+ "d_rts_bad",
+ "d_rts_good",
+ "d_tx_power", /* in .5 dbM I think */
+ "d_rx_crc_err", /* fcs_bad */
+ "d_no_beacon",
+ "d_tx_mpdus_queued",
+ "d_tx_msdu_queued",
+ "d_tx_msdu_dropped",
+ "d_local_enqued",
+ "d_local_freed",
+ "d_tx_ppdu_hw_queued",
+ "d_tx_ppdu_reaped",
+ "d_tx_fifo_underrun",
+ "d_tx_ppdu_abort",
+ "d_tx_mpdu_requed",
+ "d_tx_excessive_retries",
+ "d_tx_hw_rate",
+ "d_tx_dropped_sw_retries",
+ "d_tx_illegal_rate",
+ "d_tx_continuous_xretries",
+ "d_tx_timeout",
+ "d_tx_mpdu_txop_limit",
+ "d_pdev_resets",
+ "d_rx_mid_ppdu_route_change",
+ "d_rx_status",
+ "d_rx_extra_frags_ring0",
+ "d_rx_extra_frags_ring1",
+ "d_rx_extra_frags_ring2",
+ "d_rx_extra_frags_ring3",
+ "d_rx_msdu_htt",
+ "d_rx_mpdu_htt",
+ "d_rx_msdu_stack",
+ "d_rx_mpdu_stack",
+ "d_rx_phy_err",
+ "d_rx_phy_err_drops",
+ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
+ "d_fw_crash_count",
+ "d_fw_warm_reset_count",
+ "d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH10K_SSTATS_LEN;
+
+ return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath10k *ar = hw->priv;
+ static const struct ath10k_fw_stats_pdev zero_stats = {};
+ const struct ath10k_fw_stats_pdev *pdev_stats;
+ int i = 0, ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ /* just print a warning and try to use older results */
+ ath10k_warn(ar,
+ "failed to get fw stats for ethtool: %d\n",
+ ret);
+ }
+ }
+
+ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+ struct ath10k_fw_stats_pdev,
+ list);
+ if (!pdev_stats) {
+ /* no results available so just return zeroes */
+ pdev_stats = &zero_stats;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
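+ /* the order below must match ath10k_gstrings_stats */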
+ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+ data[i++] = 0; /* tx bytes */
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = 0; /* rx bytes */
+ data[i++] = pdev_stats->ch_noise_floor;
+ data[i++] = pdev_stats->cycle_count;
+ data[i++] = pdev_stats->phy_err_count;
+ data[i++] = pdev_stats->rts_bad;
+ data[i++] = pdev_stats->rts_good;
+ data[i++] = pdev_stats->chan_tx_power;
+ data[i++] = pdev_stats->fcs_bad;
+ data[i++] = pdev_stats->no_beacons;
+ data[i++] = pdev_stats->mpdu_enqued;
+ data[i++] = pdev_stats->msdu_enqued;
+ data[i++] = pdev_stats->wmm_drop;
+ data[i++] = pdev_stats->local_enqued;
+ data[i++] = pdev_stats->local_freed;
+ data[i++] = pdev_stats->hw_queued;
+ data[i++] = pdev_stats->hw_reaped;
+ data[i++] = pdev_stats->underrun;
+ data[i++] = pdev_stats->tx_abort;
+ data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->tx_ko;
+ data[i++] = pdev_stats->data_rc;
+ data[i++] = pdev_stats->sw_retry_failure;
+ data[i++] = pdev_stats->illgl_rate_phy_err;
+ data[i++] = pdev_stats->pdev_cont_xretry;
+ data[i++] = pdev_stats->pdev_tx_timeout;
+ data[i++] = pdev_stats->txop_ovf;
+ data[i++] = pdev_stats->pdev_resets;
+ data[i++] = pdev_stats->mid_ppdu_route_change;
+ data[i++] = pdev_stats->status_rcvd;
+ data[i++] = pdev_stats->r0_frags;
+ data[i++] = pdev_stats->r1_frags;
+ data[i++] = pdev_stats->r2_frags;
+ data[i++] = pdev_stats->r3_frags;
+ data[i++] = pdev_stats->htt_msdus;
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = pdev_stats->loc_msdus;
+ data[i++] = pdev_stats->loc_mpdus;
+ data[i++] = pdev_stats->phy_errs;
+ data[i++] = pdev_stats->phy_err_drop;
+ data[i++] = pdev_stats->mpdu_errs;
+ data[i++] = ar->stats.fw_crash_counter;
+ data[i++] = ar->stats.fw_warm_reset_counter;
+ data[i++] = ar->stats.fw_cold_reset_counter;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf;
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto err;
+ }
+
+ buf = vmalloc(QCA988X_CAL_DATA_LEN);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
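+ /* look up the target address of the board (calibration) data via the host interest area */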
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+ goto err_vfree;
+ }
+
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ QCA988X_CAL_DATA_LEN);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+ goto err_vfree;
+ }
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_vfree:
+ vfree(buf);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ void *buf = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, QCA988X_CAL_DATA_LEN);
+}
+
+static int ath10k_debug_cal_data_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+ .release = ath10k_debug_cal_data_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_nf_cal_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n",
+ ar->debug.nf_cal_period);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_nf_cal_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long period;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &period);
+ if (ret)
+ return ret;
+
+ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
+ return -EINVAL;
+
+ /* there's no way to switch back to the firmware default */
+ if (period == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.nf_cal_period = period;
+
+ if (ar->state != ATH10K_STATE_ON) {
+ /* firmware is not running, nothing else to do */
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret) {
+ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_nf_cal_period = {
+ .read = ath10k_read_nf_cal_period,
+ .write = ath10k_write_nf_cal_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
+ ret);
+
+ if (ar->debug.fw_dbglog_mask) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ATH10K_DBGLOG_LEVEL_WARN);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to enable dbglog during start: %d",
+ ret);
+ }
+
+ if (ar->debug.pktlog_filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->debug.pktlog_filter);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ }
+
+ if (ar->debug.nf_cal_period) {
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Must not use _sync here to avoid deadlock; the sync cancel is done in
+ * ath10k_debug_unregister(). The check for htt_stats_mask is to avoid a
+ * warning from del_timer(). */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+
+ ar->debug.htt_max_amsdu = 0;
+ ar->debug.htt_max_ampdu = 0;
+
+ ath10k_wmi_pdev_pktlog_disable(ar);
+}
+
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+
+ ieee80211_radar_detected(ar->hw);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath10k_write_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define ATH10K_DFS_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval = 0, len = 0;
+ const int size = 8000;
+ struct ath10k *ar = file->private_data;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!ar->dfs_detector) {
+ len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+ goto exit;
+ }
+
+ ar->debug.dfs_pool_stats =
+ ar->dfs_detector->get_stats(ar->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ ATH10K_DFS_STAT("reported phy errors", phy_errors);
+ ATH10K_DFS_STAT("pulse events reported", pulses_total);
+ ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+ ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+ ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+ ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+ ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+ ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+ ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+ ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+ ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = ath10k_read_dfs_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter && (filter != ar->debug.pktlog_filter)) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ ar->debug.pktlog_filter = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.pktlog_filter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath10k_read_pktlog_filter,
+ .write = ath10k_write_pktlog_filter,
+ .open = simple_open
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+ ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
+ if (!ar->debug.fw_crash_data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+
+ return 0;
+}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ vfree(ar->debug.fw_crash_data);
+ ar->debug.fw_crash_data = NULL;
+
+ ath10k_debug_fw_stats_reset(ar);
+}
+
+int ath10k_debug_register(struct ath10k *ar)
+{
+ ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+ ar->hw->wiphy->debugfsdir);
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
+ if (IS_ERR(ar->debug.debugfs_phy))
+ return PTR_ERR(ar->debug.debugfs_phy);
+
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
+ init_completion(&ar->debug.fw_stats_complete);
+
+ debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_fw_stats);
+
+ debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_reset_stats);
+
+ debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_wmi_services);
+
+ debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_simulate_fw_crash);
+
+ debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_crash_dump);
+
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_addr);
+
+ debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_reg_value);
+
+ debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_mem_value);
+
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
+ debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_htt_max_amsdu_ampdu);
+
+ debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_dbglog);
+
+ debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_simulate_radar);
+
+ debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+ ar->debug.debugfs_phy,
+ &ar->dfs_block_radar_events);
+
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_dfs_stats);
+ }
+
+ debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+
+ return 0;
+}
+
+void ath10k_debug_unregister(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath10k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
+
+ trace_ath10k_log_dbg(ar, mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ unsigned int linebuflen;
+ const void *ptr;
+
+ if (ath10k_debug_mask & mask) {
+ if (msg)
+ ath10k_dbg(ar, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+ }
+ }
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 000000000..a12b8323f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+ ATH10K_DBG_PCI = 0x00000001,
+ ATH10K_DBG_WMI = 0x00000002,
+ ATH10K_DBG_HTC = 0x00000004,
+ ATH10K_DBG_HTT = 0x00000008,
+ ATH10K_DBG_MAC = 0x00000010,
+ ATH10K_DBG_BOOT = 0x00000020,
+ ATH10K_DBG_PCI_DUMP = 0x00000040,
+ ATH10K_DBG_HTT_DUMP = 0x00000080,
+ ATH10K_DBG_MGMT = 0x00000100,
+ ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
+ ATH10K_DBG_TESTMODE = 0x00001000,
+ ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_ANY = 0xffffffff,
+};
+
+enum ath10k_pktlog_filter {
+ ATH10K_PKTLOG_RX = 0x000000001,
+ ATH10K_PKTLOG_TX = 0x000000002,
+ ATH10K_PKTLOG_RCFIND = 0x000000004,
+ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
+ ATH10K_PKTLOG_ANY = 0x00000001f,
+};
+
+enum ath10k_dbg_aggr_mode {
+ ATH10K_DBG_AGGR_MODE_AUTO,
+ ATH10K_DBG_AGGR_MODE_MANUAL,
+ ATH10K_DBG_AGGR_MODE_MAX,
+};
+
+extern unsigned int ath10k_debug_mask;
+
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+void ath10k_print_driver_info(struct ath10k *ar);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
+int ath10k_debug_register(struct ath10k *ar);
+void ath10k_debug_unregister(struct ath10k *ar);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
+
+void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
+ int len)
+{
+}
+
+static inline struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *fmt, ...);
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline int ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 000000000..95b5c4937
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi-ops.h"
+#include "debug.h"
+
+static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
+ (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (aggr_mode == arsta->aggr_mode)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath10k_dbg_sta_read_aggr_mode,
+ .write = ath10k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath10k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath10k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64];
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = '\0';
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath10k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
+ &fops_aggr_mode);
+ debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+}
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 000000000..0c92e0251
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+#include "debug.h"
+
+struct ath10k_hif_sg_item {
+ u16 transfer_id;
+ void *transfer_context; /* NULL = tx completion callback not called */
+ void *vaddr; /* for debugging mostly */
+ u32 paddr;
+ u16 len;
+};
+
+struct ath10k_hif_cb {
+ int (*tx_completion)(struct ath10k *ar,
+ struct sk_buff *wbuf);
+ int (*rx_completion)(struct ath10k *ar,
+ struct sk_buff *wbuf);
+};
+
+struct ath10k_hif_ops {
+ /* send a scatter-gather list to the target */
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+
+ /* read firmware memory through the diagnose interface */
+ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+
+ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+ int nbytes);
+ /*
+ * API to handle HIF-specific BMI message exchanges. This API is
+ * synchronous and may only be called from a context that can
+ * block (sleep).
+ */
+ int (*exchange_bmi_msg)(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len);
+
+ /* Post BMI phase, after FW is loaded. Starts regular operation */
+ int (*start)(struct ath10k *ar);
+
+ /* Clean up what start() did. This does not revert to BMI phase. If
+ * that is desired, call power_down() and power_up(). */
+ void (*stop)(struct ath10k *ar);
+
+ int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe,
+ int *ul_is_polled, int *dl_is_polled);
+
+ void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+ /*
+ * Check if prior sends have completed.
+ *
+ * Check whether the pipe in question has any completed
+ * sends that have not yet been processed.
+ * This function is only relevant for HIF pipes that are configured
+ * to be polled rather than interrupt-driven.
+ */
+ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+ void (*set_callbacks)(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks);
+
+ u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+ u32 (*read32)(struct ath10k *ar, u32 address);
+
+ void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar);
+
+ /* Power down the device and free up resources. stop() must be called
+ * before this if start() was called earlier */
+ void (*power_down)(struct ath10k *ar);
+
+ int (*suspend)(struct ath10k *ar);
+ int (*resume)(struct ath10k *ar);
+};
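+
+/*
+ * Editorial sketch (not part of the original patch): a bus backend is
+ * expected to populate this ops table and point ar->hif.ops at it; the
+ * core then only calls through the inline wrappers below. The my_bus_*
+ * names here are hypothetical placeholders, not real ath10k symbols:
+ *
+ *	static const struct ath10k_hif_ops my_bus_hif_ops = {
+ *		.tx_sg			= my_bus_hif_tx_sg,
+ *		.diag_read		= my_bus_hif_diag_read,
+ *		.exchange_bmi_msg	= my_bus_hif_exchange_bmi_msg,
+ *		.start			= my_bus_hif_start,
+ *		.stop			= my_bus_hif_stop,
+ *		.map_service_to_pipe	= my_bus_hif_map_service_to_pipe,
+ *		.get_default_pipe	= my_bus_hif_get_default_pipe,
+ *		.set_callbacks		= my_bus_hif_set_callbacks,
+ *		.power_up		= my_bus_hif_power_up,
+ *		.power_down		= my_bus_hif_power_down,
+ *	};
+ */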
+
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
+{
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
+}
+
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ if (!ar->hif.ops->diag_write)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len)
+{
+ return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+ response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+ return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+ return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe,
+ int *ul_is_polled,
+ int *dl_is_polled)
+{
+ return ar->hif.ops->map_service_to_pipe(ar, service_id,
+ ul_pipe, dl_pipe,
+ ul_is_polled, dl_is_polled);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe_id, int force)
+{
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
+{
+ ar->hif.ops->set_callbacks(ar, callbacks);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+ u8 pipe_id)
+{
+ return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+ return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+ ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+ if (!ar->hif.ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+ if (!ar->hif.ops->resume)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->resume(ar);
+}
+
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+ if (!ar->hif.ops->read32) {
+ ath10k_warn(ar, "hif read32 not supported\n");
+ return 0xdeaddead;
+ }
+
+ return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+ u32 address, u32 data)
+{
+ if (!ar->hif.ops->write32) {
+ ath10k_warn(ar, "hif write32 not supported\n");
+ return;
+ }
+
+ ar->hif.ops->write32(ar, address, data);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 000000000..2fd9e1802
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,874 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
+ int force)
+{
+ /*
+ * Check whether HIF has any prior sends that have finished but
+ * have not yet had their post-processing done.
+ */
+ ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
+}
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = ep->htc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ep->eid, skb);
+
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+ if (!ep->ep_ops.ep_tx_complete) {
+ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+
+/* assumes tx_lock is held */
+static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
+{
+ struct ath10k *ar = ep->htc->ar;
+
+ if (!ep->tx_credit_flow_enabled)
+ return false;
+ if (ep->tx_credits >= ep->tx_credits_per_max_message)
+ return false;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+ ep->eid);
+ return true;
+}
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *hdr;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+
+ hdr->eid = ep->eid;
+ hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->seq_no = ep->seq_no++;
+
+ if (ath10k_htc_ep_need_credit_update(ep))
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+ struct device *dev = htc->ar->dev;
+ int credits = 0;
+ int ret;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret)
+ goto err_credits;
+
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+ sg_item.vaddr = skb->data;
+ sg_item.paddr = skb_cb->paddr;
+ sg_item.len = skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
+}
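+
+/*
+ * Editorial note (not part of the original patch): the credit cost charged
+ * above is DIV_ROUND_UP(skb->len, htc->target_credit_size), i.e. one credit
+ * per started credit-size block. For example, with a hypothetical
+ * target_credit_size of 1536 bytes a 320-byte command costs one credit and
+ * a 2000-byte command costs two; the credits flow back from the target via
+ * the credit report trailers handled in ath10k_htc_process_credit_report()
+ * below.
+ */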
+
+static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_htc_ep *ep;
+
+ if (WARN_ON_ONCE(!skb))
+ return 0;
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ ep = &htc->endpoint[skb_cb->eid];
+
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* the skb now belongs to the completion handler */
+
+ return 0;
+}
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+ const struct ath10k_htc_credit_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath10k_warn(ar, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH10K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid)
+{
+ struct ath10k *ar = htc->ar;
+ int status = 0;
+ struct ath10k_htc_record *record;
+ u8 *orig_buffer;
+ int orig_length;
+ size_t len;
+
+ orig_buffer = buffer;
+ orig_length = length;
+
+ while (length > 0) {
+ record = (struct ath10k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath10k_warn(ar, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH10K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath10k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath10k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ if (status)
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ orig_buffer, orig_length);
+
+ return status;
+}
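+
+/*
+ * Editorial sketch (not part of the original patch): the trailer occupies
+ * the tail of the rx payload and carries one or more id-len-value records.
+ * With a single credit report the trailer bytes would be (values purely
+ * illustrative):
+ *
+ *	record hdr:     id=1, len=4, pad, pad      (4 bytes)
+ *	credit report:  eid=1, credits=2, pad, pad (4 bytes)
+ *
+ * giving hdr->trailer_len == 8. The rx completion handler below locates the
+ * trailer as (u8 *)hdr + sizeof(*hdr) + payload_len - trailer_len before
+ * calling this function.
+ */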
+
+static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_hdr *hdr;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = hdr->eid;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ /*
+ * If this endpoint that received a message from the target has
+ * a to-target HIF pipe whose send completions are polled rather
+ * than interrupt-driven, this is a good point to ask HIF to check
+ * whether it has any completed sends to handle.
+ */
+ if (ep->ul_is_polled)
+ ath10k_htc_send_complete_check(ep, 1);
+
+ payload_len = __le16_to_cpu(hdr->len);
+
+ if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
+ "", hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = hdr->trailer_len;
+ min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath10k_warn(ar, "Invalid trailer length: %d\n",
+ trailer_len);
+ status = -EPROTO;
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath10k_htc_process_trailer(htc, trailer,
+ trailer_len, hdr->eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (((int)payload_len - (int)trailer_len) <= 0)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH10K_HTC_EP_0) {
+ struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+ switch (__le16_to_cpu(msg->hdr.message_id)) {
+ default:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /*
+ * This is a fatal error: the target should not be
+ * sending unsolicited messages on ep 0
+ */
+ ath10k_warn(ar, "HTC rx ctrl still processing\n");
+ status = -EINVAL;
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ htc->htc_ops.target_send_suspend_complete(ar);
+ }
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ar, skb);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+
+ return status;
+}
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint. */
+ ath10k_warn(ar, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH10K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH10K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ }
+
+ return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_svc_tx_credits *entry;
+
+ entry = &htc->service_tx_alloc[0];
+
+ /*
+ * For PCIe, allocate all credits/HTC buffers to WMI.
+ * No buffers are used/required for data; data always
+ * remains on the host.
+ */
+ entry++;
+ entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+ entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+ u16 service_id)
+{
+ u8 allocation = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+ if (htc->service_tx_alloc[i].service_id == service_id)
+ allocation =
+ htc->service_tx_alloc[i].credit_allocation;
+ }
+
+ return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ int i, status = 0;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k_htc_msg *msg;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (status == 0) {
+ /* Workaround: In some cases the PCI HIF doesn't
+ * receive interrupt for the control response message
+ * even if the buffer was completed. It is suspected
+ * iomap writes unmasking PCI CE irqs aren't propagated
+ * properly in KVM PCI-passthrough sometimes.
+ */
+ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (status == 0)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ credit_count = __le16_to_cpu(msg->ready.credit_count);
+ credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+ if (message_id != ATH10K_HTC_MSG_READY_ID) {
+ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits,
+ htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath10k_err(ar, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ ath10k_htc_setup_target_buffer_assignments(htc);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_err(ar, "could not connect to htc service (%d)\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_msg *msg;
+ struct ath10k_htc_conn_svc *req_msg;
+ struct ath10k_htc_conn_svc_response resp_msg_dummy;
+ struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+ enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH10K_HTC_EP_0;
+ max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath10k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb) {
+ ath10k_err(ar, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+ flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (status == 0) {
+ ath10k_err(ar, "Service connect timeout: %d\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ resp_msg = &msg->connect_service_response;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ service_id = __le16_to_cpu(resp_msg->service_id);
+
+ if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(msg->hdr) +
+ sizeof(msg->connect_service_response))) {
+ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+ htc_service_name(service_id),
+ resp_msg->status, resp_msg->eid);
+
+ conn_resp->connect_resp_code = resp_msg->status;
+
+ /* check response status */
+ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ resp_msg->status);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+ max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+ if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+ ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
+ ep->tx_credits_per_max_message = ep->max_ep_message_len /
+ htc->target_credit_size;
+
+ if (ep->max_ep_message_len % htc->target_credit_size)
+ ep->tx_credits_per_max_message++;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath10k_hif_map_service_to_pipe(htc->ar,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id,
+ &ep->ul_is_polled,
+ &ep->dl_is_polled);
+ if (status)
+ return status;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
+ ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath10k_htc_msg *msg;
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+/* registered target arrival callback from the HIF layer */
+int ath10k_htc_init(struct ath10k *ar)
+{
+ struct ath10k_hif_cb htc_callbacks;
+ struct ath10k_htc_ep *ep = NULL;
+ struct ath10k_htc *htc = &ar->htc;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath10k_htc_reset_endpoint_states(htc);
+
+ /* setup HIF layer callbacks */
+ htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
+ htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
+ htc->ar = ar;
+
+ /* Get HIF default pipe for HTC message exchange */
+ ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+ ath10k_hif_set_callbacks(ar, &htc_callbacks);
+ ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
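+
+/*
+ * Editorial sketch (not part of the original patch): the functions in this
+ * file are expected to be used by the core roughly in this order (exact
+ * call sites live in core.c, outside this hunk):
+ *
+ *	ath10k_htc_init(ar);              register HIF completion callbacks
+ *	ath10k_hif_start(ar);             HIF starts delivering completions
+ *	ath10k_htc_wait_target(&ar->htc); wait for the HTC ready message
+ *	ath10k_htc_connect_service(...);  once per service (WMI, HTT, ...)
+ *	ath10k_htc_start(&ar->htc);       send the setup-complete message
+ */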
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000..527179c0e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len
+ * from len (see the sketch after the header struct below).
+ *
+ * Trailer contains (possibly) multiple <htc_record>.
+ * Each record is a id-len-value.
+ *
+ * HTC header flags, control_byte0 and control_byte1
+ * have different meanings depending on whether the
+ * frame is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
+
+enum ath10k_htc_tx_flags {
+ ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath10k_htc_hdr {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+ __le16 len;
+ union {
+ u8 trailer_len; /* for rx */
+ u8 control_byte0;
+ } __packed;
+ union {
+ u8 seq_no; /* for tx */
+ u8 control_byte1;
+ } __packed;
+ u8 pad0;
+ u8 pad1;
+} __packed __aligned(4);
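+
+/*
+ * Editorial sketch (not part of the original patch): on rx the len field
+ * covers payload plus trailer, so when ATH10K_HTC_FLAG_TRAILER_PRESENT is
+ * set in hdr->flags the payload-only length is recovered as e.g.:
+ *
+ *	u16 payload_len = __le16_to_cpu(hdr->len) - hdr->trailer_len;
+ *
+ * Without the trailer flag, trailer_len aliases control_byte0 and must not
+ * be interpreted as a length.
+ */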
+
+enum ath10k_ath10k_htc_msg_id {
+ ATH10K_HTC_MSG_READY_ID = 1,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath10k_htc_version {
+ ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+ ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
+};
+
+enum ath10k_htc_conn_svc_status {
+ ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+ __le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+ __le16 credit_count;
+ __le16 credit_size;
+ u8 max_endpoints;
+ u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+ struct ath10k_htc_ready base;
+ u8 htc_version; /* @enum ath10k_htc_version */
+ u8 max_msgs_per_htc_bundle;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+ __le16 service_id;
+ __le16 flags; /* @enum ath10k_htc_conn_flags */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+ __le16 service_id;
+ u8 status; /* @enum ath10k_htc_conn_svc_status */
+ u8 eid;
+ __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+ u8 pad0;
+ u8 pad1;
+ __le32 flags; /* @enum htc_setup_complete_flags */
+ u8 max_msgs_per_bundled_recv;
+ u8 pad2;
+ u8 pad3;
+ u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+ struct ath10k_ath10k_htc_msg_hdr hdr;
+ union {
+ /* host-to-target */
+ struct ath10k_htc_conn_svc connect_service;
+ struct ath10k_htc_ready ready;
+ struct ath10k_htc_ready_extended ready_ext;
+ struct ath10k_htc_unknown unknown;
+ struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+ /* target-to-host */
+ struct ath10k_htc_conn_svc_response connect_service_response;
+ };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+ ATH10K_HTC_RECORD_NULL = 0,
+ ATH10K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+ u8 id; /* @enum ath10k_ath10k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_record {
+ struct ath10k_ath10k_htc_record_hdr hdr;
+ union {
+ struct ath10k_htc_credit_report credit_report[0];
+ u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/*
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath10k_htc_frame {
+ struct ath10k_htc_hdr hdr;
+ union {
+ struct ath10k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+ ATH10K_HTC_SVC_GRP_RSVD = 0,
+ ATH10K_HTC_SVC_GRP_WMI = 1,
+ ATH10K_HTC_SVC_GRP_NMI = 2,
+ ATH10K_HTC_SVC_GRP_HTT = 3,
+
+ ATH10K_HTC_SVC_GRP_TEST = 254,
+ ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
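+
+/*
+ * Editorial note (not part of the original patch): SVC() packs the group
+ * into the upper byte and the index into the lower byte, e.g.
+ * SVC(ATH10K_HTC_SVC_GRP_WMI, 2) == (1 << 8) | 2 == 0x0102, which becomes
+ * ATH10K_HTC_SVC_ID_WMI_DATA_BK in the enum below.
+ */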
+
+enum ath10k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
+
+ ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+ ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+ ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+ ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+ ATH10K_HTC_EP_UNUSED = -1,
+ ATH10K_HTC_EP_0 = 0,
+ ATH10K_HTC_EP_1 = 1,
+ ATH10K_HTC_EP_2,
+ ATH10K_HTC_EP_3,
+ ATH10K_HTC_EP_4,
+ ATH10K_HTC_EP_5,
+ ATH10K_HTC_EP_6,
+ ATH10K_HTC_EP_7,
+ ATH10K_HTC_EP_8,
+ ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath10k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+ struct ath10k_htc *htc;
+ enum ath10k_htc_ep_id eid;
+ enum ath10k_htc_svc_id service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+ int ul_is_polled; /* call HIF to get tx completions */
+ int dl_is_polled; /* call HIF to fetch rx (not implemented) */
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ int tx_credit_size;
+ int tx_credits_per_max_message;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath10k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath10k_htc {
+ struct ath10k *ar;
+ struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath10k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+ int target_credit_size;
+};
+
+int ath10k_htc_init(struct ath10k *ar);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 000000000..4f59ab923
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+int ath10k_htt_connect(struct ath10k_htt *htt)
+{
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ htt->eid = conn_resp.eid;
+
+ return 0;
+}
+
+int ath10k_htt_init(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+
+ htt->ar = ar;
+
+ /*
+ * Prefetch enough data to satisfy target
+ * classification engine.
+ * This is for LL chips. HL chips will probably
+ * transfer the whole frame in the tx fragment.
+ */
+ htt->prefetch_len =
+ 36 + /* 802.11 + qos + ht */
+ 4 + /* 802.1q */
+ 8 + /* llc snap */
+ 2; /* ip4 dscp or ip6 priority */
+
+ return 0;
+}
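+
+/*
+ * Editorial note (not part of the original patch): the prefetch_len chosen
+ * above sums to 36 + 4 + 8 + 2 = 50 bytes, i.e. just enough of the frame
+ * start for the target classification engine to see the 802.11 QoS/HT
+ * header, an 802.1Q tag, the LLC/SNAP header and the IPv4 DSCP or IPv6
+ * priority byte.
+ */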
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_setup(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int status;
+
+ init_completion(&htt->target_version_received);
+
+ status = ath10k_htt_h2t_ver_req_msg(htt);
+ if (status)
+ return status;
+
+ status = wait_for_completion_timeout(&htt->target_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (status == 0) {
+ ath10k_warn(ar, "htt version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ status = ath10k_htt_verify_version(htt);
+ if (status)
+ return status;
+
+ return ath10k_htt_send_rx_ring_cfg_ll(htt);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000..874bf44ff
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,1431 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/hashtable.h>
+#include <net/mac80211.h>
+
+#include "htc.h"
+#include "rx_desc.h"
+
+enum htt_dbg_stats_type {
+ HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+ HTT_DBG_STATS_RX_REORDER = 1 << 1,
+ HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
+ HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
+ HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
+ /* bits 5-23 currently reserved */
+
+ HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_TX_FRM = 1,
+ HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
+ HTT_H2T_MSG_TYPE_STATS_REQ = 3,
+ HTT_H2T_MSG_TYPE_SYNC = 4,
+ HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
+ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything. */
+ HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+
+ HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+ u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU. The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral. Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs (see the sketch after struct
+ * htt_data_tx_desc below).
+ */
+struct htt_data_tx_desc_frag {
+ __le32 paddr;
+ __le32 len;
+} __packed;
+
+enum htt_data_tx_desc_flags0 {
+ HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+ HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
+ HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
+ HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
+ HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
+ HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
+ HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+ HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+ HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+ HTT_DATA_TX_EXT_TID_MGMT = 17,
+ HTT_DATA_TX_EXT_TID_INVALID = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ * for special kinds of tids
+ * postponed: only for HL hosts. indicates if this is a resend
+ * (HL hosts manage queues on the host)
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending. this allows target to wait and aggregate
+ * freq: 0 means home channel of given vdev. intended for offchannel
+ */
+struct htt_data_tx_desc {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le32 frags_paddr;
+ __le16 peerid;
+ __le16 freq;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
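+
+/*
+ * Editorial sketch (not part of the original patch): as noted above, the
+ * host fills flags0/flags1 via mask + shift rather than bitfields.
+ * Assuming the driver's SM() helper (used earlier in this patch for the
+ * HTC connect flags) and local vdev_id/tid variables, that looks roughly
+ * like:
+ *
+ *	desc->flags1 = __cpu_to_le16(
+ *			SM(vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID) |
+ *			SM(tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID));
+ */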
+
+enum htt_rx_ring_flags {
+ HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+ HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+ HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
+ HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
+ HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
+ HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
+ HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
+ HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
+ HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+ HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
+ HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
+ HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+ HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
+ HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
+ HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
+ HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
+};
+
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+
+struct htt_rx_ring_setup_ring {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+ u8 num_rings; /* supported values: 1, 2 */
+ __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ * so make sure it is little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ * so make sure it is little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie
+ */
+struct htt_stats_req {
+ u8 upload_types[3];
+ u8 rsvd0;
+ u8 reset_types[3];
+ struct {
+ u8 mpdu_bytes;
+ u8 mpdu_num_msdus;
+ u8 msdu_bytes;
+ } __packed;
+ u8 stat_type;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+ u8 sync_count;
+ __le16 rsvd0;
+} __packed;
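+
+/*
+ * Editorial sketch (not part of the original patch): the modulo comparison
+ * described above can be written as:
+ *
+ *	u8 delta = (in_band_count - oob_count) & 0xff;
+ *	bool suspended = (delta > 0 && delta < 128);
+ *
+ * i.e. host->target processing stays suspended while the in-band counter
+ * leads the out-of-band counter by 1..127 modulo 256.
+ */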
+
+struct htt_aggr_conf {
+ u8 max_num_ampdu_subframes;
+ /* amsdu_subframes is limited by 0x1F mask */
+ u8 max_num_amsdu_subframes;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+
+struct htt_mgmt_tx_desc {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+ __le32 msdu_paddr;
+ __le32 desc_id;
+ __le32 len;
+ __le32 vdev_id;
+ u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+} __packed;
+
+enum htt_mgmt_tx_status {
+ HTT_MGMT_TX_STATUS_OK = 0,
+ HTT_MGMT_TX_STATUS_RETRY = 1,
+ HTT_MGMT_TX_STATUS_DROP = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+ /* 0x13 reserved */
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+
+ /* FIXME: Do not depend on this event id. Numbering of this event id is
+ * broken across different firmware revisions and HTT version fails to
+ * indicate this.
+ */
+ HTT_T2H_MSG_TYPE_TEST,
+
+ /* keep this last */
+ HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+ u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB 0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+ u8 minor;
+ u8 major;
+ u8 rsvd0;
+} __packed;
+
+struct htt_mgmt_tx_completion {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+ __le32 desc_id;
+ __le32 status;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
+
+struct htt_rx_indication_hdr {
+ u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
+
+enum htt_rx_legacy_rate {
+ HTT_RX_OFDM_48 = 0,
+ HTT_RX_OFDM_24 = 1,
+ HTT_RX_OFDM_12,
+ HTT_RX_OFDM_6,
+ HTT_RX_OFDM_54,
+ HTT_RX_OFDM_36,
+ HTT_RX_OFDM_18,
+ HTT_RX_OFDM_9,
+
+ /* long preamble */
+ HTT_RX_CCK_11_LP = 0,
+ HTT_RX_CCK_5_5_LP = 1,
+ HTT_RX_CCK_2_LP,
+ HTT_RX_CCK_1_LP,
+ /* short preamble */
+ HTT_RX_CCK_11_SP,
+ HTT_RX_CCK_5_5_SP,
+ HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+ HTT_RX_LEGACY_RATE_OFDM = 0,
+ HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+ HTT_RX_LEGACY = 0x4,
+ HTT_RX_HT = 0x8,
+ HTT_RX_HT_WITH_TXBF = 0x9,
+ HTT_RX_VHT = 0xC,
+ HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+ u8 combined_rssi;
+ u8 sub_usec_timestamp;
+ u8 phy_err_code;
+ u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+ struct {
+ u8 pri20_db;
+ u8 ext20_db;
+ u8 ext40_db;
+ u8 ext80_db;
+ } __packed rssi_chains[4];
+ __le32 tsf;
+ __le32 usec_timestamp;
+ __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+ __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
+
+enum htt_rx_mpdu_status {
+ HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+ HTT_RX_IND_MPDU_STATUS_OK,
+ HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+ HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+ HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+ HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+ /* only accept EAPOL frames */
+ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+ HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+ /* Non-data in promiscuous mode */
+ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+ HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+ HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+ HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+ HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+ /*
+ * MISC: discard for unspecified reasons.
+ * Leave this enum value last.
+ */
+ HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+ u8 mpdu_count;
+ u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+ __le16 fw_rx_desc_bytes;
+ u8 pad0;
+ u8 pad1;
+};
+
+struct htt_rx_indication {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+
+ /*
+ * the following fields are both dynamically sized, so
+ * take care addressing them
+ */
+
+ /* the size of this is %fw_rx_desc_bytes */
+ struct fw_rx_desc_base fw_desc;
+
+ /*
+ * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+ * and has %num_mpdu_ranges elements.
+ */
+ struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+ return ptr;
+}
+
+enum htt_rx_flush_mpdu_status {
+ HTT_RX_FLUSH_MPDU_DISCARD = 0,
+ HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ * [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+ __le16 peer_id;
+ u8 tid;
+ u8 rsvd0;
+ u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+ u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
+ u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
+};
+
+struct htt_rx_peer_map {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 rsvd0;
+ u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+ u8 rsvd0;
+ __le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+ HTT_SECURITY_NONE,
+ HTT_SECURITY_WEP128,
+ HTT_SECURITY_WEP104,
+ HTT_SECURITY_WEP40,
+ HTT_SECURITY_TKIP,
+ HTT_SECURITY_TKIP_NOMIC,
+ HTT_SECURITY_AES_CCMP,
+ HTT_SECURITY_WAPI,
+
+ HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB 0
+ HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+ union {
+ /* don't use bitfields; undefined behaviour */
+ u8 flags; /* %htt_security_flags */
+ struct {
+ u8 security_type:7, /* %htt_security_types */
+ is_unicast:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ u8 michael_key[8];
+ u8 wapi_rsc[16];
+} __packed;
+
+#define HTT_RX_BA_INFO0_TID_MASK 0x000F
+#define HTT_RX_BA_INFO0_TID_LSB 0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
+
+struct htt_rx_addba {
+ u8 window_size;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+ u8 rsvd0;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+ HTT_DATA_TX_STATUS_OK = 0,
+ HTT_DATA_TX_STATUS_DISCARD = 1,
+ HTT_DATA_TX_STATUS_NO_ACK = 2,
+ HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
+ HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB 0
+#define HTT_DATA_TX_TID_MASK 0x78
+#define HTT_DATA_TX_TID_LSB 3
+ HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+ union {
+ u8 flags;
+ struct {
+ u8 status:3,
+ tid:4,
+ tid_invalid:1;
+ } __packed;
+ } __packed;
+ u8 num_msdus;
+ u8 rsvd0;
+ __le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
+
+struct htt_tx_compl_ind_base {
+ u32 hdr;
+ u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+ u32 rate_code;
+ u32 rate_code_flags;
+ u32 flags;
+ u32 num_enqued; /* 1 for non-AMPDU */
+ u32 num_retries;
+ u32 num_failed; /* for AMPDU */
+ u32 ack_rssi;
+ u32 time_stamp;
+ u32 is_probe;
+};
+
+struct htt_rc_update {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 num_elems;
+ u8 rsvd0;
+ struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+ union {
+ u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+ struct {
+ u8 ext_tid:5,
+ flush_valid:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+ __le16 fw_rx_desc_bytes;
+ __le16 rsvd0;
+
+ u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
+struct htt_rx_pn_ind {
+ __le16 peer_id;
+ u8 tid;
+ u8 seqno_start;
+ u8 seqno_end;
+ u8 pn_ie_count;
+ u8 reserved;
+ u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+ __le16 msdu_len;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 tid;
+ u8 fw_desc;
+ u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+ u8 reserved;
+ __le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+ __le32 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+ u8 info;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 reserved;
+ __le16 msdu_count;
+ struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | num chars | num ints | msg type |
+ * |-----------------------------------------------------------|
+ * | int 0 |
+ * |-----------------------------------------------------------|
+ * | int 1 |
+ * |-----------------------------------------------------------|
+ * | ... |
+ * |-----------------------------------------------------------|
+ * | char 3 | char 2 | char 1 | char 0 |
+ * |-----------------------------------------------------------|
+ * | | | ... | char 4 |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a test message
+ * Value: HTT_MSG_TYPE_TEST
+ * - NUM_INTS
+ * Bits 15:8
+ * Purpose: indicate how many 32-bit integers follow the message header
+ * - NUM_CHARS
+ * Bits 31:16
+ * Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+ u8 num_ints;
+ __le16 num_chars;
+
+ /* payload consists of 2 lists:
+ * a) num_ints * sizeof(__le32)
+ * b) num_chars * sizeof(u8) aligned to 4 bytes */
+ u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+ return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+ return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
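+
+/*
+ * Illustrative sketch, not part of the original patch: consuming a test
+ * message with the two accessors above. The pr_debug() calls are only for
+ * demonstration.
+ */
+static inline void htt_rx_test_dump(struct htt_rx_test *rx_test)
+{
+ __le32 *ints = htt_rx_test_get_ints(rx_test);
+ u8 *chars = htt_rx_test_get_chars(rx_test);
+ int i;
+
+ for (i = 0; i < rx_test->num_ints; i++)
+  pr_debug("test int %d: %u\n", i, __le32_to_cpu(ints[i]));
+
+ for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
+  pr_debug("test char %d: %c\n", i, chars[i]);
+}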
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | | | | msg type |
+ * |-----------------------------------------------------------|
+ * | payload |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a packet log message
+ * Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+ u8 pad[3];
+ u8 payload[0];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+ /* Non QoS MPDUs received */
+ __le32 deliver_non_qos;
+
+ /* MPDUs received in-order */
+ __le32 deliver_in_order;
+
+ /* Flush due to reorder timer expired */
+ __le32 deliver_flush_timeout;
+
+ /* Flush due to move out of window */
+ __le32 deliver_flush_oow;
+
+ /* Flush due to DELBA */
+ __le32 deliver_flush_delba;
+
+ /* MPDUs dropped due to FCS error */
+ __le32 fcs_error;
+
+ /* MPDUs dropped due to monitor mode non-data packet */
+ __le32 mgmt_ctrl;
+
+ /* MPDUs dropped due to invalid peer */
+ __le32 invalid_peer;
+
+ /* MPDUs dropped due to duplication (non aggregation) */
+ __le32 dup_non_aggr;
+
+ /* MPDUs dropped because they were processed before */
+ __le32 dup_past;
+
+ /* MPDUs dropped due to duplicate in reorder queue */
+ __le32 dup_in_reorder;
+
+ /* Reorder timeout happened */
+ __le32 reorder_timeout;
+
+ /* invalid bar ssn */
+ __le32 invalid_bar_ssn;
+
+ /* reorder reset due to bar ssn */
+ __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeout */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+ __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+ struct htt_dbg_stats_wal_tx_stats tx_stats;
+ struct htt_dbg_stats_wal_rx_stats rx_stats;
+ struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+ __le32 mcs[10];
+ __le32 sgi[10];
+ __le32 nss[4];
+ __le32 stbc[10];
+ __le32 bw[3];
+ __le32 pream[6];
+ __le32 ldpc;
+ __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present - The requested stats have been delivered in full.
+ * This indicates that either the stats information was contained
+ * in its entirety within this message, or else this message
+ * completes the delivery of the requested stats info that was
+ * partially delivered through earlier STATS_CONF messages.
+ * partial - The requested stats have been delivered in part.
+ * One or more subsequent STATS_CONF messages with the same
+ * cookie value will be sent to deliver the remainder of the
+ * information.
+ * error - The requested stats could not be delivered, for example due
+ * to a shortage of memory to construct a message holding the
+ * requested stats.
+ * invalid - The requested stat type is either not recognized, or the
+ * target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ * elements are present within a series of stats info elems
+ * (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+ HTT_DBG_STATS_STATUS_PRESENT = 0,
+ HTT_DBG_STATS_STATUS_PARTIAL = 1,
+ HTT_DBG_STATS_STATUS_ERROR = 2,
+ HTT_DBG_STATS_STATUS_INVALID = 3,
+ HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | reserved | msg type |
+ * |------------------------------------------------------------|
+ * | cookie LSBs |
+ * |------------------------------------------------------------|
+ * | cookie MSBs |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | n/a | reserved | 111 | n/a |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a statistics upload confirmation message
+ * Value: 0x9
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 4:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_stats_type
+ * - STATUS
+ * Bits 7:5
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ * the completion of the stats entry series
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes. Even if the length is not an integer multiple of 4, the
+ * subsequent stats entry header will begin on a 4-byte aligned
+ * boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
+
+struct htt_stats_conf_item {
+ union {
+ u8 info;
+ struct {
+ u8 stat_type:5; /* %HTT_DBG_STATS_ */
+ u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+ } __packed;
+ } __packed;
+ u8 pad;
+ __le16 length;
+ u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+ u8 pad[3];
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+
+ /* each item has variable length! */
+ struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+ const struct htt_stats_conf_item *item)
+{
+ return (void *)item + sizeof(*item) + roundup(item->length, 4);
+}
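+
+/*
+ * Illustrative sketch, not part of the original patch: iterating the stats
+ * info elements of a stats confirmation with the helper above. Per the
+ * format description, an item whose status equals
+ * HTT_DBG_STATS_STATUS_SERIES_DONE (all status bits set) terminates the
+ * series. The callback is hypothetical.
+ */
+static inline void
+htt_stats_conf_for_each_item(struct htt_stats_conf *conf,
+        void (*cb)(const struct htt_stats_conf_item *))
+{
+ const struct htt_stats_conf_item *item = conf->items;
+
+ while (item->status != HTT_DBG_STATS_STATUS_SERIES_DONE) {
+  cb(item);
+  item = htt_stats_conf_next_item(item);
+ }
+}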
+
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For QCA988X HW the firmware will use fragment_desc_ptr
+ * but in WIFI2.0 the hardware does the mapping/translation.
+ *
+ * The total number of banks that can be configured is 16.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
+ * |------------------------------------------------------------|
+ * | BANK0_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | BANK0_MAX_ID | BANK0_MIN_ID |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_MAX_ID | BANK15_MIN_ID |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Value: 0x6
+ * - BANKx_BASE_ADDRESS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ * bank physical/bus address.
+ * - BANKx_MIN_ID
+ * Bits 15:0
+ * Purpose: Provide a mechanism to specify the min index that needs to
+ * be mapped.
+ * - BANKx_MAX_ID
+ * Bits 31:16
+ * Purpose: Provide a mechanism to specify the max index that needs to
+ * be mapped.
+ */
+struct htt_frag_desc_bank_id {
+ __le16 bank_min_id;
+ __le16 bank_max_id;
+} __packed;
+
+/* the real maximum is 16, but it wouldn't fit in the max HTT message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
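+
+/*
+ * Illustrative sketch, not part of the original patch: filling a
+ * single-bank configuration. The paddr, desc_size and num_descs parameters
+ * are hypothetical placeholders for values the caller would obtain from its
+ * own descriptor pool.
+ */
+static inline void
+htt_frag_desc_bank_cfg_fill_one(struct htt_frag_desc_bank_cfg *cfg,
+    u32 paddr, u8 desc_size, u16 num_descs)
+{
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->info = 0; /* pdev id 0, no byte swapping */
+ cfg->num_banks = 1;
+ cfg->desc_size = desc_size;
+ cfg->bank_base_addrs[0] = __cpu_to_le32(paddr);
+ cfg->bank_id[0].bank_min_id = __cpu_to_le16(0);
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(num_descs - 1);
+}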
+
+union htt_rx_pn_t {
+ /* WEP: 24-bit PN */
+ u32 pn24;
+
+ /* TKIP or CCMP: 48-bit PN */
+ u_int64_t pn48;
+
+ /* WAPI: 128-bit PN */
+ u_int64_t pn128[2];
+};
+
+struct htt_cmd {
+ struct htt_cmd_hdr hdr;
+ union {
+ struct htt_ver_req ver_req;
+ struct htt_mgmt_tx_desc mgmt_tx;
+ struct htt_data_tx_desc data_tx;
+ struct htt_rx_ring_setup rx_setup;
+ struct htt_stats_req stats_req;
+ struct htt_oob_sync_req oob_sync_req;
+ struct htt_aggr_conf aggr_conf;
+ struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ };
+} __packed;
+
+struct htt_resp {
+ struct htt_resp_hdr hdr;
+ union {
+ struct htt_ver_resp ver_resp;
+ struct htt_mgmt_tx_completion mgmt_tx_completion;
+ struct htt_data_tx_completion data_tx_completion;
+ struct htt_rx_indication rx_ind;
+ struct htt_rx_fragment_indication rx_frag_ind;
+ struct htt_rx_peer_map peer_map;
+ struct htt_rx_peer_unmap peer_unmap;
+ struct htt_rx_flush rx_flush;
+ struct htt_rx_addba rx_addba;
+ struct htt_rx_delba rx_delba;
+ struct htt_security_indication security_indication;
+ struct htt_rc_update rc_update;
+ struct htt_rx_test rx_test;
+ struct htt_pktlog_msg pktlog_msg;
+ struct htt_stats_conf stats_conf;
+ struct htt_rx_pn_ind rx_pn_ind;
+ struct htt_rx_offload_ind rx_offload_ind;
+ struct htt_rx_in_ord_ind rx_in_ord_ind;
+ };
+} __packed;
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+ u32 msdu_id;
+ bool discard;
+ bool no_ack;
+};
+
+struct htt_peer_map_event {
+ u8 vdev_id;
+ u16 peer_id;
+ u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+ u16 peer_id;
+};
+
+struct ath10k_htt_txbuf {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc cmd_tx;
+} __packed;
+
+struct ath10k_htt {
+ struct ath10k *ar;
+ enum ath10k_htc_ep_id eid;
+
+ u8 target_version_major;
+ u8 target_version_minor;
+ struct completion target_version_received;
+
+ struct {
+ /*
+ * Ring of network buffer objects - This ring is
+ * used exclusively by the host SW. This ring
+ * mirrors the dev_addrs_ring that is shared
+ * between the host SW and the MAC HW. The host SW
+ * uses this netbufs ring to locate the network
+ * buffer objects whose data buffers the HW has
+ * filled.
+ */
+ struct sk_buff **netbufs_ring;
+
+ /* This is used only with firmware supporting IN_ORD_IND.
+ *
+ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+ * buffer ring from which buffer addresses are copied by the
+ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+ * pointing to specific (re-ordered) buffers.
+ *
+ * FIXME: With the kernel's generic hashing functions there are
+ * a lot of hash collisions for sk_buffs.
+ */
+ bool in_ord_rx;
+ DECLARE_HASHTABLE(skb_table, 4);
+
+ /*
+ * Ring of buffer addresses -
+ * This ring holds the "physical" device address of the
+ * rx buffers the host SW provides for the MAC HW to
+ * fill.
+ */
+ __le32 *paddrs_ring;
+
+ /*
+ * Base address of ring, as a "physical" device address
+ * rather than a CPU address.
+ */
+ dma_addr_t base_paddr;
+
+ /* how many elems in the ring (power of 2) */
+ int size;
+
+ /* size - 1 */
+ unsigned size_mask;
+
+ /* how many rx buffers to keep in the ring */
+ int fill_level;
+
+ /* how many rx buffers (full+empty) are in the ring */
+ int fill_cnt;
+
+ /*
+ * alloc_idx - where HTT SW has deposited empty buffers
+ * This is allocated in consistent mem, so that the FW can
+ * read this variable, and program the HW's FW_IDX reg with
+ * the value of this shadow register.
+ */
+ struct {
+ __le32 *vaddr;
+ dma_addr_t paddr;
+ } alloc_idx;
+
+ /* where HTT SW has processed bufs filled by rx MAC DMA */
+ struct {
+ unsigned msdu_payld;
+ } sw_rd_idx;
+
+ /*
+ * refill_retry_timer - timer triggered when the ring is
+ * not refilled to the level expected
+ */
+ struct timer_list refill_retry_timer;
+
+ /* Protects access to all rx ring buffer state variables */
+ spinlock_t lock;
+ } rx_ring;
+
+ unsigned int prefetch_len;
+
+ /* Protects access to pending_tx, num_pending_tx */
+ spinlock_t tx_lock;
+ int max_num_pending_tx;
+ int num_pending_tx;
+ struct idr pending_tx;
+ wait_queue_head_t empty_tx_wq;
+ struct dma_pool *tx_pool;
+
+ /* set if host-fw communication goes haywire
+ * used to avoid further failures */
+ bool rx_confused;
+ struct tasklet_struct rx_replenish_task;
+
+ /* This is used to group tx/rx completions separately and process them
+ * in batches to reduce cache stalls */
+ struct tasklet_struct txrx_compl_task;
+ struct sk_buff_head tx_compl_q;
+ struct sk_buff_head rx_compl_q;
+ struct sk_buff_head rx_in_ord_compl_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
+};
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring. */
+struct htt_rx_desc {
+ union {
+ /* This field is filled on the host using the msdu buffer
+ * from htt_rx_indication */
+ struct fw_rx_desc_base fw_desc;
+ u32 pad;
+ } __packed;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start msdu_start;
+ struct rx_msdu_end msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+};
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
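+
+/*
+ * Illustrative sketch, not part of the original patch: rounding a buffer
+ * length up to the conservative cache line estimate above, e.g. before DMA
+ * mapping.
+ */
+static inline int ath10k_htt_cacheline_pad(int len)
+{
+ return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
+        ~HTT_MAX_CACHE_LINE_SIZE_MASK;
+}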
+
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000..01a2b384f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,2065 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+#include "trace.h"
+#include "mac.h"
+
+#include <linux/log2.h>
+
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
+
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+{
+ struct ath10k_skb_rxcb *rxcb;
+
+ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+ if (rxcb->paddr == paddr)
+ return ATH10K_RXCB_SKB(rxcb);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_rxcb *rxcb;
+ struct hlist_node *n;
+ int i;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+ skb = ATH10K_RXCB_SKB(rxcb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ hash_del(&rxcb->hlist);
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ htt->rx_ring.fill_cnt = 0;
+ hash_init(htt->rx_ring.skb_table);
+ memset(htt->rx_ring.netbufs_ring, 0,
+ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ struct htt_rx_desc *rx_desc;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0, idx;
+
+ /* The Full Rx Reorder firmware has no way of telling the host
+ * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
+ * To keep things simple make sure ring is always half empty. This
+ * guarantees there'll be no replenishment overruns possible.
+ */
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+ skb->data);
+
+ /* Clear rx_desc attention word before posting to Rx ring */
+ rx_desc = (struct htt_rx_desc *)skb->data;
+ rx_desc->attention.flags = __cpu_to_le32(0);
+
+ paddr = dma_map_single(htt->ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ rxcb->paddr = paddr;
+ htt->rx_ring.netbufs_ring[idx] = skb;
+ htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ htt->rx_ring.fill_cnt++;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_add(htt->rx_ring.skb_table,
+ &ATH10K_SKB_RXCB(skb)->hlist,
+ (u32)paddr);
+ }
+
+ num--;
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ }
+
+fail:
+ /*
+ * Make sure the rx buffer is updated before available buffer
+ * index to avoid any potential rx ring corruption.
+ */
+ mb();
+ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
+ return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ lockdep_assert_held(&htt->rx_ring.lock);
+ return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+ int ret, num_deficit, num_to_fill;
+
+ /* Refilling the whole RX ring buffer proves to be a bad idea. The
+ * reason is that RX may take up a significant amount of CPU cycles and
+ * starve other tasks, e.g. TX on an ethernet device while acting as a
+ * bridge with an ath10k wlan interface. This ended up with very poor
+ * performance once the host system's CPU was overwhelmed with RX on
+ * ath10k.
+ *
+ * By limiting the number of refills the replenishing occurs
+ * progressively. This in turn makes use of the fact that tasklets are
+ * processed in FIFO order. This means actual RX processing can starve
+ * out refilling. If there are not enough buffers on the RX ring the FW
+ * will not report RX until it is refilled with enough buffers. This
+ * automatically balances load wrt CPU power.
+ *
+ * This probably comes at a cost of lower maximum throughput but
+ * improves the average and stability. */
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
+ ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+ if (ret == -ENOMEM) {
+ /*
+ * Failed to fill it to the desired level -
+ * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for
+ * another A-MPDU rx, no special recovery is needed.
+ */
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ tasklet_schedule(&htt->rx_replenish_task);
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret;
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+ htt->rx_ring.fill_cnt));
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ if (ret)
+ ath10k_htt_rx_ring_free(htt);
+
+ return ret;
+}
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
+ del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
+ tasklet_kill(&htt->txrx_compl_task);
+
+ skb_queue_purge(&htt->tx_compl_q);
+ skb_queue_purge(&htt->rx_compl_q);
+ skb_queue_purge(&htt->rx_in_ord_compl_q);
+
+ ath10k_htt_rx_ring_free(htt);
+
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+ sizeof(htt->rx_ring.paddrs_ring)),
+ htt->rx_ring.paddrs_ring,
+ htt->rx_ring.base_paddr);
+
+ dma_free_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ htt->rx_ring.alloc_idx.vaddr,
+ htt->rx_ring.alloc_idx.paddr);
+
+ kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int idx;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_ring.fill_cnt == 0) {
+ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
+
+ idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = htt->rx_ring.netbufs_ring[idx];
+ htt->rx_ring.netbufs_ring[idx] = NULL;
+ htt->rx_ring.paddrs_ring[idx] = 0;
+
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+ u8 **fw_desc, int *fw_desc_len,
+ struct sk_buff_head *amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ int msdu_len, msdu_chaining = 0;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rx_desc;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ for (;;) {
+ int last_msdu, msdu_len_invalid, msdu_chained;
+
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+
+ rx_desc = (struct htt_rx_desc *)msdu->data;
+
+ /* FIXME: we must report msdu payload since this is what caller
+ * expects now */
+ skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+ /*
+ * Sanity check - confirm the HW is finished filling in the
+ * rx data.
+ * If the HW and SW are working correctly, then it's guaranteed
+ * that the HW's MAC DMA is done before this point in the SW.
+ * To prevent the case that we handle a stale Rx descriptor,
+ * just assert for now until we have a way to recover.
+ */
+ if (!(__le32_to_cpu(rx_desc->attention.flags)
+ & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ __skb_queue_purge(amsdu);
+ return -EIO;
+ }
+
+ /*
+ * Copy the FW rx descriptor for this MSDU from the rx
+ * indication message into the MSDU's netbuf. HL uses the
+ * same rx indication message definition as LL, and simply
+ * appends new info (fields from the HW rx desc, and the
+ * MSDU payload itself). So, the offset into the rx
+ * indication message only has to account for the standard
+ * offset of the per-MSDU FW rx desc info within the
+ * message, and how many bytes of the per-MSDU FW rx desc
+ * info have already been consumed. (And the endianness of
+ * the host, since for a big-endian host, the rx ind
+ * message contents, including the per-MSDU rx desc bytes,
+ * were byteswapped during upload.)
+ */
+ if (*fw_desc_len > 0) {
+ rx_desc->fw_desc.info0 = **fw_desc;
+ /*
+ * The target is expected to only provide the basic
+ * per-MSDU rx descriptors. Just to be sure, verify
+ * that the target has not attached extension data
+ * (e.g. LRO flow ID).
+ */
+
+ /* or more, if there's extension data */
+ (*fw_desc)++;
+ (*fw_desc_len)--;
+ } else {
+ /*
+ * When an oversized A-MSDU happens, the FW will lose
+ * some of the MSDU status - in this case, fewer FW
+ * descriptors are provided than there are MSDUs in
+ * this MPDU. Mark the FW descriptors so that the
+ * frames are still delivered to the upper stack if
+ * there is no CRC error for this MPDU.
+ *
+ * FIX THIS - the FW descriptors are actually for
+ * MSDUs in the end of this A-MSDU instead of the
+ * beginning.
+ */
+ rx_desc->fw_desc.info0 = 0;
+ }
+
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+ RX_MSDU_START_INFO0_MSDU_LENGTH);
+ msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+ if (msdu_len_invalid)
+ msdu_len = 0;
+
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ msdu_len -= msdu->len;
+
+ /* Note: Chained buffers do not contain rx descriptor */
+ while (msdu_chained--) {
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= msdu->len;
+ msdu_chaining = 1;
+ }
+
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+ RX_MSDU_END_INFO0_LAST_MSDU;
+
+ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
+ sizeof(*rx_desc) - sizeof(u32));
+
+ if (last_msdu)
+ break;
+ }
+
+ if (skb_queue_empty(amsdu))
+ msdu_chaining = -1;
+
+ /*
+ * Don't refill the ring yet.
+ *
+ * First, the elements popped here are still in use - it is not
+ * safe to overwrite them until the matching call to
+ * mpdu_desc_list_next. Second, for efficiency it is preferable to
+ * refill the rx ring with 1 PPDU's worth of rx buffers (something
+ * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+ * (something like 3 buffers). Consequently, we'll rely on the txrx
+ * SW to tell us when it is done pulling all the PPDU's rx buffers
+ * out of the rx ring, and then refill it just once.
+ */
+
+ return msdu_chaining;
+}
+
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+ u32 paddr)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+ if (!msdu)
+ return NULL;
+
+ rxcb = ATH10K_SKB_RXCB(msdu);
+ hash_del(&rxcb->hlist);
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u32 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ dma_addr_t paddr;
+ void *vaddr;
+ size_t size;
+ struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+ htt->rx_confused = false;
+
+ /* XXX: The fill level could be changed during runtime in response to
+ * the host processing latency. Is this really worth it?
+ */
+ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+ htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+
+ if (!is_power_of_2(htt->rx_ring.size)) {
+ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
+ return -EINVAL;
+ }
+
+ htt->rx_ring.netbufs_ring =
+ kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!htt->rx_ring.netbufs_ring)
+ goto err_netbuf;
+
+ size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+
+ vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
+ if (!vaddr)
+ goto err_dma_ring;
+
+ htt->rx_ring.paddrs_ring = vaddr;
+ htt->rx_ring.base_paddr = paddr;
+
+ vaddr = dma_alloc_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ &paddr, GFP_DMA);
+ if (!vaddr)
+ goto err_dma_idx;
+
+ htt->rx_ring.alloc_idx.vaddr = vaddr;
+ htt->rx_ring.alloc_idx.paddr = paddr;
+ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
+ *htt->rx_ring.alloc_idx.vaddr = 0;
+
+ /* Initialize the Rx refill retry timer */
+ setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+
+ spin_lock_init(&htt->rx_ring.lock);
+
+ htt->rx_ring.fill_cnt = 0;
+ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ hash_init(htt->rx_ring.skb_table);
+
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ skb_queue_head_init(&htt->tx_compl_q);
+ skb_queue_head_init(&htt->rx_compl_q);
+ skb_queue_head_init(&htt->rx_in_ord_compl_q);
+
+ tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+ (unsigned long)htt);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ htt->rx_ring.size, htt->rx_ring.fill_level);
+ return 0;
+
+err_dma_idx:
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+ sizeof(htt->rx_ring.paddrs_ring)),
+ htt->rx_ring.paddrs_ring,
+ htt->rx_ring.base_paddr);
+err_dma_ring:
+ kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+ return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+#define MICHAEL_MIC_LEN 8
+
+static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+static const u8 rx_legacy_rate_idx[] = {
+ 3, /* 0x00 - 11Mbps */
+ 2, /* 0x01 - 5.5Mbps */
+ 1, /* 0x02 - 2Mbps */
+ 0, /* 0x03 - 1Mbps */
+ 3, /* 0x04 - 11Mbps */
+ 2, /* 0x05 - 5.5Mbps */
+ 1, /* 0x06 - 2Mbps */
+ 0, /* 0x07 - 1Mbps */
+ 10, /* 0x08 - 48Mbps */
+ 8, /* 0x09 - 24Mbps */
+ 6, /* 0x0A - 12Mbps */
+ 4, /* 0x0B - 6Mbps */
+ 11, /* 0x0C - 54Mbps */
+ 9, /* 0x0D - 36Mbps */
+ 7, /* 0x0E - 18Mbps */
+ 5, /* 0x0F - 9Mbps */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ enum ieee80211_band band;
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 preamble = 0;
+ u32 info1, info2, info3;
+
+ /* Band value can't be set as undefined but freq can be 0 - use that to
+ * determine whether band is provided.
+ *
+ * FIXME: Perhaps this can go away if CCK rate reporting is a little
+ * reworked?
+ */
+ if (!status->freq)
+ return;
+
+ band = status->band;
+ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+
+ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+ break;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (cck)
+ rate &= ~BIT(3);
+ rate_idx = rx_legacy_rate_idx[rate];
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate_idx = rx_legacy_rate_idx[rate];
+ /* We are using the same rate table that was
+    registered with the HW - ath10k_rates[]. For
+    5GHz skip the CCK rates, hence -4 here */
+ rate_idx -= 4;
+ break;
+ default:
+ break;
+ }
+
+ status->rate_idx = rate_idx;
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info2 and info3 */
+ mcs = info2 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info2 >> 7) & 1;
+ sgi = (info3 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+ if (bw)
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
+ TODO check this */
+ mcs = (info3 >> 4) & 0x0F;
+ nss = ((info2 >> 10) & 0x07) + 1;
+ bw = info2 & 3;
+ sgi = info3 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ switch (bw) {
+ /* 20MHZ */
+ case 0:
+ break;
+ /* 40MHZ */
+ case 1:
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ /* 80MHZ */
+ case 2:
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ }
+
+ status->flag |= RX_FLAG_VHT;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_channel *ch;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: Get real NF */
+ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_comb;
+ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+ * means all prior MSDUs in a PPDU are reported to mac80211 without the
+ * TSF. Is it worth holding frames until end of PPDU is known?
+ *
+ * FIXME: Can we get/compute 64bit TSF?
+ */
+ status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ bool is_first_ppdu;
+ bool is_last_ppdu;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ is_first_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+ is_last_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+ if (is_first_ppdu) {
+ /* New PPDU starts so clear out the old per-PPDU status. */
+ status->freq = 0;
+ status->rate_idx = 0;
+ status->vht_nss = 0;
+ status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
+ status->flag &= ~(RX_FLAG_HT |
+ RX_FLAG_VHT |
+ RX_FLAG_SHORT_GI |
+ RX_FLAG_40MHZ |
+ RX_FLAG_MACTIME_END);
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_signal(ar, status, rxd);
+ ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_rates(ar, status, rxd);
+ }
+
+ if (is_last_ppdu)
+ ath10k_htt_rx_h_mactime(ar, status, rxd);
+}
+
+static const char * const tid_to_ac[] = {
+ "BE",
+ "BK",
+ "BK",
+ "BE",
+ "VI",
+ "VI",
+ "VO",
+ "VO",
+};
+
+static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ if (tid < 8)
+ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
+ else
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ ath10k_dbg(ar, ATH10K_DBG_DATA,
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ skb,
+ skb->len,
+ ieee80211_get_SA(hdr),
+ ath10k_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+ status->flag & RX_FLAG_40MHZ ? "40" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+ status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->vht_nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_rx_payload(ar, skb->data, skb->len);
+
+ ieee80211_rx(ar->hw, skb);
+}
+
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+ /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+ return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!is_first)))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+ return;
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This would
+ * also make sense for software based decryption (which is not
+ * implemented in ath10k).
+ *
+ * If there's no error then the frame is decrypted. At least that is
+ * the case for frames that come in via fragmented rx indication.
+ */
+ if (!is_decrypted)
+ return;
+
+ /* The payload is decrypted so strip crypto params. Start from tail
+ * since hdr is used to compute some stuff.
+ */
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+
+ /* MMIC */
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ skb_trim(msdu, msdu->len - 8);
+
+ /* Head */
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+
+ /* Delivered decapped frame:
+ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ *
+ * Note: The nwifi header doesn't have QoS Control and is
+ * (always?) a 3addr frame.
+ *
+ * Note2: There's no A-MSDU subframe header. Even if it's part
+ * of an A-MSDU.
+ */
+
+ /* pull decapped header and copy SA & DA */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, hdr_len);
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+ struct sk_buff *msdu,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_first, is_last, is_amsdu;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+ is_amsdu = !(is_first && is_last);
+
+ rfc1042 = hdr;
+
+ if (is_first) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += round_up(hdr_len, 4) +
+ round_up(crypto_len, 4);
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ void *rfc1042;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+
+ /* Delivered decapped frame:
+ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+ * [payload]
+ */
+
+ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+ sizeof(struct rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64])
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+
+ /* Delivered decapped frame:
+ * [amsdu header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ * [payload]
+ */
+
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+ struct ieee80211_hdr *hdr;
+
+ /* First msdu's decapped header:
+ * [802.11 header] <-- padded to 4 bytes long
+ * [crypto param] <-- padded to 4 bytes long
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ *
+ * Other (2nd, 3rd, ..) msdu's decapped header:
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ */
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ switch (decap) {
+ case RX_MSDU_DECAP_RAW:
+ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+ is_decrypted);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+ break;
+ }
+}
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags, info;
+ bool is_ip4, is_ip6;
+ bool is_tcp, is_udp;
+ bool ip_csum_ok, tcpudp_csum_ok;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+ info = __le32_to_cpu(rxd->msdu_start.info1);
+
+ is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+ is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+ is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+ is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+ ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+ tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+ if (!is_ip4 && !is_ip6)
+ return CHECKSUM_NONE;
+ if (!is_tcp && !is_udp)
+ return CHECKSUM_NONE;
+ if (!ip_csum_ok)
+ return CHECKSUM_NONE;
+ if (!tcpudp_csum_ok)
+ return CHECKSUM_NONE;
+
+ return CHECKSUM_UNNECESSARY;
+}
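+
+/* In short: CHECKSUM_UNNECESSARY is reported only when the frame is IPv4 or
+ * IPv6, carries TCP or UDP, and neither the IP nor the TCP/UDP checksum was
+ * flagged as failed in the rx descriptor; every other combination falls back
+ * to CHECKSUM_NONE and leaves verification to the network stack.
+ */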
+
+static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *first;
+ struct sk_buff *last;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ u8 first_hdr[64];
+ u8 *qos;
+ size_t hdr_len;
+ bool has_fcs_err;
+ bool has_crypto_err;
+ bool has_tkip_err;
+ bool has_peer_idx_invalid;
+ bool is_decrypted;
+ u32 attention;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
+ * decapped header. It'll be used for undecapping of each MSDU.
+ */
+ hdr = (void *)rxd->rx_hdr_status;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ memcpy(first_hdr, hdr, hdr_len);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ hdr = (void *)first_hdr;
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last = skb_peek_tail(amsdu);
+ rxd = (void *)last->data - sizeof(*rxd);
+ attention = __le32_to_cpu(rxd->attention.flags);
+
+ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+ /* Note: If hardware captures an encrypted frame that it can't decrypt,
+ * e.g. due to an fcs error, a missing peer or invalid key data, it will
+ * report the frame as raw.
+ */
+ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ !has_fcs_err &&
+ !has_crypto_err &&
+ !has_peer_idx_invalid);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (has_fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (has_tkip_err)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted)
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+
+ skb_queue_walk(amsdu, msdu) {
+ ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ is_decrypted);
+
+ /* Undecapping involves copying the original 802.11 header back
+ * to sk_buff. If frame is protected and hardware has decrypted
+ * it then remove the protected bit.
+ */
+ if (!is_decrypted)
+ continue;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *msdu;
+
+ while ((msdu = __skb_dequeue(amsdu))) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_empty(amsdu))
+ status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
+ ath10k_process_rx(ar, status, msdu);
+ }
+}
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
+{
+ struct sk_buff *skb, *first;
+ int space;
+ int total_len = 0;
+
+ /* TODO: We might be able to optimize this by using
+ * skb_try_coalesce() or a similar method to reduce
+ * copying, or perhaps get mac80211 to provide a way
+ * to receive a list of skbs directly.
+ */
+
+ first = __skb_dequeue(amsdu);
+
+ /* Allocate total length all at once. */
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
+
+ space = total_len - skb_tailroom(first);
+ if ((space > 0) &&
+ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
+ /* TODO: bump some rx-oom error stat */
+ /* put it back together so we can free the
+ * whole list at once.
+ */
+ __skb_queue_head(amsdu, first);
+ return -1;
+ }
+
+ /* Walk list again, copying contents into
+ * msdu_head
+ */
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ __skb_queue_head(amsdu, first);
+ return 0;
+}
+
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ bool chained)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ if (!chained)
+ return;
+
+ /* FIXME: The current unchaining logic can only handle the simple case
+ * of raw msdu chaining. If the decap format is anything other than raw
+ * the chaining may be more complex and isn't handled by the current
+ * code. Don't even try re-constructing such frames - the result would
+ * be pretty much garbage.
+ */
+ if (decap != RX_MSDU_DECAP_RAW ||
+ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ __skb_queue_purge(amsdu);
+ return;
+ }
+
+ ath10k_unchain_msdu(amsdu);
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ bool is_mgmt;
+ bool has_fcs_err;
+
+ msdu = skb_peek(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ /* FIXME: It might be a good idea to do some fuzzy-testing to drop
+ * invalid/dangerous frames.
+ */
+
+ if (!rx_status->freq) {
+ ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
+ return false;
+ }
+
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+ has_fcs_err = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
+
+ /* Management frames are handled via WMI events. The advantage of such
+ * an approach is that the channel is explicitly provided in WMI events
+ * whereas HTT doesn't provide channel information for rxed frames.
+ *
+ * However some firmware revisions don't report corrupted frames via
+ * WMI so don't drop them.
+ */
+ if (is_mgmt && !has_fcs_err) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (skb_queue_empty(amsdu))
+ return;
+
+ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+ return;
+
+ __skb_queue_purge(amsdu);
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct sk_buff_head amsdu;
+ int num_mpdu_ranges;
+ int fw_desc_len;
+ u8 *fw_desc;
+ int i, ret, mpdu_count = 0;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_confused)
+ return;
+
+ fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+ fw_desc = (u8 *)&rx->fw_desc;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ rx, sizeof(*rx) +
+ (sizeof(struct htt_rx_indication_mpdu_range) *
+ num_mpdu_ranges));
+
+ for (i = 0; i < num_mpdu_ranges; i++)
+ mpdu_count += mpdu_ranges[i].mpdu_count;
+
+ while (mpdu_count--) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
+ &fw_desc_len, &amsdu);
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ break;
+ }
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+ }
+
+ tasklet_schedule(&htt->rx_replenish_task);
+}
+
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *frag)
+{
+ struct ath10k *ar = htt->ar;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ struct sk_buff_head amsdu;
+ int ret;
+ u8 *fw_desc;
+ int fw_desc_len;
+
+ fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+ fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+ __skb_queue_head_init(&amsdu);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+ &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ tasklet_schedule(&htt->rx_replenish_task);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+ if (ret) {
+ ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ret);
+ __skb_queue_purge(&amsdu);
+ return;
+ }
+
+ if (skb_queue_len(&amsdu) != 1) {
+ ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
+ __skb_queue_purge(&amsdu);
+ return;
+ }
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+
+ if (fw_desc_len > 0) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "expecting more fragmented rx in one indication %d\n",
+ fw_desc_len);
+ }
+}
+
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+ __le16 msdu_id;
+ int i;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.no_ack = true;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+ tx_done.discard = true;
+ break;
+ default:
+ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
+ tx_done.discard = true;
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+ msdu_id = resp->data_tx_completion.msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
+}
+
+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_addba *ev = &resp->rx_addba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx addba tid %hu peer_id %hu size %hhu\n",
+ tid, peer_id, ev->window_size);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+ peer->addr, tid, ev->window_size);
+
+ ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_delba *ev = &resp->rx_delba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx delba tid %hu peer_id %hu\n",
+ tid, peer_id);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx stop rx ba session sta %pM tid %hu\n",
+ peer->addr, tid);
+
+ ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
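+/* Move one complete A-MSDU (up to and including the msdu flagged as
+ * RX_MSDU_END_INFO0_LAST_MSDU) from @list to @amsdu. Returns 0 on success,
+ * -ENOBUFS if @list is empty, -EINVAL if @amsdu isn't empty to begin with
+ * and -EAGAIN if @list doesn't contain a complete A-MSDU (the dequeued
+ * msdus are then spliced back onto @list).
+ */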
+static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+ struct sk_buff_head *amsdu)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+
+ if (skb_queue_empty(list))
+ return -ENOBUFS;
+
+ if (WARN_ON(!skb_queue_empty(amsdu)))
+ return -EINVAL;
+
+ while ((msdu = __skb_dequeue(list))) {
+ __skb_queue_tail(amsdu, msdu);
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+ break;
+ }
+
+ msdu = skb_peek_tail(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+ skb_queue_splice_init(amsdu, list);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ /* Offloaded frames are already decrypted but firmware insists they are
+ * protected in the 802.11 header. Strip the flag. Otherwise mac80211
+ * will drop the frame.
+ */
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct htt_rx_offload_msdu *rx;
+ struct sk_buff *msdu;
+ size_t offset;
+
+ while ((msdu = __skb_dequeue(list))) {
+ /* Offloaded frames don't have Rx descriptor. Instead they have
+ * a short meta information header.
+ */
+
+ rx = (void *)msdu->data;
+
+ skb_put(msdu, sizeof(*rx));
+ skb_pull(msdu, sizeof(*rx));
+
+ if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+ ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+ /* Offloaded rx header length isn't a multiple of 2 or 4 so the
+ * actual payload is unaligned. Align the frame. Otherwise
+ * mac80211 complains. This shouldn't reduce performance much
+ * because these offloaded frames are rare.
+ */
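+ /* e.g. a buffer ending in 0x...2 is shifted forward by 2 bytes;
+ * note that offset is never 0 here, so an already aligned buffer
+ * gets shifted by a full 4 bytes.
+ */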
+ offset = 4 - ((unsigned long)msdu->data & 3);
+ skb_put(msdu, offset);
+ memmove(msdu->data + offset, msdu->data, msdu->len);
+ skb_pull(msdu, offset);
+
+ /* FIXME: The frame is NWifi. Re-construct QoS Control
+ * if possible later.
+ */
+
+ memset(status, 0, sizeof(*status));
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+ ath10k_htt_rx_h_channel(ar, status);
+ ath10k_process_rx(ar, status, msdu);
+ }
+}
+
+static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (void *)skb->data;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct sk_buff_head list;
+ struct sk_buff_head amsdu;
+ u16 peer_id;
+ u16 msdu_count;
+ u8 vdev_id;
+ u8 tid;
+ bool offload;
+ bool frag;
+ int ret;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_confused)
+ return;
+
+ skb_pull(skb, sizeof(resp->hdr));
+ skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+ peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+ msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+ vdev_id = resp->rx_in_ord_ind.vdev_id;
+ tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+ offload = !!(resp->rx_in_ord_ind.info &
+ HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+ frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+ vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+ ath10k_warn(ar, "dropping invalid in order rx indication\n");
+ return;
+ }
+
+ /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+ * extracted and processed.
+ */
+ __skb_queue_head_init(&list);
+ ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+ if (ret < 0) {
+ ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+ htt->rx_confused = true;
+ return;
+ }
+
+ /* Offloaded frames are very different and need to be handled
+ * separately.
+ */
+ if (offload)
+ ath10k_htt_rx_h_rx_offload(ar, &list);
+
+ while (!skb_queue_empty(&list)) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ switch (ret) {
+ case 0:
+ /* Note: The in-order indication may report interleaved
+ * frames from different PPDUs meaning reported rx rate
+ * to mac80211 isn't accurate/reliable. It's still
+ * better to report something than nothing though. This
+ * should still give an idea about rx rate to the user.
+ */
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+ break;
+ case -EAGAIN:
+ /* fall through */
+ default:
+ /* Should not happen. */
+ ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+ htt->rx_confused = true;
+ __skb_queue_purge(&list);
+ return;
+ }
+ }
+
+ tasklet_schedule(&htt->rx_replenish_task);
+}
+
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+
+ /* confirm alignment */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ resp->hdr.msg_type);
+ switch (resp->hdr.msg_type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+ htt->target_version_major = resp->ver_resp.major;
+ htt->target_version_minor = resp->ver_resp.minor;
+ complete(&htt->target_version_received);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ spin_lock_bh(&htt->rx_ring.lock);
+ __skb_queue_tail(&htt->rx_compl_q, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
+ case HTT_T2H_MSG_TYPE_PEER_MAP: {
+ struct htt_peer_map_event ev = {
+ .vdev_id = resp->peer_map.vdev_id,
+ .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+ };
+ memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+ ath10k_peer_map_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+ struct htt_peer_unmap_event ev = {
+ .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+ };
+ ath10k_peer_unmap_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+ struct htt_tx_done tx_done = {};
+ int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+
+ tx_done.msdu_id =
+ __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+ switch (status) {
+ case HTT_MGMT_TX_STATUS_OK:
+ break;
+ case HTT_MGMT_TX_STATUS_RETRY:
+ tx_done.no_ack = true;
+ break;
+ case HTT_MGMT_TX_STATUS_DROP:
+ tx_done.discard = true;
+ break;
+ }
+
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ spin_unlock_bh(&htt->tx_lock);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ spin_lock_bh(&htt->tx_lock);
+ __skb_queue_tail(&htt->tx_compl_q, skb);
+ spin_unlock_bh(&htt->tx_lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
+ case HTT_T2H_MSG_TYPE_SEC_IND: {
+ struct ath10k *ar = htt->ar;
+ struct htt_security_indication *ev = &resp->security_indication;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "sec ind peer_id %d unicast %d type %d\n",
+ __le16_to_cpu(ev->peer_id),
+ !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+ MS(ev->flags, HTT_SECURITY_TYPE));
+ complete(&ar->install_key_done);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TEST:
+ /* FIX THIS */
+ break;
+ case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(ar, skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ /* Firmware can return tx frames if it's unable to fully
+ * process them and suspects the host may be able to fix them.
+ * ath10k sends all tx frames as already inspected so this
+ * shouldn't happen unless fw has a bug.
+ */
+ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
+ break;
+ case HTT_T2H_MSG_TYPE_RX_ADDBA:
+ ath10k_htt_rx_addba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_RX_DELBA:
+ ath10k_htt_rx_delba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG: {
+ struct ath10k_pktlog_hdr *hdr =
+ (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
+
+ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+ sizeof(*hdr) +
+ __le16_to_cpu(hdr->size));
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+ /* Ignore this event because mac80211 takes care of Rx
+ * aggregation reordering.
+ */
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+ spin_lock_bh(&htt->rx_ring.lock);
+ __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
+ }
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+ /* FIXME: This WMI-TLV event is overlapping with 10.2
+ * CHAN_CHANGE - both being 0xF. Neither is being used in
+ * practice so no immediate action is necessary. Nevertheless
+ * HTT may need an abstraction layer like WMI has one day.
+ */
+ break;
+ default:
+ ath10k_warn(ar, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ break;
+ }
+
+ /* Free the indication buffer */
+ dev_kfree_skb_any(skb);
+}
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ struct ath10k *ar = htt->ar;
+ struct htt_resp *resp;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&htt->tx_lock);
+ while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+ ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->tx_lock);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+ resp = (struct htt_resp *)skb->data;
+ ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ dev_kfree_skb_any(skb);
+ }
+
+ while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ ath10k_htt_rx_in_ord_ind(ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000..cbd2bc9e6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ htt->num_pending_tx--;
+ if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+ ieee80211_wake_queues(htt->ar->hw);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ spin_lock_bh(&htt->tx_lock);
+ __ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+ int ret = 0;
+
+ spin_lock_bh(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ieee80211_stop_queues(htt->ar->hw);
+
+exit:
+ spin_unlock_bh(&htt->tx_lock);
+ return ret;
+}
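+
+/* Note: the tx paths below (ath10k_htt_mgmt_tx() and ath10k_htt_tx()) bump
+ * the pending counter via ath10k_htt_tx_inc_pending() before queueing a
+ * frame and undo it with ath10k_htt_tx_dec_pending() on their error paths;
+ * for successfully queued frames the counter is presumably dropped from the
+ * tx completion path via __ath10k_htt_tx_dec_pending().
+ */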
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+ return ret;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+ struct ath10k *ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+
+ idr_remove(&htt->pending_tx, msdu_id);
+}
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ htt->max_num_pending_tx);
+
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
+
+ htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+ sizeof(struct ath10k_htt_txbuf), 4, 0);
+ if (!htt->tx_pool) {
+ idr_destroy(&htt->pending_tx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
+{
+ struct ath10k *ar = ctx;
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {0};
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
+
+ tx_done.discard = 1;
+ tx_done.msdu_id = msdu_id;
+
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ spin_unlock_bh(&htt->tx_lock);
+
+ return 0;
+}
+
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
+{
+ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+ idr_destroy(&htt->pending_tx);
+ dma_pool_destroy(htt->tx_pool);
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0;
+ int ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->ver_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently we support only masks up to 8 bits wide so there is
+ * no need to worry about endianness */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /*
+ * the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup.hdr.num_rings = 1;
+
+ /* FIXME: do we need all of this? */
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr =
+ __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
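+ /* e.g. desc_offset(attention) expands to
+ * offsetof(struct htt_rx_desc, attention) / 4, i.e. the field offset
+ * expressed in 4-byte words.
+ */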
+
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+
+#undef desc_offset
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_aggr_conf *aggr_conf;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len;
+ int ret;
+
+ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+ return -EINVAL;
+
+ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr);
+ len += sizeof(cmd->aggr_conf);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+ aggr_conf = &cmd->aggr_conf;
+ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+ aggr_conf->max_num_amsdu_subframes,
+ aggr_conf->max_num_ampdu_subframes);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct sk_buff *txdesc = NULL;
+ struct htt_cmd *cmd;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = skb_cb->vdev_id;
+ int len = 0;
+ int msdu_id = -1;
+ int res;
+
+ res = ath10k_htt_tx_inc_pending(htt);
+ if (res)
+ goto err;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->mgmt_tx);
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ spin_unlock_bh(&htt->tx_lock);
+
+ txdesc = ath10k_htc_alloc_skb(ar, len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res)
+ goto err_free_txdesc;
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
+ cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+ cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
+ cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
+ cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->mgmt_tx.hdr, msdu->data,
+ min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+ skb_cb->htt.txbuf = NULL;
+
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
+ return res;
+}
+
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct htt_data_tx_desc_frag *frags;
+ u8 vdev_id = skb_cb->vdev_id;
+ u8 tid = skb_cb->htt.tid;
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ dma_addr_t paddr;
+ u32 frags_paddr;
+ bool use_frags;
+
+ res = ath10k_htt_tx_inc_pending(htt);
+ if (res)
+ goto err;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0) {
+ spin_unlock_bh(&htt->tx_lock);
+ goto err_tx_dec;
+ }
+ msdu_id = res;
+ spin_unlock_bh(&htt->tx_lock);
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ /* Since HTT 3.0 there is no separate mgmt tx command. However, when
+ * mgmt tx uses TX_FRM there is no tx fragment list; instead of a
+ * fragment list the host driver passes the frame pointer directly. */
+ use_frags = htt->target_version_major < 3 ||
+ !ieee80211_is_mgmt(hdr->frame_control);
+
+ skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+ &paddr);
+ if (!skb_cb->htt.txbuf) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+ skb_cb->htt.txbuf_paddr = paddr;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control))
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res)
+ goto err_free_txbuf;
+
+ if (likely(use_frags)) {
+ frags = skb_cb->htt.txbuf->frags;
+
+ frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+ frags[0].len = __cpu_to_le32(msdu->len);
+ frags[1].paddr = 0;
+ frags[1].len = 0;
+
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ frags_paddr = skb_cb->htt.txbuf_paddr;
+ } else {
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+ frags_paddr = skb_cb->paddr;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored
+ * by setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance. */
+
+ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+ skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx) +
+ prefetch_len);
+ skb_cb->htt.txbuf->htc_hdr.flags = 0;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection, so force
+ * it to simply report a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+ skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
+ skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, frags_paddr,
+ (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+ sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+ sizeof(skb_cb->htt.txbuf->frags);
+ sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+ ath10k_htt_tx_dec_pending(htt);
+err:
+ return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 000000000..839a8791f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include "hw.h"
+
+const struct ath10k_hw_regs qca988x_regs = {
+ .rtc_state_cold_reset_mask = 0x00000400,
+ .rtc_soc_base_address = 0x00004000,
+ .rtc_wmac_base_address = 0x00005000,
+ .soc_core_base_address = 0x00009000,
+ .ce_wrapper_base_address = 0x00057000,
+ .ce0_base_address = 0x00057400,
+ .ce1_base_address = 0x00057800,
+ .ce2_base_address = 0x00057c00,
+ .ce3_base_address = 0x00058000,
+ .ce4_base_address = 0x00058400,
+ .ce5_base_address = 0x00058800,
+ .ce6_base_address = 0x00058c00,
+ .ce7_base_address = 0x00059000,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00040000,
+ .soc_chip_id_address = 0x00ec,
+ .scratch_3_address = 0x0030,
+};
+
+const struct ath10k_hw_regs qca6174_regs = {
+ .rtc_state_cold_reset_mask = 0x00002000,
+ .rtc_soc_base_address = 0x00000800,
+ .rtc_wmac_base_address = 0x00001000,
+ .soc_core_base_address = 0x0003a000,
+ .ce_wrapper_base_address = 0x00034000,
+ .ce0_base_address = 0x00034400,
+ .ce1_base_address = 0x00034800,
+ .ce2_base_address = 0x00034c00,
+ .ce3_base_address = 0x00035000,
+ .ce4_base_address = 0x00035400,
+ .ce5_base_address = 0x00035800,
+ .ce6_base_address = 0x00035c00,
+ .ce7_base_address = 0x00036000,
+ .soc_reset_control_si0_rst_mask = 0x00000000,
+ .soc_reset_control_ce_rst_mask = 0x00000001,
+ .soc_chip_id_address = 0x000f0,
+ .scratch_3_address = 0x0028,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000..05f7a3393
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+#define ATH10K_FW_DIR "ath10k"
+
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
+#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE "/*(DEBLOBBED)*/"
+#define QCA988X_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA6174 target BMI version signatures */
+#define QCA6174_HW_1_0_VERSION 0x05000000
+#define QCA6174_HW_1_1_VERSION 0x05000001
+#define QCA6174_HW_1_3_VERSION 0x05000003
+#define QCA6174_HW_2_1_VERSION 0x05010000
+#define QCA6174_HW_3_0_VERSION 0x05020000
+#define QCA6174_HW_3_2_VERSION 0x05030000
+
+enum qca6174_pci_rev {
+ QCA6174_PCI_REV_1_1 = 0x11,
+ QCA6174_PCI_REV_1_3 = 0x13,
+ QCA6174_PCI_REV_2_0 = 0x20,
+ QCA6174_PCI_REV_3_0 = 0x30,
+};
+
+enum qca6174_chip_id_rev {
+ QCA6174_HW_1_0_CHIP_ID_REV = 0,
+ QCA6174_HW_1_1_CHIP_ID_REV = 1,
+ QCA6174_HW_1_3_CHIP_ID_REV = 2,
+ QCA6174_HW_2_1_CHIP_ID_REV = 4,
+ QCA6174_HW_2_2_CHIP_ID_REV = 5,
+ QCA6174_HW_3_0_CHIP_ID_REV = 8,
+ QCA6174_HW_3_1_CHIP_ID_REV = 9,
+ QCA6174_HW_3_2_CHIP_ID_REV = 10,
+};
+
+#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
+#define QCA6174_HW_2_1_FW_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_2_1_OTP_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_2_1_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
+
+#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
+#define QCA6174_HW_3_0_FW_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_3_0_OTP_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_3_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+
+#define ATH10K_FW_API2_FILE "/*(DEBLOBBED)*/"
+#define ATH10K_FW_API3_FILE "/*(DEBLOBBED)*/"
+
+/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
+#define ATH10K_FW_API4_FILE "/*(DEBLOBBED)*/"
+
+#define ATH10K_FW_UTF_FILE "/*(DEBLOBBED)*/"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+
+#define REG_DUMP_COUNT_QCA988X 60
+
+#define QCA988X_CAL_DATA_LEN 2116
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+
+ /* WMI "operations" interface version, 32 bit value. Supported from
+ * FW API 4 and above.
+ */
+ ATH10K_FW_IE_WMI_OP_VERSION = 5,
+};
+
+enum ath10k_fw_wmi_op_version {
+ ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
+ ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
+ ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
+ ATH10K_FW_WMI_OP_VERSION_TLV = 4,
+ ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+
+ /* keep last */
+ ATH10K_FW_WMI_OP_VERSION_MAX,
+};
+
+enum ath10k_hw_rev {
+ ATH10K_HW_QCA988X,
+ ATH10K_HW_QCA6174,
+};
+
+struct ath10k_hw_regs {
+ u32 rtc_state_cold_reset_mask;
+ u32 rtc_soc_base_address;
+ u32 rtc_wmac_base_address;
+ u32 soc_core_base_address;
+ u32 ce_wrapper_base_address;
+ u32 ce0_base_address;
+ u32 ce1_base_address;
+ u32 ce2_base_address;
+ u32 ce3_base_address;
+ u32 ce4_base_address;
+ u32 ce5_base_address;
+ u32 ce6_base_address;
+ u32 ce7_base_address;
+ u32 soc_reset_control_si0_rst_mask;
+ u32 soc_reset_control_ce_rst_mask;
+ u32 soc_chip_id_address;
+ u32 scratch_3_address;
+};
+
+extern const struct ath10k_hw_regs qca988x_regs;
+extern const struct ath10k_hw_regs qca6174_regs;
+
+#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+
+/* Known peculiarities:
+ * - current FW doesn't support raw rx mode (last tested v599)
+ * - current FW dumps upon raw tx mode (last tested v599)
+ * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ * - raw frames have FCS, nwifi frames don't
+ * - ethernet frames have the 802.11 header decapped and its parts (base hdr,
+ * cipher param, llc/snap) are each aligned to 4-byte boundaries */
+enum ath10k_hw_txrx_mode {
+ ATH10K_HW_TXRX_RAW = 0,
+ ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
+};
+
+enum ath10k_mcast2ucast_mode {
+ ATH10K_MCAST2UCAST_DISABLED = 0,
+ ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type;
+ __le16 size;
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
+/* Target specific defines for MAIN firmware */
+#define TARGET_NUM_VDEVS 8
+#define TARGET_NUM_PEER_AST 2
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 0
+#define TARGET_MAC_AGGR_DELIM 0
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_STATIONS 16
+#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
+ (TARGET_NUM_VDEVS))
+#define TARGET_NUM_OFFLOAD_PEERS 0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 0
+#define TARGET_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES 0
+
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS_MAX 256
+#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
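+
+/* With the defaults above, TARGET_10X_NUM_PEERS evaluates to 128 + 16 = 144
+ * and TARGET_10X_NUM_TIDS to min(256, 2 * 144) = 256.
+ */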
+
+/* 10.2 parameters */
+#define TARGET_10_2_DMA_BURST_SIZE 1
+
+/* Target specific defines for WMI-TLV firmware */
+#define TARGET_TLV_NUM_VDEVS 3
+#define TARGET_TLV_NUM_STATIONS 32
+#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
+ (TARGET_TLV_NUM_VDEVS) + \
+ 2)
+#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
+#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+
+/* Number of Copy Engines supported */
+#define CE_COUNT 8
+
+/*
+ * Total number of PCIe MSI interrupts requested for all interrupt sources.
+ * PCIe standard forces this to be a power of 2.
+ * Some host OSes limit the number of MSI requests that can be granted
+ * to 8, so for now we abide by this limit and avoid requesting more
+ * than that.
+ */
+#define MSI_NUM_REQUEST_LOG2 3
+#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
+
+/*
+ * Granted MSIs are assigned as follows:
+ * - the firmware uses the first MSI,
+ * - any remaining MSIs are used by the Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW 0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL 1
+#define MSI_ASSIGN_CE_MAX 7
+
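+/* i.e. with MSI_NUM_REQUEST_LOG2 = 3 a total of 1 << 3 = 8 MSIs are
+ * requested: MSI 0 carries firmware indications (errors, etc.) and MSIs 1
+ * through MSI_ASSIGN_CE_MAX (7) serve the Copy Engines.
+ */
+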
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON 3
+
+#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
+#define RTC_STATE_V_LSB 0
+#define RTC_STATE_V_MASK 0x00000007
+#define RTC_STATE_ADDRESS 0x0000
+#define PCIE_SOC_WAKE_V_MASK 0x00000001
+#define PCIE_SOC_WAKE_ADDRESS 0x0004
+#define PCIE_SOC_WAKE_RESET 0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS 0x0008
+
+#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
+#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
+#define MAC_COEX_BASE_ADDRESS 0x00006000
+#define BT_COEX_BASE_ADDRESS 0x00007000
+#define SOC_PCIE_BASE_ADDRESS 0x00008000
+#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
+#define WLAN_UART_BASE_ADDRESS 0x0000c000
+#define WLAN_SI_BASE_ADDRESS 0x00010000
+#define WLAN_GPIO_BASE_ADDRESS 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define WLAN_MAC_BASE_ADDRESS 0x00020000
+#define EFUSE_BASE_ADDRESS 0x00030000
+#define FPGA_REG_BASE_ADDRESS 0x00039000
+#define WLAN_UART2_BASE_ADDRESS 0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
+#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
+#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
+#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
+#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
+#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
+#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
+#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
+#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
+#define DBI_BASE_ADDRESS 0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+
+#define SOC_RESET_CONTROL_ADDRESS 0x00000000
+#define SOC_RESET_CONTROL_OFFSET 0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
+#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
+#define SOC_CPU_CLOCK_OFFSET 0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB 0
+#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
+#define SOC_LPO_CAL_OFFSET 0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB 20
+#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
+
+#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
+
+#define CLOCK_GPIO_OFFSET 0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
+
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA1_OFFSET 0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
+#define CORE_CTRL_ADDRESS 0x0000
+#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CAUSE_ADDRESS 0x000c
+#define PCIE_INTR_CLR_ADDRESS 0x0014
+#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
+#define CPU_INTR_ADDRESS 0x0010
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_IND_EVENT_PENDING 1
+#define FW_IND_INITIALIZED 2
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK 0x00000400
+#define PCIE_INTR_CE_MASK_ALL 0x0007f800
+
+#define DRAM_BASE_ADDRESS 0x00400000
+
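+/* Placeholder for register definitions that have no equivalent on this
+ * target; the aliases further below resolve to it, presumably so that
+ * shared code referencing them still compiles.
+ */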
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK MISSING
+#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET 0x18
+#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK MISSING
+#define INT_STATUS_ENABLE_CPU_LSB MISSING
+#define INT_STATUS_ENABLE_CPU_MASK MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define INT_STATUS_ENABLE_ADDRESS MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define HOST_INT_STATUS_ADDRESS MISSING
+#define CPU_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
+#define COUNT_DEC_ADDRESS MISSING
+#define HOST_INT_STATUS_CPU_MASK MISSING
+#define HOST_INT_STATUS_CPU_LSB MISSING
+#define HOST_INT_STATUS_ERROR_MASK MISSING
+#define HOST_INT_STATUS_ERROR_LSB MISSING
+#define HOST_INT_STATUS_COUNTER_MASK MISSING
+#define HOST_INT_STATUS_COUNTER_LSB MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
+#define WINDOW_DATA_ADDRESS MISSING
+#define WINDOW_READ_ADDR_ADDRESS MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+
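+/* Extract the RTC power state field; RTC_STATE_V_ON indicates the SoC is
+ * fully awake (e.g. when polling RTC_STATE_ADDRESS after a PCIE_SOC_WAKE
+ * request).
+ */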
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 000000000..973485bd4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,5628 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+#include "testmode.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, bool def_idx)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ arg.key_flags = WMI_KEY_PAIRWISE;
+ else
+ arg.key_flags = WMI_KEY_GROUP;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ arg.key_cipher = WMI_CIPHER_WEP;
+ /* AP/IBSS mode requires the self-key to be installed groupwise;
+ * otherwise a pairwise key must be set. */
+ if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
+ arg.key_flags = WMI_KEY_PAIRWISE;
+
+ if (def_idx)
+ arg.key_flags |= WMI_KEY_TX_USAGE;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* this one needs to be done in software */
+ return 1;
+ default:
+ ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd == DISABLE_KEY) {
+ arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_data = NULL;
+ }
+
+ return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, bool def_idx)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
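+/* Push all cached WEP keys for this vif to the given peer, flagging the
+ * current default key index for TX usage.
+ */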
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+ bool def_idx;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+ if (arvif->wep_keys[i] == NULL)
+ continue;
+ /* set TX_USAGE flag for default key id */
+ if (arvif->def_wep_key_idx == i)
+ def_idx = true;
+ else
+ def_idx = false;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+ addr, def_idx);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = arvif->wep_keys[i];
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == NULL)
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, false);
+ if (ret && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return first_errno;
+}
+
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx)
+{
+ struct ath10k_peer *peer;
+ int i;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ /* We don't know which vdev this peer belongs to,
+ * since WMI doesn't give us that information.
+ *
+ * FIXME: multi-bss needs to be handled.
+ */
+ peer = ath10k_peer_find(ar, 0, addr);
+ if (!peer)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ u8 addr[ETH_ALEN];
+ int first_errno = 0;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (;;) {
+ /* since ath10k_install_key() may sleep we can't hold data_lock all
+ * the time, so we try to remove the keys incrementally */
+ spin_lock_bh(&ar->data_lock);
+ i = 0;
+ list_for_each_entry(peer, &ar->peers, list) {
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == key) {
+ ether_addr_copy(addr, peer->addr);
+ peer->keys[i] = NULL;
+ break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(peer->keys))
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (i == ARRAY_SIZE(peer->keys))
+ break;
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+ if (ret && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn(ar, "failed to remove key for %pM: %d\n",
+ addr, ret);
+ }
+
+ return first_errno;
+}
+
+/*********************/
+/* General utilities */
+/*********************/
+
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ switch (chandef->chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
+ phymode = MODE_11B;
+ else
+ phymode = MODE_11G;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NG_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NG_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ phymode = MODE_11A;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NA_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NA_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ phymode = MODE_11AC_VHT80;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(phymode == MODE_UNKNOWN);
+ return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ 1 microsecond */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers >= ar->max_num_peers)
+ return -ENOBUFS;
+
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ ar->num_peers++;
+
+ return 0;
+}
+
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ param = ar->wmi.pdev_param->sta_kickout_th;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ ATH10K_KICKOUT_THRESHOLD);
+ if (ret) {
+ ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+}
+
+static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ if (value != 0xFFFFFFFF)
+ value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
+ ATH10K_FRAGMT_THRESHOLD_MIN,
+ ATH10K_FRAGMT_THRESHOLD_MAX);
+
+ vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!arvif->beacon)
+ return;
+
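+ /* Beacons are either streaming-mapped per skb or sent from the
+ * preallocated coherent beacon_buf; only the former needs to be
+ * unmapped here.
+ */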
+ if (!arvif->beacon_buf)
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+
+ if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
+ arvif->beacon_state != ATH10K_BEACON_SENT))
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_mac_vif_beacon_free(arvif);
+
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+}
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ ret = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+{
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct ieee80211_channel *channel = chandef->chan;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+
+ /* TODO: set this up dynamically; what if we don't have any vifs? */
+ arg.channel.mode = chan_to_phymode(chandef);
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ar->monitor_vdev_id = vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
+ return 0;
+
+vdev_stop:
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ return ret;
+}
+
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
+{
+ int bit, ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+ WMI_VDEV_TYPE_MONITOR,
+ 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_create(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ar->monitor_started = false;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
+
+ return 0;
+}
+
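+/* A monitor vdev is needed when monitor mode is enabled, promiscuous
+ * filtering is requested or CAC is running; start or stop it accordingly.
+ */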
+static int ath10k_monitor_recalc(struct ath10k *ar)
+{
+ bool should_start;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ should_start = ar->monitor ||
+ ar->filter_flags & FIF_PROMISC_IN_BSS ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac monitor recalc started? %d should? %d\n",
+ ar->monitor_started, should_start);
+
+ if (should_start == ar->monitor_started)
+ return 0;
+
+ if (should_start)
+ return ath10k_monitor_start(ar);
+
+ return ath10k_monitor_stop(ar);
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+ if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* CAC is not running - do nothing */
+ if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+ return 0;
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+}
+
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_stop_cac(ar);
+
+ if (!ar->radar_enabled)
+ return;
+
+ if (ar->num_started_vdevs > 0)
+ return;
+
+ ret = ath10k_start_cac(ar);
+ if (ret) {
+ /*
+ * It is not possible to start CAC on the current channel, so
+ * transmitting is not allowed; make this channel DFS_UNAVAILABLE
+ * by indicating that radar was detected.
+ */
+ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
+ ieee80211_radar_detected(ar->hw);
+ }
+}
+
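+/* Start (or restart) a vdev on the currently configured channel and wait
+ * for the firmware's vdev setup completion before returning.
+ */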
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ if (restart)
+ ret = ath10k_wmi_vdev_restart(ar, &arg);
+ else
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to synchronize setup for vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+ return ath10k_vdev_start_restart(arvif, false);
+}
+
+static int ath10k_vdev_restart(struct ath10k_vif *arvif)
+{
+ return ath10k_vdev_start_restart(arvif, true);
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return 0;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
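+/* Locate a vendor IE (by OUI and type) past ie_offset and strip it from the
+ * frame by shifting the remaining IEs down and trimming the skb.
+ */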
+static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next;
+ const u8 *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
+ kfree_skb(bcn);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as configured above)
+ * so remove it from the base beacon template to avoid duplicate P2P
+ * IEs in beacon frames.
+ */
+ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+
+ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
+ 0, NULL, 0);
+ kfree_skb(bcn);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct sk_buff *prb;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ prb = ieee80211_proberesp_get(hw, vif);
+ if (!prb) {
+ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
+ kfree_skb(prb);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ath10k_vdev_stop(arvif);
+
+ arvif->is_started = false;
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+ ath10k_mac_vif_beacon_free(arvif);
+ spin_unlock_bh(&arvif->ar->data_lock);
+
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret)
+ return;
+
+ arvif->aid = 0;
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_vdev_stop(arvif);
+ return;
+ }
+
+ arvif->is_started = true;
+ arvif->is_up = true;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ const u8 self_peer[ETH_ALEN])
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->ibss_joined) {
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret)
+ ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
+ self_peer, arvif->vdev_id, ret);
+
+ if (is_zero_ether_addr(arvif->bssid))
+ return;
+
+ eth_zero_addr(arvif->bssid);
+
+ return;
+ }
+
+ ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret) {
+ ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
+ self_peer, arvif->vdev_id, ret);
+ return;
+ }
+
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
+ ATH10K_DEFAULT_ATIM);
+ if (ret)
+ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
+ else
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
+ else
+ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+
+ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int num = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->ps)
+ num++;
+
+ return num;
+}
+
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int ps_timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+ ar->fw_features)) {
+ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+ arvif->vdev_id);
+ enable_ps = false;
+ }
+
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ ps_timeout = conf->dynamic_ps_timeout;
+ if (ps_timeout == 0) {
+ /* Firmware doesn't like 0 */
+ ps_timeout = ieee80211_tu_to_usec(
+ vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ ps_timeout);
+ if (ret) {
+ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
+ return 0;
+
+ /* Some firmware revisions have a bug and ignore the `enabled` field.
+ * Instead use the interval to disable the keepalive.
+ */
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
+ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
+
+ ret = ath10k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+ struct ieee80211_vif *vif)
+{
+ /* Some firmware revisions have unstable STA powersave when listen
+ * interval is set too high (e.g. 5). The symptoms are firmware doesn't
+ * generate NullFunc frames properly even if buffered frames have been
+ * indicated in Beacon TIM. Firmware would seldom wake up to pull
+ * buffered frames. Often pinging the device from AP would simply fail.
+ *
+ * As a workaround set it to 1.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 1;
+
+ return ar->hw->conf.listen_interval;
+}
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ether_addr_copy(arg->addr, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_aid = sta->aid;
+ arg->peer_flags |= WMI_PEER_AUTH;
+ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
+ arg->peer_num_spatial_streams = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
+ info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is basing this on the RSN/WPA IE the correct approach? */
+ if (rsnie || wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ }
+
+ if (wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ u32 ratemask;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rateset->rates[rateset->num_rates] = rates->hw_value;
+ rateset->num_rates++;
+ }
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int i, n;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ arg->peer_flags |= WMI_PEER_HT;
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->peer_flags |= WMI_PEER_LDPC;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->peer_flags |= WMI_PEER_40MHZ;
+ arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+ arg->peer_flags |= WMI_PEER_STBC;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->peer_flags |= WMI_PEER_STBC;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
+ for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
+ if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+ arg->peer_ht_rates.rates[n++] = i;
+
+ /*
+ * This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = sta->rx_nss;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_num_spatial_streams);
+}
+
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ u32 uapsd = 0;
+ u32 max_sp = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (sta->wme && sta->uapsd_queues) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* TODO: set this up based on the STA listen interval and beacon
+ interval. Currently we don't know sta->listen_interval - a
+ mac80211 patch is required. Use 10 seconds for now. */
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+ 10);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 ampdu_factor;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ arg->peer_flags |= WMI_PEER_VHT;
+
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ arg->peer_flags |= WMI_PEER_VHT_2G;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller. */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->peer_flags |= WMI_PEER_80MHZ;
+
+ arg->peer_vht_rates.rx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->peer_vht_rates.rx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->peer_vht_rates.tx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->peer_vht_rates.tx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+
+ if (sta->wme && sta->uapsd_queues) {
+ arg->peer_flags |= WMI_PEER_APSD;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (vif->bss_conf.qos)
+ arg->peer_flags |= WMI_PEER_QOS;
+ break;
+ case WMI_VDEV_TYPE_IBSS:
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
+}
+
+static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+{
+ /* First 4 rates in ath10k_rates are CCK (11b) rates. */
+ return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ switch (ar->hw->conf.chandef.chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+
+ break;
+ case IEEE80211_BAND_5GHZ:
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AC_VHT80;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, arg);
+ ath10k_peer_assoc_h_rates(ar, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
+
+ return 0;
+}
+
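+/* Map the 802.11 SM power save field from the HT capabilities to the
+ * corresponding WMI peer SMPS state.
+ */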
+static const u32 ath10k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath10k_smps_map))
+ return -EINVAL;
+
+ return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+ WMI_PEER_SMPS_STATE,
+ ath10k_smps_map[smps]);
+}
+
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_vht_cap vht_cap)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+ u32 param;
+ u32 value;
+
+ if (!(ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ return 0;
+
+ param = ar->wmi.vdev_param->txbf;
+ value = 0;
+
+ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+ return 0;
+
+ /* The following logic is correct: if a remote STA advertises support
+ * for acting as a beamformer then we should enable beamformee operation
+ * on our side.
+ */
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+ value, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ /* ap_sta must only be accessed within the RCU section, which must be
+ * left before calling ath10k_setup_peer_smps() since that might sleep. */
+ ht_cap = ap_sta->ht_cap;
+ vht_cap = ap_sta->vht_cap;
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+ arvif->vdev_id, bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = bss_conf->aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ /* Workaround: Some firmware revisions (tested with qca6174
+ * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+ * poked with peer param command.
+ */
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+ WMI_PEER_DUMMY_VAR, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ return;
+ }
+}
+
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_vht_cap vht_cap = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->def_wep_key_idx = -1;
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = false;
+}
+
+static int ath10k_station_assoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ peer_arg.peer_reassoc = reassoc;
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (!reassoc) {
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ /* Plumb cached keys only for static WEP */
+ if (arvif->def_wep_key_idx != -1) {
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath10k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_supported_band **bands;
+ enum ieee80211_band band;
+ struct ieee80211_channel *channel;
+ struct wmi_scan_chan_list_arg arg = {0};
+ struct wmi_channel_arg *ch;
+ bool passive;
+ int len;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
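+ /* First pass: count the enabled channels so we know how large a
+ * channel list to allocate.
+ */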
+ bands = hw->wiphy->bands;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ arg.n_channels++;
+ }
+ }
+
+ len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+ arg.channels = kzalloc(len, GFP_KERNEL);
+ if (!arg.channels)
+ return -ENOMEM;
+
+ ch = arg.channels;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ ch->allow_ht = true;
+
+ /* FIXME: when should we really allow VHT? */
+ ch->allow_vht = true;
+
+ ch->allow_ibss =
+ !(channel->flags & IEEE80211_CHAN_NO_IR);
+
+ ch->ht40plus =
+ !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+ ch->chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ passive = channel->flags & IEEE80211_CHAN_NO_IR;
+ ch->passive = passive;
+
+ ch->freq = channel->center_freq;
+ ch->band_center_freq1 = channel->center_freq;
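+ /* The power values below are doubled since the WMI channel
+ * arguments appear to be expressed in 0.5 dBm units.
+ */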
+ ch->min_power = 0;
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain * 2;
+ ch->reg_class_id = 0; /* FIXME */
+
+ /* FIXME: why use only legacy modes, why not any
+ * HT/VHT modes? Would that even make any
+ * difference? */
+ if (channel->band == IEEE80211_BAND_2GHZ)
+ ch->mode = MODE_11G;
+ else
+ ch->mode = MODE_11A;
+
+ if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
+ ch->freq, ch->max_power, ch->max_reg_power,
+ ch->max_antenna_gain, ch->mode);
+
+ ch++;
+ }
+ }
+
+ ret = ath10k_wmi_scan_chan_list(ar, &arg);
+ kfree(arg.channels);
+
+ return ret;
+}
+
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
+static void ath10k_regd_update(struct ath10k *ar)
+{
+ struct reg_dmn_pair_mapping *regpair;
+ int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_update_channel_list(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
+
+ regpair = ar->ath_common.regulatory.regpair;
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
+ /* The target allows setting a per-band regdomain but ath_common
+ * provides only a combined one. */
+ ret = ath10k_wmi_pdev_set_regdomain(ar,
+ regpair->reg_domain,
+ regpair->reg_domain, /* 2ghz */
+ regpair->reg_domain, /* 5ghz */
+ regpair->reg_2ghz_ctl,
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
+ if (ret)
+ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath10k *ar = hw->priv;
+ bool result;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON)
+ ath10k_regd_update(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
+{
+ if (vif)
+ return ath10k_vif_to_arvif(vif)->vdev_id;
+
+ if (ar->monitor_started)
+ return ar->monitor_vdev_id;
+
+ ath10k_warn(ar, "failed to resolve vdev id\n");
+ return 0;
+}
+
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
+ */
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
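+ /* Strip the QoS Control field by shifting the preceding header
+ * bytes forward and trimming the duplicate bytes from the front.
+ */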
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
+ * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
+ * used only for CQM purposes (e.g. hostapd station keepalive ping) so
+ * it is safe to downgrade to NullFunc.
+ */
+ hdr = (void *)skb->data;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ }
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ /* This applies only to P2P_GO */
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+ arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ memcpy(skb_put(skb, arvif->u.ap.noa_len),
+ arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+{
+ /* FIXME: Not really sure since when the behaviour changed. At some
+ * point new firmware stopped requiring creation of peer entries for
+ * offchannel tx (and actually creating them causes issues with wmi-htc
+ * tx credit replenishment and reliability). Assuming it's at least 3.4
+ * because that's when the `freq` was introduced to TX_FRM HTT command.
+ */
+ return !(ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4);
+}
+
+static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int ret = 0;
+
+ if (ar->htt.target_version_major >= 3) {
+ /* Since HTT 3.0 there is no separate mgmt tx command */
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ goto exit;
+ }
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features)) {
+ if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+ ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "reached WMI management transmit queue limit\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ } else {
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+ }
+ } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features) &&
+ ieee80211_is_nullfunc(hdr->frame_control)) {
+ /* FW does not report tx status properly for NullFunc frames
+ * unless they are sent through mgmt tx path. mac80211 sends
+ * those frames when it detects link/beacon loss and depends
+ * on the tx status to be correct. */
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+ } else {
+ ret = ath10k_htt_tx(&ar->htt, skb);
+ }
+
+exit:
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+ struct ath10k_peer *peer;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ const u8 *peer_addr;
+ int vdev_id;
+ int ret;
+
+ /* FW requirement: We must create a peer before FW will send out
+ * an offchannel frame. Otherwise the frame will be stuck and
+ * never transmitted. We delete the peer upon tx completion.
+ * It is unlikely that a peer for offchannel tx will already be
+ * present. However it may be in some rare cases, so account for that.
+ * Otherwise we might remove a legitimate peer and break stuff. */
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ skb);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ peer_addr = ieee80211_get_DA(hdr);
+ vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer)
+ /* FIXME: should this use ath10k_warn()? */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
+
+ if (!peer) {
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = skb;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_tx_htt(ar, skb);
+
+ ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
+ 3 * HZ);
+ if (ret == 0)
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+ skb);
+
+ if (!peer) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+}
+
+/************/
+/* Scanning */
+/************/
+
+void __ath10k_scan_finish(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
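+ /* The non-idle cases below intentionally fall through so that
+ * RUNNING and ABORTING also perform the final cleanup done for
+ * STARTING.
+ */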
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ break;
+ case ATH10K_SCAN_RUNNING:
+ if (ar->scan.is_roc)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ /* fall through */
+ case ATH10K_SCAN_ABORTING:
+ if (!ar->scan.is_roc)
+ ieee80211_scan_completed(ar->hw,
+ (ar->scan.state ==
+ ATH10K_SCAN_ABORTING));
+ /* fall through */
+ case ATH10K_SCAN_STARTING:
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ath10k_offchan_tx_purge(ar);
+ cancel_delayed_work(&ar->scan.timeout);
+ complete_all(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath10k_scan_finish(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_scan_stop(struct ath10k *ar)
+{
+ struct wmi_stop_scan_arg arg = {
+ .req_id = 1, /* FIXME */
+ .req_type = WMI_SCAN_STOP_ONE,
+ .u.scan_id = ATH10K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_stop_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to transport pipe
+ * being overrun with data and/or it can recover on its own before the
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH10K_SCAN_IDLE)
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath10k_scan_abort(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ /* This can happen if timeout worker kicked in and called
+ * abortion while scan completion was being processed.
+ */
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ ar->scan.state = ATH10K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_start_scan(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ if (ret == 0) {
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If the scan state is already idle at this point, the scan failed
+ * to start. This is probably due to some issue in the firmware, but
+ * there is no need to wedge the driver because of it...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH10K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Add a 200ms margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg->max_scan_time+200));
+ return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+ ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+ ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+
+ /* it makes no sense to process injected frames like that */
+ if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ }
+
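+ /* Offchannel frames are sent on the scan/roc vdev. If the firmware
+ * needs a peer created for offchannel tx, defer the frame to the
+ * offchannel worker which handles peer creation and deletion.
+ */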
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ spin_lock_bh(&ar->data_lock);
+ ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
+ ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ath10k_mac_need_offchan_tx_work(ar)) {
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
+ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return;
+ }
+ }
+
+ ath10k_tx_htt(ar, skb);
+}
+
+/* Must not be called with conf_mutex held as workers can use that also. */
+void ath10k_drain_tx(struct ath10k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
+ cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
+
+void ath10k_halt(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->filter_flags = 0;
+ ar->monitor = false;
+
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ ar->monitor_started = false;
+
+ ath10k_scan_finish(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->cfg_tx_chainmask) {
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+ } else {
+ *tx_ant = ar->supp_tx_chainmask;
+ *rx_ant = ar->supp_rx_chainmask;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+ /* It is not clear that allowing gaps in chainmask
+ * is helpful. It probably will not do what the
+ * user is hoping for, so warn in that case.
+ */
+ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+ return;
+
+ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ dbg, cm);
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_check_chain_mask(ar, tx_ant, "tx");
+ ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ return 0;
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+ tx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+ rx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret = 0;
+
+ /*
+ * This makes sense only when restarting hw. It is harmless to call
+ * unconditionally. This is necessary to make sure no HTT/WMI tx
+ * commands will be submitted while restarting.
+ */
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ar->state = ATH10K_STATE_ON;
+ break;
+ case ATH10K_STATE_RESTARTING:
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_RESTARTED;
+ break;
+ case ATH10K_STATE_ON:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_WEDGED:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ case ATH10K_STATE_UTF:
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "Could not init hif: %d\n", ret);
+ goto err_off;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath10k_err(ar, "Could not init core: %d\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ if (ar->cfg_tx_chainmask)
+ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
+ ar->cfg_rx_chainmask);
+
+ /*
+ * By default the FW sets the ARP frame access category to voice (6).
+ * In that case the ARP exchange does not work properly with a
+ * UAPSD-enabled AP. ARP requests which arrive with access category 0
+ * are processed by the network stack and sent back with access
+ * category 0, but the FW changes the access category to 6. Setting
+ * the ARP frame access category to best effort (0) solves this
+ * problem.
+ */
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->arp_ac_override, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->num_started_vdevs = 0;
+ ath10k_regd_update(ar);
+
+ ath10k_spectral_start(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_core_stop:
+ ath10k_core_stop(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_off:
+ ar->state = ATH10K_STATE_OFF;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_OFF) {
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_OFF;
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->restart_work);
+}
+
+static int ath10k_config_ps(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const char *chandef_get_width(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return "20 (noht)";
+ case NL80211_CHAN_WIDTH_20:
+ return "20";
+ case NL80211_CHAN_WIDTH_40:
+ return "40";
+ case NL80211_CHAN_WIDTH_80:
+ return "80";
+ case NL80211_CHAN_WIDTH_80P80:
+ return "80+80";
+ case NL80211_CHAN_WIDTH_160:
+ return "160";
+ case NL80211_CHAN_WIDTH_5:
+ return "5";
+ case NL80211_CHAN_WIDTH_10:
+ return "10";
+ }
+ return "?";
+}
+
+static void ath10k_config_chan(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
+ ar->chandef.chan->center_freq,
+ ar->chandef.center_freq1,
+ ar->chandef.center_freq2,
+ chandef_get_width(ar->chandef.width));
+
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface. */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (!arvif->is_up)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* all vdevs are downed now - attempt to restart and re-up them */
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_vdev_restart(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ if (!arvif->is_up)
+ continue;
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ ath10k_monitor_recalc(ar);
+}
+
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
+
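+ /* The txpower limits below are doubled since the pdev parameters
+ * appear to be expressed in 0.5 dBm units.
+ */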
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
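+ /* Use the lowest txpower configured across all vifs as the
+ * device-wide limit.
+ */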
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ WARN_ON(arvif->txpower < 0);
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (WARN_ON(txpower == -1))
+ return -EINVAL;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac config channel %dMHz flags 0x%x radar %d\n",
+ conf->chandef.chan->center_freq,
+ conf->chandef.chan->flags,
+ conf->radar_enabled);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->rx_channel = conf->chandef.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->radar_enabled = conf->radar_enabled;
+ ath10k_recalc_radar_detection(ar);
+
+ if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
+ ar->chandef = conf->chandef;
+ ath10k_config_chan(ar);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
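+/* Derive the number of spatial streams from the number of contiguous
+ * low-order chains enabled in the chainmask.
+ */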
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+ if ((chain_mask & 0x15) == 0x15)
+ return 4;
+ else if ((chain_mask & 0x7) == 0x7)
+ return 3;
+ else if ((chain_mask & 0x3) == 0x3)
+ return 2;
+ return 1;
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK. This requirement
+ * for P2P_FIND/GO_NEG should be handled by checking CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ enum wmi_sta_powersave_param param;
+ int ret = 0;
+ u32 value;
+ int bit;
+ u32 vdev_param;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ bit = __ffs64(ar->free_vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+ bit, ar->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Some firmware revisions don't wait for beacon tx completion before
+ * sending another SWBA event. This could lead to the hardware using
+ * old (freed) beacon data in some cases, e.g. tx credit starvation
+ * combined with a missed TBTT. This is very rare.
+ *
+ * On non-IOMMU-enabled hosts this could be a security issue because
+ * the hw could beacon random data over the air. On IOMMU-enabled
+ * hosts DMAR faults would occur in most cases and the target device
+ * would crash.
+ *
+ * Since no beacon tx completions (implicit or explicit) are
+ * propagated to the host, the only workaround is to allocate a
+ * DMA-coherent buffer for the lifetime of a vif and use it for all
+ * beacon tx commands. The worst case for this approach is that some
+ * beacons may become corrupted, e.g. have garbled IEs or an
+ * out-of-date TIM bitmap.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ if (!arvif->beacon_buf) {
+ ret = -ENOMEM;
+ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ arvif->beacon_buf ? "single-buf" : "per-skb");
+
+ ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+ arvif->vdev_subtype, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ list_add(&arvif->list, &ar->arvifs);
+
+ /* It makes no sense to have firmware do keepalives. mac80211 already
+ * takes care of this with idle connection polling.
+ */
+ ret = ath10k_mac_vif_disable_keepalive(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ arvif->def_wep_key_idx = -1;
+
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ if (ar->cfg_tx_chainmask) {
+ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+ ret);
+ goto err_vdev_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+ if (ret) {
+ ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ list_del(&arvif->list);
+
+err:
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_spectral_vif_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ list_del(&arvif->list);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ kfree(arvif->u.ap.noa_data);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+ arvif->vdev_id);
+
+ ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ /* Some firmware revisions don't notify host about self-peer removal
+ * until after associated vdev is deleted.
+ */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+ u32 vdev_param, pdev_param, slottime, preamble;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_IBSS)
+ ath10k_control_ibss(arvif, info, vif->addr);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->beacon_interval);
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
+ WMI_BEACON_STAGGERED_MODE);
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update beacon template: %d\n",
+ ret);
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->dtim_period);
+ if (ret)
+ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath10k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ arvif->use_cts_prot = info->use_cts_prot;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, info->use_cts_prot);
+
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ slottime);
+ if (ret)
+ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ preamble);
+ if (ret)
+ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc) {
+ /* Workaround: Make sure monitor vdev is not running
+ * when associating to prevent some firmware revisions
+ * (e.g. 10.1 and 10.2) from crashing.
+ */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+ ath10k_bss_assoc(hw, vif, info);
+ ath10k_monitor_recalc(ar);
+ } else {
+ ath10k_bss_disassoc(hw, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ arvif->ps = vif->bss_conf.ps;
+
+ ret = ath10k_config_ps(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct wmi_start_scan_arg arg;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+
+ if (!req->no_cck)
+ arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+
+ if (req->ie_len) {
+ arg.ie_len = req->ie_len;
+ memcpy(arg.ie, req->ie, arg.ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.n_ssids = req->n_ssids;
+ for (i = 0; i < arg.n_ssids; i++) {
+ arg.ssids[i].len = req->ssids[i].ssid_len;
+ arg.ssids[i].ssid = req->ssids[i].ssid;
+ }
+ } else {
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.n_channels = req->n_channels;
+ for (i = 0; i < arg.n_channels; i++)
+ arg.channels[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *key)
+{
+ u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+ int ret;
+
+ /* The 10.1 firmware branch requires the default key index to be set
+ * to the group key index after installing it. Otherwise the FW/HW
+ * transmits corrupted frames with multi-vif APs. This is not required
+ * for the main firmware branch (e.g. 636).
+ *
+ * FIXME: This has been tested only in AP mode. It remains unknown
+ * whether this is required for multi-vif STA interfaces on 10.1. */
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ return;
+
+ if (cmd != SET_KEY)
+ return;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ key->keyidx);
+ if (ret)
+ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
+ const u8 *peer_addr;
+ bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104;
+ bool def_idx = false;
+ int ret = 0;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless the FW goes awry)
+ * since we already hold conf_mutex; we just make sure it's there now. */
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore */
+ goto exit;
+ }
+ }
+
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+
+ if (cmd == DISABLE_KEY)
+ ath10k_clear_vdev_key(arvif, key);
+ }
+
+ /* Set the TX_USAGE flag for all keys in case of dot1x-WEP. For
+ * static WEP, do not set this flag for the keys whose key id
+ * is greater than default key id.
+ */
+ if (arvif->def_wep_key_idx == -1)
+ def_idx = true;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ goto exit;
+ }
+
+ ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (peer == NULL)
+ /* impossible unless FW goes crazy */
+ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int keyidx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret;
+
+ mutex_lock(&arvif->ar->conf_mutex);
+
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath10k *ar;
+ struct ath10k_vif *arvif;
+ struct ath10k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u32 changed, bw, nss, smps;
+ int err;
+
+ arsta = container_of(wk, struct ath10k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+ sta->addr, bw);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_CHAN_WIDTH, bw);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_SMPS_STATE, smps);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+ changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+ if (err)
+ ath10k_warn(ar, "failed to reassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ }
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ /*
+ * New station addition.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+ arvif->vdev_id, sta->addr,
+ ar->num_stations + 1, ar->max_num_stations,
+ ar->num_peers + 1, ar->max_num_peers);
+
+ ret = ath10k_mac_inc_num_stations(arvif);
+ if (ret) {
+ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(arvif->is_started);
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr));
+ ath10k_mac_dec_num_stations(arvif);
+ goto exit;
+ }
+
+ arvif->is_started = true;
+ }
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ /*
+ * Existing station deletion.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ WARN_ON(!arvif->is_started);
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+ }
+
+ ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+
+ ath10k_mac_dec_num_stations(arvif);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * New association.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * Disassociation.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
+ ret = ath10k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_sta_uapsd_auto_trig_arg arg = {};
+ u32 prio = 0, acc = 0;
+ u32 value = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
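+ /* Map the mac80211 access category to the per-AC UAPSD delivery and
+ * trigger bits, and to the WMM AC index and user priority used by
+ * the auto-trigger argument below.
+ */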
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ prio = 7;
+ acc = 3;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ prio = 5;
+ acc = 2;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ prio = 2;
+ acc = 1;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ prio = 0;
+ acc = 0;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
+		/* Only userspace can make an educated decision on when to
+		 * send a trigger frame. The following effectively disables
+		 * the U-APSD autotrigger in firmware (which is enabled by
+		 * default provided the autotrigger service is available).
+		 */
+
+ arg.wmm_ac = acc;
+ arg.user_priority = prio;
+ arg.service_interval = 0;
+ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+
+ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
+ arvif->bssid, &arg, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+exit:
+ return ret;
+}
+
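+/* mac80211 conf_tx() handler: program the per-AC WMM parameters (cwmin,
+ * cwmax, aifs, txop), per vdev when the firmware supports it or pdev-wide
+ * otherwise, and then update the U-APSD configuration for the AC.
+ */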
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+
+ /*
+ * The channel time duration programmed in the HW is in absolute
+ * microseconds, while mac80211 gives the txop in units of
+ * 32 microseconds.
+ */
+ p->txop = params->txop * 32;
+
+ if (ar->wmi.ops->gen_vdev_wmm_conf) {
+ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+ } else {
+ /* This won't work well with multi-interface cases but it's
+ * better than nothing.
+ */
+ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+ ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
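+/* Remain-on-channel is implemented as a single-channel passive scan with
+ * the dwell time set to the requested duration.
+ */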
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_start_scan_arg arg;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+ arg.n_channels = 1;
+ arg.channels[0] = chan->center_freq;
+ arg.dwell_time_active = duration;
+ arg.dwell_time_passive = duration;
+ arg.max_scan_time = 2 * duration;
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+/*
+ * Both the RTS and fragmentation thresholds are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ bool skip;
+ int ret;
+
+	/* mac80211 doesn't care if we really xmit queued frames or not;
+	 * we'll collect those frames either way if we stop/delete vdevs */
+ if (drop)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED)
+ goto skip;
+
+ ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+ bool empty;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ empty = (ar->htt.num_pending_tx == 0);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ skip = (ar->state == ATH10K_STATE_WEDGED) ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH,
+ &ar->dev_flags);
+
+ (empty || skip);
+ }), ATH10K_FLUSH_TIMEOUT_HZ);
+
+ if (ret <= 0 || skip)
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
+ skip, ar->state, ret);
+
+skip:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* TODO: Implement this function properly
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ return 1;
+}
+
+#ifdef CONFIG_PM
+static int ath10k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ goto resume;
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto resume;
+ }
+
+ ret = 0;
+ goto exit;
+resume:
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to resume target: %d\n", ret);
+
+ ret = 1;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume target: %d\n", ret);
+ ret = 1;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+#endif
+
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath10k *ar = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* If device failed to restart it will be in a different state, e.g.
+ * ATH10K_STATE_WEDGED */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+ ath10k_info(ar, "device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey = &ar->survey[idx];
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* Helper table for legacy fixed_rate/bitrate_mask */
+static const u8 cck_ofdm_rate[] = {
+ /* CCK */
+ 3, /* 1Mbps */
+ 2, /* 2Mbps */
+ 1, /* 5.5Mbps */
+ 0, /* 11Mbps */
+ /* OFDM */
+ 3, /* 6Mbps */
+ 7, /* 9Mbps */
+ 2, /* 12Mbps */
+ 6, /* 18Mbps */
+ 1, /* 24Mbps */
+ 5, /* 36Mbps */
+ 0, /* 48Mbps */
+ 4, /* 54Mbps */
+};
+
+/* Check if only one bit is set: returns 0 for an empty mask, 1 if exactly
+ * one bit is set and 2 if more than one bit is set.
+ */
+static int ath10k_check_single_mask(u32 mask)
+{
+ int bit;
+
+ bit = ffs(mask);
+ if (!bit)
+ return 0;
+
+ mask &= ~BIT(bit - 1);
+ if (mask)
+ return 2;
+
+ return 1;
+}
+
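+/* Check whether the bitrate mask is the "allow everything" default for the
+ * given band, i.e. all legacy rates plus full HT/VHT MCS maps for every
+ * available chain.
+ */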
+static bool
+ath10k_default_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ u32 legacy = 0x00ff;
+ u8 ht = 0xff, i;
+ u16 vht = 0x3ff;
+ u16 nrf = ar->num_rf_chains;
+
+ if (ar->cfg_tx_chainmask)
+ nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ legacy = 0x00fff;
+ vht = 0;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ break;
+ default:
+ return false;
+ }
+
+ if (mask->control[band].legacy != legacy)
+ return false;
+
+ for (i = 0; i < nrf; i++)
+ if (mask->control[band].ht_mcs[i] != ht)
+ return false;
+
+ for (i = 0; i < nrf; i++)
+ if (mask->control[band].vht_mcs[i] != vht)
+ return false;
+
+ return true;
+}
+
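+/* Detect a mask that only limits the number of spatial streams: full HT or
+ * VHT MCS maps for the first N streams and empty maps for the rest. If so,
+ * report N so that only the NSS vdev parameter needs to be fixed.
+ */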
+static bool
+ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_nss)
+{
+ int ht_nss = 0, vht_nss = 0, i;
+
+ /* check legacy */
+ if (ath10k_check_single_mask(mask->control[band].legacy))
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (mask->control[band].ht_mcs[i] == 0xff)
+ continue;
+ else if (mask->control[band].ht_mcs[i] == 0x00)
+ break;
+
+ return false;
+ }
+
+ ht_nss = i;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (mask->control[band].vht_mcs[i] == 0x03ff)
+ continue;
+ else if (mask->control[band].vht_mcs[i] == 0x0000)
+ break;
+
+ return false;
+ }
+
+ vht_nss = i;
+
+ if (ht_nss > 0 && vht_nss > 0)
+ return false;
+
+ if (ht_nss)
+ *fixed_nss = ht_nss;
+ else if (vht_nss)
+ *fixed_nss = vht_nss;
+ else
+ return false;
+
+ return true;
+}
+
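+/* Verify that the mask selects exactly one rate in total (legacy, HT or
+ * VHT) and report which preamble type that rate uses.
+ */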
+static bool
+ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ enum wmi_rate_preamble *preamble)
+{
+ int legacy = 0, ht = 0, vht = 0, i;
+
+ *preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ /* check legacy */
+ legacy = ath10k_check_single_mask(mask->control[band].legacy);
+ if (legacy > 1)
+ return false;
+
+ /* check HT */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
+ if (ht > 1)
+ return false;
+
+ /* check VHT */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
+ if (vht > 1)
+ return false;
+
+ /* Currently we support only one fixed_rate */
+ if ((legacy + ht + vht) != 1)
+ return false;
+
+ if (ht)
+ *preamble = WMI_RATE_PREAMBLE_HT;
+ else if (vht)
+ *preamble = WMI_RATE_PREAMBLE_VHT;
+
+ return true;
+}
+
+static bool
+ath10k_bitrate_mask_rate(struct ath10k *ar,
+ const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+ u8 rate = 0, pream = 0, nss = 0, i;
+ enum wmi_rate_preamble preamble;
+
+ /* Check if single rate correct */
+ if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
+ return false;
+
+ pream = preamble;
+
+ switch (preamble) {
+ case WMI_RATE_PREAMBLE_CCK:
+ case WMI_RATE_PREAMBLE_OFDM:
+ i = ffs(mask->control[band].legacy) - 1;
+
+ if (band == IEEE80211_BAND_2GHZ && i < 4)
+ pream = WMI_RATE_PREAMBLE_CCK;
+
+ if (band == IEEE80211_BAND_5GHZ)
+ i += 4;
+
+ if (i >= ARRAY_SIZE(cck_ofdm_rate))
+ return false;
+
+ rate = cck_ofdm_rate[i];
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ break;
+
+ if (i == IEEE80211_HT_MCS_MASK_LEN)
+ return false;
+
+ rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+ nss = i;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ break;
+
+ if (i == NL80211_VHT_NSS_MAX)
+ return false;
+
+ rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ nss = i;
+ break;
+ }
+
+ *fixed_nss = nss + 1;
+ nss <<= 4;
+ pream <<= 6;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+ pream, nss, rate);
+
+ *fixed_rate = pream | nss | rate;
+
+ return true;
+}
+
+static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
+ const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+{
+	/* First check the full NSS mask to see if we can simply limit the NSS */
+ if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
+ return true;
+
+	/* Next, check whether a single fixed rate is set */
+ return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+}
+
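+/* Push the fixed rate, NSS and SGI vdev parameters to firmware, skipping
+ * the WMI commands when nothing has changed.
+ */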
+static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+ u8 fixed_rate,
+ u8 fixed_nss,
+ u8 force_sgi)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->fixed_rate == fixed_rate &&
+ arvif->fixed_nss == fixed_nss &&
+ arvif->force_sgi == force_sgi)
+ goto exit;
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+ if (force_sgi)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_rate);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+ fixed_rate, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_rate = fixed_rate;
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_nss);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
+ fixed_nss, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->fixed_nss = fixed_nss;
+
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ force_sgi);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n",
+ force_sgi, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->force_sgi = force_sgi;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
+ u8 fixed_rate = WMI_FIXED_RATE_NONE;
+ u8 fixed_nss = ar->num_rf_chains;
+ u8 force_sgi;
+
+ if (ar->cfg_tx_chainmask)
+ fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ force_sgi = mask->control[band].gi;
+ if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+ if (!ath10k_get_fixed_rate_nss(ar, mask, band,
+ &fixed_rate,
+ &fixed_nss))
+ return -EINVAL;
+ }
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+ ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
+ return -EINVAL;
+ }
+
+ return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+ fixed_nss, force_sgi);
+}
+
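+/* mac80211 sta_rc_update() handler: record the new bandwidth, NSS and SMPS
+ * state under data_lock and defer the actual peer updates to the station
+ * update worker.
+ */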
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ /*
+ * FIXME: Return 0 for time being. Need to figure out whether FW
+ * has the API to fetch 64-bit local TSF
+ */
+
+ return 0;
+}
+
+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+ arvif->vdev_id, sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+ * creation/removal. Do we need to verify this?
+ */
+ return 0;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Firmware offloads Tx aggregation entirely so deny mac80211
+ * Tx aggregation requests.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+ .tx = ath10k_tx,
+ .start = ath10k_start,
+ .stop = ath10k_stop,
+ .config = ath10k_config,
+ .add_interface = ath10k_add_interface,
+ .remove_interface = ath10k_remove_interface,
+ .configure_filter = ath10k_configure_filter,
+ .bss_info_changed = ath10k_bss_info_changed,
+ .hw_scan = ath10k_hw_scan,
+ .cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_key = ath10k_set_key,
+ .set_default_unicast_key = ath10k_set_default_unicast_key,
+ .sta_state = ath10k_sta_state,
+ .conf_tx = ath10k_conf_tx,
+ .remain_on_channel = ath10k_remain_on_channel,
+ .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
+ .set_rts_threshold = ath10k_set_rts_threshold,
+ .flush = ath10k_flush,
+ .tx_last_beacon = ath10k_tx_last_beacon,
+ .set_antenna = ath10k_set_antenna,
+ .get_antenna = ath10k_get_antenna,
+ .reconfig_complete = ath10k_reconfig_complete,
+ .get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_set_bitrate_mask,
+ .sta_rc_update = ath10k_sta_rc_update,
+ .get_tsf = ath10k_get_tsf,
+ .ampdu_action = ath10k_ampdu_action,
+ .get_et_sset_count = ath10k_debug_get_et_sset_count,
+ .get_et_stats = ath10k_debug_get_et_stats,
+ .get_et_strings = ath10k_debug_get_et_strings,
+
+ CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath10k_suspend,
+ .resume = ath10k_resume,
+#endif
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = ath10k_sta_add_debugfs,
+#endif
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+};
+
+/* Note: Be careful if you re-order these. There is code which depends on this
+ * ordering.
+ */
+static struct ieee80211_rate ath10k_rates[] = {
+ /* CCK */
+ RATETAB_ENT(10, 0x82, 0),
+ RATETAB_ENT(20, 0x84, 0),
+ RATETAB_ENT(55, 0x8b, 0),
+ RATETAB_ENT(110, 0x96, 0),
+ /* OFDM */
+ RATETAB_ENT(60, 0x0c, 0),
+ RATETAB_ENT(90, 0x12, 0),
+ RATETAB_ENT(120, 0x18, 0),
+ RATETAB_ENT(180, 0x24, 0),
+ RATETAB_ENT(240, 0x30, 0),
+ RATETAB_ENT(360, 0x48, 0),
+ RATETAB_ENT(480, 0x60, 0),
+ RATETAB_ENT(540, 0x6c, 0),
+};
+
+#define ath10k_a_rates (ath10k_rates + 4)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+struct ath10k *ath10k_mac_create(size_t priv_size)
+{
+ struct ieee80211_hw *hw;
+ struct ath10k *ar;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
+ if (!hw)
+ return NULL;
+
+ ar = hw->priv;
+ ar->hw = hw;
+
+ return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+ ieee80211_free_hw(ar->hw);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ },
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+ {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ {
+ .limits = ath10k_10x_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
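+/* Build the VHT capabilities advertised to mac80211: MCS 0-9 on every
+ * available RF chain, "not supported" on the remaining chains.
+ */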
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 mcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->vht_cap_info;
+
+ mcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_rf_chains)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
+ }
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+ return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+
+ if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar->ht_cap_info;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ /* max AMSDU is implicitly taken from vht_cap_info */
+ if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rf_chains; i++)
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif_iter *arvif_iter = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
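+/* Look up the ath10k_vif owning a given vdev id by iterating over all
+ * active interfaces.
+ */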
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath10k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ };
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+ void *channels;
+ int ret;
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+ ht_cap = ath10k_get_ht_cap(ar);
+ vht_cap = ath10k_create_vht_cap(ar);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ channels = kmemdup(ath10k_2ghz_channels,
+ sizeof(ath10k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ band->ht_cap = ht_cap;
+
+ /* Enable the VHT support at 2.4 GHz */
+ band->vht_cap = vht_cap;
+
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ }
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ channels = kmemdup(ath10k_5ghz_channels,
+ sizeof(ath10k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_a_rates_size;
+ band->bitrates = ath10k_a_rates;
+ band->ht_cap = ht_cap;
+ band->vht_cap = vht_cap;
+ ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ }
+
+ ar->hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
+
+ ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
+ ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_AP_LINK_PS |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_SW_CRYPTO_CONTROL;
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+ ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+ }
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+
+ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+		/* Firmware delivers WPS/P2P Probe Request frames to the driver
+		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
+		 * correct Probe Responses. Advertising probe response offload
+		 * here is more of a workaround than a real offload.
+		 */
+ ar->hw->wiphy->probe_resp_offload |=
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ }
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+
+	/*
+	 * On low-latency hardware the queues are managed entirely by the
+	 * firmware, so only the standard four queues are advertised to
+	 * mac80211.
+	 */
+ ar->hw->queues = 4;
+
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ /* Init ath dfs pattern detector */
+ ar->ath_common.debug_mask = ATH_DBG_DFS;
+ ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
+ }
+
+ ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+ ath10k_reg_notifier);
+ if (ret) {
+ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
+ goto err_free;
+ }
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
+ goto err_free;
+ }
+
+ if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+ ret = regulatory_hint(ar->hw->wiphy,
+ ar->ath_common.regulatory.alpha2);
+ if (ret)
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ ieee80211_unregister_hw(ar->hw);
+err_free:
+ kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+ return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+ ieee80211_unregister_hw(ar->hw);
+
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
+ kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 000000000..68296117d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+#define WEP_KEYID_SHIFT 6
+
+struct ath10k_generic_iter {
+ struct ath10k *ar;
+ int ret;
+};
+
+struct ath10k *ath10k_mac_create(size_t priv_size);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void __ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_timeout_work(struct work_struct *work);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx);
+
+static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath10k_vif *)vif->drv_priv;
+}
+
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (arvif->tx_seq_no == 0)
+ arvif->tx_seq_no = 0x1000;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ arvif->tx_seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+ }
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 000000000..d09d79a33
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,2773 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include "core.h"
+#include "debug.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6174_2_1_DEVICE_ID (0x003e)
+
+static const struct pci_device_id ath10k_pci_id_table[] = {
+ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ {0}
+};
+
+static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
+ /* QCA988X pre 2.0 chips are not supported because they need some nasty
+ * hacks. ath10k doesn't have them and these devices crash horribly
+ * because of that.
+ */
+ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+};
+
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer);
+
+static const struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: unused */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* NB: 50% of src nentries, since tx has 2 frags */
+
+ /* CE5: unused */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(4096),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+};
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+ u32 cause;
+
+ /* Check if the shared legacy irq is for us */
+ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+ return true;
+
+ return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+	/* IMPORTANT: the INTR_CLR register has to be set after
+	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
+	 * properly cleared. */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->num_msi_intrs > 1)
+ return "msi-x";
+
+ if (ar_pci->num_msi_intrs == 1)
+ return "msi";
+
+ return "legacy";
+}
+
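+/* Allocate an rx skb, DMA-map it and hand it to the copy engine destination
+ * ring. Caller must hold ce_lock.
+ */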
+static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map pci rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ while (num--) {
+ ret = __ath10k_pci_rx_post_buf(pipe);
+ if (ret) {
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ mod_timer(&ar_pci->rx_post_retry, jiffies +
+ ATH10K_PCI_RX_POST_RETRY_MS);
+ break;
+ }
+ }
+}
+
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ __ath10k_pci_rx_post_pipe(pipe);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static void ath10k_pci_rx_post(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ for (i = 0; i < CE_COUNT; i++)
+ __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+{
+ struct ath10k *ar = (void *)ptr;
+
+ ath10k_pci_rx_post(ar);
+}
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * The caller must guarantee proper alignment, when applicable, and that
+ * there is only a single user at any moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+ int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 buf;
+ unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int id;
+ unsigned int flags;
+ struct ath10k_ce_pipe *ce_diag;
+ /* Host buffer address in CE space */
+ u32 ce_data;
+ dma_addr_t ce_data_base = 0;
+ void *data_buf = NULL;
+ int i;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed from Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ orig_nbytes = nbytes;
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
+
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ memset(data_buf, 0, orig_nbytes);
+
+ remaining_bytes = orig_nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
+ nbytes = min_t(unsigned int, remaining_bytes,
+ DIAG_TRANSFER_LIMIT);
+
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ if (ret != 0)
+ goto done;
+
+ /* Request CE to send from Target(!) address to Host buffer */
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+ address);
+
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
+ 0);
+ if (ret)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
+ mdelay(1);
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != (u32)address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ ce_data += nbytes;
+ }
+
+done:
+ if (ret == 0)
+ memcpy(data, data_buf, orig_nbytes);
+ else
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+
+ if (data_buf)
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
+{
+ __le32 val = 0;
+ int ret;
+
+ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
+ *value = __le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
+ u32 src, u32 len)
+{
+ u32 host_addr, addr;
+ int ret;
+
+ host_addr = host_interest_item_address(src);
+
+ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
+ src, ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
+ addr, len, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
+ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
+
+static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 buf;
+ unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int id;
+ unsigned int flags;
+ struct ath10k_ce_pipe *ce_diag;
+ void *data_buf = NULL;
+ u32 ce_data; /* Host buffer address in CE space */
+ dma_addr_t ce_data_base = 0;
+ int i;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed to Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ orig_nbytes = nbytes;
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, orig_nbytes);
+
+	/*
+	 * The address supplied by the caller is in the
+	 * Target CPU virtual address space.
+	 *
+	 * In order to use this address with the diagnostic CE,
+	 * convert it from Target CPU virtual address space
+	 * to CE address space.
+	 */
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+
+ remaining_bytes = orig_nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
+ /* FIXME: check cast */
+ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+ /* Set up to receive directly into Target(!) address */
+ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ if (ret != 0)
+ goto done;
+
+ /*
+ * Request CE to send caller-supplied data that
+ * was copied to bounce buffer to Target(!) address.
+ */
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ nbytes, 0, 0);
+ if (ret != 0)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+ &completed_nbytes,
+ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (buf != address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ ce_data += nbytes;
+ }
+
+done:
+ if (data_buf) {
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
+ }
+
+ if (ret != 0)
+ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
+ address, ret);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
+{
+ __le32 val = __cpu_to_le32(value);
+
+ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
+}
+
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+ u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+ int tot_delay = 0;
+ int curr_delay = 5;
+
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar))
+ return 0;
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
+ PCIE_SOC_WAKE_V_MASK);
+ return ath10k_pci_wake_wait(ar);
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
+ PCIE_SOC_WAKE_RESET);
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
+ &nbytes, &transfer_id) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (skb == NULL)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ cb->tx_completion(ar, skb);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes, max_nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ cb->rx_completion(ar, skb);
+ }
+
+ ath10k_pci_rx_post_pipe(pipe_info);
+}
+
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int err, i = 0;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+ goto err;
+ }
+
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ /* `i` is equal to `n_items - 1` after the for() loop above */
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return err;
+}
+
+static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
+static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_pci_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
+ hi_failure_state,
+ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
+ if (ret) {
+ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ __le32_to_cpu(reg_dump_values[i]),
+ __le32_to_cpu(reg_dump_values[i + 1]),
+ __le32_to_cpu(reg_dump_values[i + 2]),
+ __le32_to_cpu(reg_dump_values[i + 3]));
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
+ crash_data->registers[i] = reg_dump_values[i];
+}
+
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char uuid[50];
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_crash_counter++;
+
+ crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+
+ if (crash_data)
+ scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
+ else
+ scnprintf(uuid, sizeof(uuid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
+ ath10k_print_driver_info(ar);
+ ath10k_pci_dump_registers(ar, crash_data);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
+static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
+
+ if (!force) {
+ int resources;
+ /*
+ * Decide whether to actually poll for completions, or just
+ * wait for a later chance.
+ * If there seem to be plenty of resources left, then just wait
+ * since checking involves reading a CE register, which is a
+ * relatively expensive operation.
+ */
+ resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+ /*
+ * If at least 50% of the total resources are still available,
+ * don't bother checking again yet.
+ */
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
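+
+/*
+ * Worked example of the heuristic above (illustrative numbers, not taken
+ * from any particular host_ce_config_wlan entry): for a pipe whose source
+ * ring has src_nentries == 64, the early return is taken as long as more
+ * than 32 entries are still free, so the relatively expensive CE register
+ * read done by ath10k_ce_per_engine_service() is skipped until the ring is
+ * at least half full.
+ */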
+
+static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
+
+ memcpy(&ar_pci->msg_callbacks_current, callbacks,
+ sizeof(ar_pci->msg_callbacks_current));
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ tasklet_kill(&ar_pci->intr_tq);
+ tasklet_kill(&ar_pci->msi_fw_err);
+
+ for (i = 0; i < CE_COUNT; i++)
+ tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+ del_timer_sync(&ar_pci->rx_post_retry);
+}
+
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id, u8 *ul_pipe,
+ u8 *dl_pipe, int *ul_is_polled,
+ int *dl_is_polled)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
+
+ /* polling for received messages not supported */
+ *dl_is_polled = 0;
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ *ul_is_polled =
+ (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
+
+ return 0;
+}
+
+static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ int ul_is_polled, dl_is_polled;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
+ (void)ath10k_pci_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe,
+ dl_pipe,
+ &ul_is_polled,
+ &dl_is_polled);
+}
+
+static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ synchronize_irq(ar_pci->pdev->irq + i);
+}
+
+static void ath10k_pci_irq_enable(struct ath10k *ar)
+{
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ ath10k_pci_irq_enable(ar);
+ ath10k_pci_rx_post(ar);
+
+ return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct ce_desc *ce_desc;
+ struct sk_buff *skb;
+ unsigned int id;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ar_pci = ath10k_pci_priv(ar);
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ ce_desc = ce_ring->shadow_base;
+ if (WARN_ON(!ce_desc))
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+ id = MS(__le16_to_cpu(ce_desc[i].flags),
+ CE_DESC_FLAGS_META_DATA);
+
+ ar_pci->msg_callbacks_current.tx_completion(ar, skb);
+ }
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ * buffers that were enqueued for receive
+ * buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ struct ath10k_pci_pipe *pipe_info;
+
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ ath10k_pci_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
+}
+
+static void ath10k_pci_flush(struct ath10k *ar)
+{
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_buffer_cleanup(ar);
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ /* Most likely the device has the HTT Rx ring configured. The only way
+ * to prevent the device from accessing (and possibly corrupting) host
+ * memory is to reset the chip now.
+ *
+ * There's also no known way of masking MSI interrupts on the device.
+ * For ranged MSI the CE-related interrupts can be masked. However,
+ * regardless of how many MSI interrupts are assigned, the first one
+ * is always used for firmware indications (crashes) and cannot be
+ * masked. To prevent the device from asserting the interrupt, reset it
+ * before proceeding with cleanup.
+ */
+ ath10k_pci_warm_reset(ar);
+
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+ ath10k_pci_flush(ar);
+}
+
+static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
+ dma_addr_t req_paddr = 0;
+ dma_addr_t resp_paddr = 0;
+ struct bmi_xfer xfer = {};
+ void *treq, *tresp = NULL;
+ int ret = 0;
+
+ might_sleep();
+
+ if (resp && !resp_len)
+ return -EINVAL;
+
+ if (resp && resp_len && *resp_len == 0)
+ return -EINVAL;
+
+ treq = kmemdup(req, req_len, GFP_KERNEL);
+ if (!treq)
+ return -ENOMEM;
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+ if (ret)
+ goto err_dma;
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+ if (!tresp) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+ if (ret)
+ goto err_req;
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+
+ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
+ }
+
+ ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+ if (ret)
+ goto err_resp;
+
+ ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+ if (ret) {
+ u32 unused_buffer;
+ unsigned int unused_nbytes;
+ unsigned int unused_id;
+
+ ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+ &unused_nbytes, &unused_id);
+ } else {
+ /* the BMI exchange completed; ret is already 0 (success) */
+ ret = 0;
+ }
+
+err_resp:
+ if (resp) {
+ u32 unused_buffer;
+
+ ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+ dma_unmap_single(ar->dev, resp_paddr,
+ *resp_len, DMA_FROM_DEVICE);
+ }
+err_req:
+ dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+ if (ret == 0 && resp_len) {
+ *resp_len = min(*resp_len, xfer.resp_len);
+ memcpy(resp, tresp, xfer.resp_len);
+ }
+err_dma:
+ kfree(treq);
+ kfree(tresp);
+
+ return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
+{
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id))
+ return;
+
+ xfer->tx_done = true;
+}
+
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+ &nbytes, &transfer_id, &flags))
+ return;
+
+ if (WARN_ON_ONCE(!xfer))
+ return;
+
+ if (!xfer->wait_for_resp) {
+ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
+ return;
+ }
+
+ xfer->resp_len = nbytes;
+ xfer->rx_done = true;
+}
+
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer)
+{
+ unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
+ while (time_before_eq(jiffies, timeout)) {
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
+ return 0;
+
+ schedule();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_pci_read32(ar, addr);
+ val |= CORE_CTRL_CPU_INTR_MASK;
+ ath10k_pci_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_pci_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->pdev->device) {
+ case QCA988X_2_0_DEVICE_ID:
+ return 1;
+ case QCA6174_2_1_DEVICE_ID:
+ switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+ case QCA6174_HW_1_0_CHIP_ID_REV:
+ case QCA6174_HW_1_1_CHIP_ID_REV:
+ return 3;
+ case QCA6174_HW_1_3_CHIP_ID_REV:
+ return 2;
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
+ return 6;
+ case QCA6174_HW_3_0_CHIP_ID_REV:
+ case QCA6174_HW_3_1_CHIP_ID_REV:
+ case QCA6174_HW_3_2_CHIP_ID_REV:
+ return 9;
+ }
+ break;
+ }
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+ u32 interconnect_targ_addr;
+ u32 pcie_state_targ_addr = 0;
+ u32 pipe_cfg_targ_addr = 0;
+ u32 svc_to_pipe_map = 0;
+ u32 pcie_config_flags = 0;
+ u32 ealloc_value;
+ u32 ealloc_targ_addr;
+ u32 flag2_value;
+ u32 flag2_targ_addr;
+ int ret = 0;
+
+ /* Download to Target the CE Config and the service-to-CE map */
+ interconnect_targ_addr =
+ host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+ /* Supply Target-side CE configuration */
+ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
+ &pcie_state_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pcie_state_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pcie state addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ pipe_cfg_addr)),
+ &pipe_cfg_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pipe_cfg_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pipe cfg addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+ target_ce_config_wlan,
+ sizeof(target_ce_config_wlan));
+
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ svc_to_pipe_map)),
+ &svc_to_pipe_map);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ if (svc_to_pipe_map == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid svc_to_pipe map\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+ target_service_to_ce_map_wlan,
+ sizeof(target_service_to_ce_map_wlan));
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ &pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ /* configure early allocation */
+ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* first bank is switched to IRAM */
+ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+ HI_EARLY_ALLOC_MAGIC_MASK);
+ ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
+ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell Target to proceed with initialization */
+ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get option val: %d\n", ret);
+ return ret;
+ }
+
+ flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set option val: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_alloc_pipes(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe;
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_pci->pipe_info[i];
+ pipe->ce_hdl = &ar_pci->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
+ ath10k_pci_ce_send_done,
+ ath10k_pci_ce_recv_data);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ /* Last CE is Diagnostic Window */
+ if (i == CE_COUNT - 1) {
+ ar_pci->ce_diag = pipe->ce_hdl;
+ continue;
+ }
+
+ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_free_pipes(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+static int ath10k_pci_init_pipes(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
+{
+ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
+ FW_IND_EVENT_PENDING;
+}
+
+static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ val &= ~FW_IND_EVENT_PENDING;
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
+}
+
+/* this function effectively clears target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+}
+
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ msleep(10);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
+
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_warm_reset_counter++;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_pci_irq_disable(ar);
+
+ /* Make sure the target CPU is not doing anything dangerous; e.g. if it
+ * were to access the copy engine while the host performs a copy engine
+ * reset then it is possible for the device to confuse the PCI-E
+ * controller to the point of bringing the host system to a complete
+ * stop (i.e. hang).
+ */
+ ath10k_pci_warm_reset_si0(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+ ath10k_pci_wait_for_target_init(ar);
+
+ ath10k_pci_warm_reset_clear_lf(ar);
+ ath10k_pci_warm_reset_ce(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
+{
+ int i, ret;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
+
+ /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+ * Warm reset is therefore preferred as it is safer, but it may not be
+ * able to recover the device from all possible failure scenarios.
+ *
+ * Warm reset doesn't always work on first try so attempt it a few
+ * times before giving up.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+ ret);
+ continue;
+ }
+
+ /* FIXME: Sometimes copy engine doesn't recover after warm
+ * reset. In most cases this needs cold reset. In some of these
+ * cases the device is in such a state that a cold reset may
+ * lock up the host.
+ *
+ * Reading any host interest register via copy engine is
+ * sufficient to verify if device is capable of booting
+ * firmware blob.
+ */
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+ &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+ return 0;
+ }
+
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+ ath10k_warn(ar, "refusing cold reset as requested\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
+
+ /* FIXME: QCA6174 requires cold + warm reset to work. */
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ if (QCA_REV_988X(ar))
+ return ath10k_pci_qca988x_chip_reset(ar);
+ else if (QCA_REV_6174(ar))
+ return ath10k_pci_qca6174_chip_reset(ar);
+ else
+ return -ENOTSUPP;
+}
+
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "firmware crashed during chip reset\n");
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ }
+
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+
+err_sleep:
+ ath10k_pci_sleep(ar);
+ return ret;
+}
+
+static void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ /* Currently hif_power_up performs effectively a reset and hif_stop
+ * resets the chip as well so there's no point in resetting here.
+ */
+
+ ath10k_pci_sleep(ar);
+}
+
+#ifdef CONFIG_PM
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0x3) {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ (val & 0xffffff00) | 0x03);
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0) {
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ val & 0xffffff00);
+ /*
+ * Suspend/Resume resets the PCI configuration space,
+ * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+ * to keep PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_pci_hif_start,
+ .stop = ath10k_pci_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .set_callbacks = ath10k_pci_hif_set_callbacks,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_pci_read32,
+ .write32 = ath10k_pci_write32,
+#ifdef CONFIG_PM
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
+#endif
+};
+
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+ struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
+ struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+ ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+
+ if (!ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
+ return;
+ }
+
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * NOTE: We are able to derive ce_id from irq because we
+ * use a one-to-one mapping for CE's 0..5.
+ * CE's 6 & 7 do not use interrupts at all.
+ *
+ * This mapping must be kept in sync with the mapping
+ * used by firmware.
+ */
+ tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+ return IRQ_HANDLED;
+}
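+
+/*
+ * Illustrative example of the irq -> CE mapping described above (sketch
+ * only): with a block of MSI vectors based at pdev->irq, the vector
+ * pdev->irq + MSI_ASSIGN_CE_INITIAL + 2 is delivered for CE 2, i.e.
+ *
+ *	ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+ *
+ * which is exactly the computation performed in the handler.
+ */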
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ tasklet_schedule(&ar_pci->msi_fw_err);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->num_msi_intrs == 0) {
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ }
+
+ tasklet_schedule(&ar_pci->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath10k_pci_tasklet(unsigned long data)
+{
+ struct ath10k *ar = (struct ath10k *)data;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ return;
+ }
+
+ ath10k_ce_per_engine_service_any(ar);
+
+ /* Re-enable legacy irq that was disabled in the irq handler */
+ if (ar_pci->num_msi_intrs == 0)
+ ath10k_pci_enable_legacy_irq(ar);
+}
+
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret, i;
+
+ ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+ ath10k_pci_msi_fw_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
+ ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+ return ret;
+ }
+
+ for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+ ret = request_irq(ar_pci->pdev->irq + i,
+ ath10k_pci_per_engine_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
+ ar_pci->pdev->irq + i, ret);
+
+ for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+ free_irq(ar_pci->pdev->irq + i, ar);
+
+ free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ return ath10k_pci_request_irq_legacy(ar);
+ case 1:
+ return ath10k_pci_request_irq_msi(ar);
+ case MSI_NUM_REQUEST:
+ return ath10k_pci_request_irq_msix(ar);
+ }
+
+ ath10k_warn(ar, "unknown irq configuration upon request\n");
+ return -EINVAL;
+}
+
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ /* There's at least one interrupt regardless of whether it's a legacy
+ * INTR, MSI or MSI-X interrupt. */
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+ tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+ (unsigned long)ar);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ar_pci->pipe_info[i].ar_pci = ar_pci;
+ tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
+ (unsigned long)&ar_pci->pipe_info[i]);
+ }
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ath10k_pci_init_irq_tasklets(ar);
+
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
+ ath10k_info(ar, "limiting irq mode to: %d\n",
+ ath10k_pci_irq_mode);
+
+ /* Try MSI-X */
+ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
+ ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+ ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+ ar_pci->num_msi_intrs);
+ if (ret > 0)
+ return 0;
+
+ /* fall-through */
+ }
+
+ /* Try MSI */
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ ar_pci->num_msi_intrs = 1;
+ ret = pci_enable_msi(ar_pci->pdev);
+ if (ret == 0)
+ return 0;
+
+ /* fall-through */
+ }
+
+ /* Try legacy irq
+ *
+ * A potential race occurs here: the CORE_BASE write
+ * depends on the target correctly decoding the AXI address, but
+ * the host won't know when the target writes the BAR to CORE_CTRL.
+ * This write might get lost if the target has NOT written the BAR.
+ * For now, fix the race by repeating the write in the
+ * synchronization check below. */
+ ar_pci->num_msi_intrs = 0;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ return 0;
+}
+
+static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->num_msi_intrs) {
+ case 0:
+ ath10k_pci_deinit_irq_legacy(ar);
+ return 0;
+ case 1:
+ /* fall-through */
+ case MSI_NUM_REQUEST:
+ pci_disable_msi(ar_pci->pdev);
+ return 0;
+ default:
+ pci_disable_msi(ar_pci->pdev);
+ }
+
+ ath10k_warn(ar, "unknown irq configuration upon deinit\n");
+ return -EINVAL;
+}
+
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long timeout;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+ val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ /* the device has crashed so don't bother trying anymore */
+ if (val & FW_IND_EVENT_PENDING)
+ break;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
+ if (ar_pci->num_msi_intrs == 0)
+ /* Fix potential race by repeating CORE_BASE writes */
+ ath10k_pci_enable_legacy_irq(ar);
+
+ mdelay(10);
+ } while (time_before(jiffies, timeout));
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+
+ if (val == 0xffffffff) {
+ ath10k_err(ar, "failed to read device register, device is gone\n");
+ return -EIO;
+ }
+
+ if (val & FW_IND_EVENT_PENDING) {
+ ath10k_warn(ar, "device has crashed during init\n");
+ return -ECOMM;
+ }
+
+ if (!(val & FW_IND_INITIALIZED)) {
+ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
+ val);
+ return -ETIMEDOUT;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
+ return 0;
+}
+
+static int ath10k_pci_cold_reset(struct ath10k *ar)
+{
+ int i;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_cold_reset_counter++;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Put Target, including PCIe, into RESET. */
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
+ val |= 1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+ if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
+ RTC_STATE_COLD_RESET_MASK)
+ break;
+ msleep(1);
+ }
+
+ /* Pull Target, including PCIe, out of RESET. */
+ val &= ~1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+ if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
+ RTC_STATE_COLD_RESET_MASK))
+ break;
+ msleep(1);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_claim(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 lcr_val;
+ int ret;
+
+ pci_set_drvdata(pdev, ar);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_request_region(pdev, BAR_NUM, "ath");
+ if (ret) {
+ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
+ ret);
+ goto err_device;
+ }
+
+ /* Target expects 32 bit DMA. Enforce it. */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
+ goto err_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
+ ret);
+ goto err_region;
+ }
+
+ pci_set_master(pdev);
+
+ /* Workaround: Disable ASPM */
+ pci_read_config_dword(pdev, 0x80, &lcr_val);
+ pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
+
+ /* Arrange for access to Target SoC registers. */
+ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
+ if (!ar_pci->mem) {
+ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
+ ret = -EIO;
+ goto err_master;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ return 0;
+
+err_master:
+ pci_clear_master(pdev);
+
+err_region:
+ pci_release_region(pdev, BAR_NUM);
+
+err_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void ath10k_pci_release(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+
+ pci_iounmap(pdev, ar_pci->mem);
+ pci_release_region(pdev, BAR_NUM);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
+{
+ const struct ath10k_pci_supp_chip *supp_chip;
+ int i;
+ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
+ supp_chip = &ath10k_pci_supp_chips[i];
+
+ if (supp_chip->dev_id == dev_id &&
+ supp_chip->rev_id == rev_id)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ int ret = 0;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id;
+
+ switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA988X;
+ break;
+ case QCA6174_2_1_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA6174;
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+ hw_rev, &ath10k_pci_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_pci->pdev = pdev;
+ ar_pci->dev = &pdev->dev;
+ ar_pci->ar = ar;
+
+ spin_lock_init(&ar_pci->ce_lock);
+ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
+ (unsigned long)ar);
+
+ ret = ath10k_pci_claim(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up: %d\n", ret);
+ goto err_release;
+ }
+
+ ret = ath10k_pci_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ goto err_sleep;
+ }
+
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_irq_disable(ar);
+
+ ret = ath10k_pci_init_irq(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
+ goto err_free_pipes;
+ }
+
+ ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
+ if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, chip_id);
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
+ ath10k_pci_sleep(ar);
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+
+err_deinit_irq:
+ ath10k_pci_deinit_irq(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_sleep:
+ ath10k_pci_sleep(ar);
+
+err_release:
+ ath10k_pci_release(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath10k *ar = pci_get_drvdata(pdev);
+ struct ath10k_pci *ar_pci;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
+
+ if (!ar)
+ return;
+
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci)
+ return;
+
+ ath10k_core_unregister(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_free_pipes(ar);
+ ath10k_pci_release(ar);
+ ath10k_core_destroy(ar);
+}
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static struct pci_driver ath10k_pci_driver = {
+ .name = "ath10k_pci",
+ .id_table = ath10k_pci_id_table,
+ .probe = ath10k_pci_probe,
+ .remove = ath10k_pci_remove,
+};
+
+static int __init ath10k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath10k_pci_driver);
+ if (ret)
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+ pci_unregister_driver(&ath10k_pci_driver);
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 000000000..bddf54320
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+ bool tx_done;
+ bool rx_done;
+ bool wait_for_resp;
+ u32 resp_len;
+};
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+ /* Pipe configuration Target address */
+ /* NB: ce_pipe_config[CE_COUNT] */
+ u32 pipe_cfg_addr;
+
+ /* Service to pipe map Target address */
+ /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+ u32 svc_to_pipe_map;
+
+ /* number of MSI interrupts requested */
+ u32 msi_requested;
+
+ /* number of MSI interrupts granted */
+ u32 msi_granted;
+
+ /* Message Signalled Interrupt address */
+ u32 msi_addr;
+
+ /* Base data */
+ u32 msi_data;
+
+ /*
+ * Data for firmware interrupt;
+ * MSI data for other interrupts are
+ * in various SoC registers
+ */
+ u32 msi_fw_intr_data;
+
+ /* PCIE_PWR_METHOD_* */
+ u32 power_mgmt_method;
+
+ /* PCIE_CONFIG_FLAG_* */
+ u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
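+
+/*
+ * Illustrative entry (values hypothetical, not copied from the real
+ * target_service_to_ce_map_wlan table):
+ *
+ *	{
+ *		.service_id = __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ *		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ *		.pipenum = __cpu_to_le32(0),
+ *	},
+ *
+ * would route host-to-target traffic for that service to CE 0.
+ * ath10k_pci_hif_map_service_to_pipe() walks the table to resolve the
+ * UL/DL pipes for a given service id.
+ */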
+
+/* Per-pipe state. */
+struct ath10k_pci_pipe {
+ /* Handle of underlying Copy Engine */
+ struct ath10k_ce_pipe *ce_hdl;
+
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
+ u8 pipe_num;
+
+ /* Convenience back pointer to hif_ce_state. */
+ struct ath10k *hif_ce_state;
+
+ size_t buf_sz;
+
+ /* protects compl_free and num_send_allowed */
+ spinlock_t pipe_lock;
+
+ struct ath10k_pci *ar_pci;
+ struct tasklet_struct intr;
+};
+
+struct ath10k_pci_supp_chip {
+ u32 dev_id;
+ u32 rev_id;
+};
+
+struct ath10k_pci {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+
+ /*
+ * Number of MSI interrupts granted, 0 --> using legacy PCI line
+ * interrupts.
+ */
+ int num_msi_intrs;
+
+ struct tasklet_struct intr_tq;
+ struct tasklet_struct msi_fw_err;
+
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
+
+ struct ath10k_hif_cb msg_callbacks_current;
+
+ /* Copy Engine used for Diagnostic Accesses */
+ struct ath10k_ce_pipe *ce_diag;
+
+ /* FIXME: document what this really protects */
+ spinlock_t ce_lock;
+
+ /* Map CE id to ce_state */
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+ struct timer_list rx_post_retry;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+ return (struct ath10k_pci *)ar->drv_priv;
+}
+
+#define ATH10K_PCI_RX_POST_RETRY_MS 50
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR 0xceef0000
+#define CDC_WAR_DATA_CE 4
+
+/*
+ * TODO: Should be a function call specific to each Target-type.
+ * This convoluted macro converts from Target CPU Virtual Address Space to CE
+ * Address Space. As part of this process, we conservatively fetch the current
+ * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
+ * for this device; but that's not guaranteed.
+ */
+#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
+ (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
+ CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
+ 0x100000 | ((addr) & 0xfffff))
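+
+/*
+ * Worked example (illustrative numbers only): if the CORE_CTRL read above
+ * returns a value whose low 11 bits are 0x005 and the caller passes a
+ * Target CPU address of 0x00401234, the macro evaluates to
+ *
+ *	(0x005 << 21) | 0x100000 | 0x01234 == 0x00b01234
+ *
+ * i.e. the upper bits come from the current PCIE BAR setting while only
+ * the low 20 bits of the original address are preserved.
+ */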
+
+/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+/* The target exposes its registers for direct access. However, before the
+ * host can access them it needs to make sure the target is awake
+ * (ath10k_pci_wake, ath10k_pci_wake_wait, ath10k_pci_is_awake). Once the
+ * target is awake it won't go to sleep unless the host tells it to
+ * (ath10k_pci_sleep).
+ *
+ * If the host tries to access target registers without waking the target
+ * up, it can scribble over host memory.
+ *
+ * If the target is asleep, waking it up may take up to 2 ms.
+ */
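+
+/*
+ * Minimal usage sketch of the wake/sleep pattern described above
+ * (illustrative only, error handling omitted; the real callers live in
+ * pci.c):
+ *
+ *	if (ath10k_pci_wake(ar) == 0) {
+ *		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ *		ath10k_pci_sleep(ar);
+ *	}
+ */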
+
+static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
+ u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(value, ar_pci->mem + offset);
+}
+
+static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + offset);
+}
+
+static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 000000000..e9cc7787b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+enum rx_attention_flags {
+ RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0,
+ RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1,
+ RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2,
+ RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3,
+ RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4,
+ RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5,
+ RX_ATTENTION_FLAGS_NON_QOS = 1 << 6,
+ RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7,
+ RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8,
+ RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9,
+ RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10,
+ RX_ATTENTION_FLAGS_EOSP = 1 << 11,
+ RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12,
+ RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13,
+ RX_ATTENTION_FLAGS_ORDER = 1 << 14,
+ RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15,
+ RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16,
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17,
+ RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
+ RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19,
+ RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20,
+ RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21,
+ RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22,
+ RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23,
+ RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24,
+ RX_ATTENTION_FLAGS_DIRECTED = 1 << 25,
+ RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26,
+ RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27,
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28,
+ RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29,
+ RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30,
+ RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31,
+};
+
+struct rx_attention {
+ __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
+
+/*
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * last_mpdu
+ * Indicates the last MSDU of the last MPDU of the PPDU. The
+ * PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * peer_idx_invalid
+ * Indicates no matching entries within the max search
+ * count. Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ * Indicates an unsuccessful search for the peer index due to
+ * timeout. Only set when first_msdu is set.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * Set if packet is U-APSD trigger. Key table will have bits
+ * per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * classification
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO so that the status can still be written. The
+ * remaining packets in this MPDU's PPDU will be filtered and
+ * no Ack response will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
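+
+/* Illustrative only, not part of the original patch: a sketch of how a driver
+ * might test the attention flags documented above on a completed descriptor.
+ * The helper name is hypothetical and the block is not compiled; only
+ * struct rx_attention and the RX_ATTENTION_FLAGS_* values come from this
+ * header.
+ */
+#if 0
+static bool example_rx_attention_frame_ok(const struct rx_attention *attn)
+{
+ u32 flags = __le32_to_cpu(attn->flags);
+
+ /* Descriptor contents are only valid once msdu_done is set. */
+ if (!(flags & RX_ATTENTION_FLAGS_MSDU_DONE))
+ return false;
+
+ /* Reject frames with FCS, decryption or TKIP MIC problems. */
+ if (flags & (RX_ATTENTION_FLAGS_FCS_ERR |
+ RX_ATTENTION_FLAGS_DECRYPT_ERR |
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR))
+ return false;
+
+ return true;
+}
+#endif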
+
+struct rx_frag_info {
+ u8 ring0_more_count;
+ u8 ring1_more_count;
+ u8 ring2_more_count;
+ u8 ring3_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 0. Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+ HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
+ HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+ HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
+ HTT_RX_MPDU_ENCRYPT_WAPI = 5,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
+ HTT_RX_MPDU_ENCRYPT_NONE = 7,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
+#define RX_MPDU_START_INFO0_FROM_DS (1 << 11)
+#define RX_MPDU_START_INFO0_TO_DS (1 << 12)
+#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13)
+#define RX_MPDU_START_INFO0_RETRY (1 << 14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB 28
+#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
+
+struct rx_mpdu_start {
+ __le32 info0;
+ union {
+ struct {
+ __le32 pn31_0;
+ __le32 info1; /* %RX_MPDU_START_INFO1_ */
+ } __packed;
+ struct {
+ u8 pn[6];
+ } __packed;
+ } __packed;
+} __packed;
+
+/*
+ * peer_idx
+ * The index of the address search table which associated with
+ * the peer table entry corresponding to this MPDU. Only valid
+ * when first_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * encrypted
+ * Protected bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * retry
+ * Retry bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * txbf_h_info
+ * The MPDU data will contain H information. Primarily used
+ * for debug.
+ *
+ * seq_num
+ * The sequence number from the 802.11 header. Only valid when
+ * first_msdu is set.
+ *
+ * encrypt_type
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer table)
+ * 0: WEP40
+ * 1: WEP104
+ * 2: TKIP without MIC
+ * 3: WEP128
+ * 4: TKIP (WPA)
+ * 5: WAPI
+ * 6: AES-CCM (WPA2)
+ * 7: No cipher
+ * Only valid when first_msdu is set
+ *
+ * pn_31_0
+ * Bits [31:0] of the PN number extracted from the IV field
+ * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
+ * valid.
+ * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ * WEPSeed[1], pn1}. Only pn[47:0] is valid.
+ * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ * pn0}. Only pn[47:0] is valid.
+ * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ * pn[47:0] are valid.
+ * Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ * Bits [47:32] of the PN number. See description for
+ * pn_31_0. The remaining PN fields are in the rx_msdu_end
+ * descriptor
+ *
+ * pn
+ * Use this field to access the pn without worrying about
+ * byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ * See definition in RX attention descriptor
+ *
+ * reserved_2
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * tid
+ * The TID field in the QoS control field
+ */
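+
+/* Illustrative only, not part of the original patch: a sketch of applying the
+ * RX_MPDU_START_INFO0_* masks above to the little-endian info0 word. The
+ * helper name is hypothetical and the block is not compiled.
+ */
+#if 0
+static void example_parse_rx_mpdu_start(const struct rx_mpdu_start *mpdu)
+{
+ u32 info0 = __le32_to_cpu(mpdu->info0);
+ u16 peer_idx, seq_num;
+ u8 encrypt_type; /* maps onto enum htt_rx_mpdu_encrypt_type */
+
+ peer_idx = (info0 & RX_MPDU_START_INFO0_PEER_IDX_MASK) >>
+ RX_MPDU_START_INFO0_PEER_IDX_LSB;
+ seq_num = (info0 & RX_MPDU_START_INFO0_SEQ_NUM_MASK) >>
+ RX_MPDU_START_INFO0_SEQ_NUM_LSB;
+ encrypt_type = (info0 & RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK) >>
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB;
+
+ (void)peer_idx;
+ (void)seq_num;
+ (void)encrypt_type;
+}
+#endif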
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31)
+
+struct rx_mpdu_end {
+ __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ * Reserved
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO so that the status can still be written. The
+ * remaining packets in this MPDU's PPDU will be filtered and
+ * no Ack response will be transmitted.
+ *
+ * last_mpdu
+ * Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ * Indicates that a delimiter FCS error occurred after this
+ * MPDU before the next MPDU. Only valid when last_msdu is
+ * set.
+ *
+ * post_delim_cnt
+ * Count of the delimiters after this MPDU. This requires the
+ * last MPDU to be held until all the EOF descriptors have been
+ * received. This may be inefficient in the future when
+ * ML-MIMO is used. Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ * See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ * See definition in RX attention descriptor
+ *
+ * decrypt_err
+ * See definition in RX attention descriptor
+ *
+ * fcs_err
+ * See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
+#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
+
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only the first frame in the sequence contains (a)
+ * and (b). */
+enum rx_msdu_decap_format {
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header. */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
+ RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes). */
+ RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start {
+ __le32 info0; /* %RX_MSDU_START_INFO0_ */
+ __le32 flow_id_crc;
+ __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+/*
+ * msdu_length
+ * MSDU length in bytes after decapsulation. This field is
+ * still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation
+ *
+ * ip_offset
+ * Indicates the IP offset in bytes from the start of the
+ * packet after decapsulation. Only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ring_mask
+ * Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ * Indicates the offset in bytes to the start of TCP or UDP
+ * header from the start of the IP header after decapsulation.
+ * Only valid if tcp_proto or udp_proto is set. The value 0
+ * indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * flow_id_crc
+ * The flow_id_crc runs CRC32 on the following information:
+ * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ * protocol[7:0]}.
+ * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ * next_header[7:0]}
+ * UDP case: src_port[15:0], dest_port[15:0]
+ * TCP case: src_port[15:0], dest_port[15:0],
+ * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ * {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ * timestamp.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap modulo 256.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ * 0: RAW: No decapsulation
+ * 1: Native WiFi
+ * 2: Ethernet 2 (DIX)
+ * 3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a
+ * fragmented IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * reserved_2b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
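+
+/* Illustrative only, not part of the original patch: a sketch of recovering
+ * the decapsulation format and MSDU length from rx_msdu_start using the masks
+ * above. The helper name is hypothetical and the block is not compiled.
+ */
+#if 0
+static enum rx_msdu_decap_format
+example_rx_msdu_decap(const struct rx_msdu_start *msdu, u16 *msdu_len)
+{
+ u32 info0 = __le32_to_cpu(msdu->info0);
+ u32 info1 = __le32_to_cpu(msdu->info1);
+
+ *msdu_len = (info0 & RX_MSDU_START_INFO0_MSDU_LENGTH_MASK) >>
+ RX_MSDU_START_INFO0_MSDU_LENGTH_LSB;
+
+ return (info1 & RX_MSDU_START_INFO1_DECAP_FORMAT_MASK) >>
+ RX_MSDU_START_INFO1_DECAP_FORMAT_LSB;
+}
+#endif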
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
+#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
+
+struct rx_msdu_end {
+ __le16 ip_hdr_cksum;
+ __le16 tcp_hdr_cksum;
+ u8 key_id_octet;
+ u8 classification_filter;
+ u8 wapi_pn[10];
+ __le32 info0;
+} __packed;
+
+/*
+ * ip_hdr_chksum
+ * This can include the IP header checksum or the pseudo header
+ * checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id_octet
+ * The key ID octet from the IV. Only valid when first_msdu is
+ * set.
+ *
+ * classification_filter
+ * Indicates the classification filter rule number.
+ *
+ * ext_wapi_pn_63_48
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
+ * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ * descriptor.
+ *
+ * ext_wapi_pn_95_64
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ * pn11).
+ *
+ * ext_wapi_pn_127_96
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ * pn15).
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when
+ * first_msdu is set. This field is taken directly from the
+ * length field of the A-MPDU delimiter or the preamble length
+ * field for non-A-MPDU frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of the A-MSDU. If both first_msdu
+ * and last_msdu are set in the MSDU then this is a
+ * non-aggregated MSDU frame: a normal MPDU. Interior MSDUs in
+ * an A-MSDU shall have both first_msdu and last_msdu bits set
+ * to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is
+ * only valid when last_msdu is set.
+ *
+ * reserved_3a
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * pre_delim_err
+ * Indicates that the first delimiter had an FCS failure. Only
+ * valid when first_mpdu and first_msdu are set.
+ *
+ * reserved_3b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
+#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
+
+#define RX_PPDU_START_SIG_RATE_OFDM_48 0
+#define RX_PPDU_START_SIG_RATE_OFDM_24 1
+#define RX_PPDU_START_SIG_RATE_OFDM_12 2
+#define RX_PPDU_START_SIG_RATE_OFDM_6 3
+#define RX_PPDU_START_SIG_RATE_OFDM_54 4
+#define RX_PPDU_START_SIG_RATE_OFDM_36 5
+#define RX_PPDU_START_SIG_RATE_OFDM_18 6
+#define RX_PPDU_START_SIG_RATE_OFDM_9 7
+
+#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
+#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
+#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
+#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
+#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
+#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
+#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+
+struct rx_ppdu_start {
+ struct {
+ u8 pri20_mhz;
+ u8 ext20_mhz;
+ u8 ext40_mhz;
+ u8 ext80_mhz;
+ } rssi_chains[4];
+ u8 rssi_comb;
+ __le16 rsvd0;
+ u8 info0; /* %RX_PPDU_START_INFO0_ */
+ __le32 info1; /* %RX_PPDU_START_INFO1_ */
+ __le32 info2; /* %RX_PPDU_START_INFO2_ */
+ __le32 info3; /* %RX_PPDU_START_INFO3_ */
+ __le32 info4; /* %RX_PPDU_START_INFO4_ */
+ __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
+
+/*
+ * rssi_chain0_pri20
+ * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ * The combined RSSI of RX PPDU of all active chains and
+ * bandwidths. Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ * Do we really support this?
+ *
+ * reserved_4b
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ * If l_sig_rate_select is 0:
+ * 0x8: OFDM 48 Mbps
+ * 0x9: OFDM 24 Mbps
+ * 0xA: OFDM 12 Mbps
+ * 0xB: OFDM 6 Mbps
+ * 0xC: OFDM 54 Mbps
+ * 0xD: OFDM 36 Mbps
+ * 0xE: OFDM 18 Mbps
+ * 0xF: OFDM 9 Mbps
+ * If l_sig_rate_select is 1:
+ * 0x8: CCK 11 Mbps long preamble
+ * 0x9: CCK 5.5 Mbps long preamble
+ * 0xA: CCK 2 Mbps long preamble
+ * 0xB: CCK 1 Mbps long preamble
+ * 0xC: CCK 11 Mbps short preamble
+ * 0xD: CCK 5.5 Mbps short preamble
+ * 0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ * Legacy signal rate select. If set then l_sig_rate indicates
+ * CCK rates. If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ * Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ * Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ * Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ * Indicates the type of preamble ahead:
+ * 0x4: Legacy (OFDM/CCK)
+ * 0x8: HT
+ * 0x9: HT with TxBF
+ * 0xC: VHT
+ * 0xD: VHT with TxBF
+ * 0x80 - 0xFF: Reserved for special baseband data types such
+ * as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (first 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (first 24 bits)
+ * Else
+ * Reserved
+ *
+ * reserved_6
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (last 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (last 24 bits)
+ * Else
+ * Reserved
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_7
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ * WiFi 1.0 and WiFi 2.0 will likely have this field set to
+ * all 0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ * Service field from BB for OFDM, HT and VHT packets. CCK
+ * packets will have service field of 0.
+ *
+ * reserved_9
+ * Reserved: HW should fill with 0, FW should ignore.
+*/
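+
+/* Illustrative only, not part of the original patch: a sketch of extracting
+ * the preamble type from the rx_ppdu_start info1 word and mapping it onto the
+ * HTT_RX_PPDU_START_PREAMBLE_* values above. The helper name is hypothetical
+ * and the block is not compiled.
+ */
+#if 0
+static bool example_ppdu_is_vht(const struct rx_ppdu_start *ppdu)
+{
+ u32 info1 = __le32_to_cpu(ppdu->info1);
+ u8 preamble = (info1 & RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK) >>
+ RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB;
+
+ return preamble == HTT_RX_PPDU_START_PREAMBLE_VHT ||
+ preamble == HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF;
+}
+#endif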
+
+#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
+
+#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+
+struct rx_ppdu_end_common {
+ __le32 evm_p0;
+ __le32 evm_p1;
+ __le32 evm_p2;
+ __le32 evm_p3;
+ __le32 evm_p4;
+ __le32 evm_p5;
+ __le32 evm_p6;
+ __le32 evm_p7;
+ __le32 evm_p8;
+ __le32 evm_p9;
+ __le32 evm_p10;
+ __le32 evm_p11;
+ __le32 evm_p12;
+ __le32 evm_p13;
+ __le32 evm_p14;
+ __le32 evm_p15;
+ __le32 tsf_timestamp;
+ __le32 wb_timestamp;
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+} __packed;
+
+struct rx_ppdu_end_qca988x {
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
+#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
+#define RX_PPDU_END_RTT_UNUSED_LSB 24
+#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
+
+struct rx_ppdu_end_qca6174 {
+ __le32 rtt; /* %RX_PPDU_END_RTT_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_qca988x qca988x;
+ struct rx_ppdu_end_qca6174 qca6174;
+ } __packed;
+} __packed;
+
+/*
+ * evm_p0
+ * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ * EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ * EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ * EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ * EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ * EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ * EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ * EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ * EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ * EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ * EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ * EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ * EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ * EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ * EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ * EVM for pilot 15. Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ * Receive TSF timestamp sampled on the rising edge of
+ * rx_clear. For PHY errors this may be the current TSF when
+ * phy_error is asserted if the rx_clear does not assert before
+ * the end of the PHY error.
+ *
+ * wb_timestamp
+ * WLAN/BT timestamp is a 1 usec resolution timestamp which,
+ * unlike the TSF, does not get updated based on received
+ * beacons. The same rules for capturing tsf_timestamp are
+ * used to capture the wb_timestamp.
+ *
+ * locationing_timestamp
+ * Timestamp used for locationing. This timestamp is used to
+ * indicate fractions of usec. For example if the MAC clock is
+ * running at 80 MHz, the timestamp will increment every 12.5
+ * nsec. The value starts at 0 and increments to 79 and
+ * returns to 0 and repeats. This information is valid for
+ * every PPDU. This information can be used in conjunction
+ * with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ * See section 1.10.8.1.2 for the list of PHY error codes.
+ *
+ * phy_err
+ * Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ * Indicates that location information was requested.
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_18
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ * Receive antenna value
+ *
+ * tx_ht_vht_ack
+ * Indicates that a HT or VHT Ack/BA frame was transmitted in
+ * response to this receive packet.
+ *
+ * bb_captured_channel
+ * Indicates that the BB has captured a channel dump. FW can
+ * then read the channel dump memory. This may indicate that
+ * the channel was captured either based on the PCU setting the
+ * capture_channel bit in the BB descriptor or the FW setting
+ * the capture_channel mode bit.
+ *
+ * reserved_19
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ * Indicates the number of bytes of baseband information for
+ * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
+ * which indicates that this is not a normal PPDU but rather
+ * contains baseband debug information.
+ *
+ * reserved_20
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ * PPDU end status is only valid when ppdu_done bit is set.
+ * Every time HW sets this bit in memory FW/SW must clear this
+ * bit in memory. FW will initialize all the ppdu_done dwords
+ * to 0.
+*/
+
+#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
+#define FW_RX_DESC_INFO0_FORWARD (1 << 1)
+#define FW_RX_DESC_INFO0_INSPECT (1 << 5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB 6
+
+struct fw_rx_desc_base {
+ u8 info0;
+} __packed;
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
new file mode 100644
index 000000000..d22addf61
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static void send_fft_sample(struct ath10k *ar,
+ const struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+
+ if (!ar->spectral.rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
+ u8 *data)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos < max_index || -dc_pos >= max_index)
+ return 0;
+
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ struct fft_sample_ath10k *fft_sample;
+ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
+ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
+ u32 reg0, reg1;
+ u8 chain_idx, *bins;
+ int dc_pos;
+
+ fft_sample = (struct fft_sample_ath10k *)&buf;
+
+ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+ /* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
+ * but the results/plots suggest that it's actually 22/44/88 MHz.
+ */
+ switch (phyerr->chan_width_mhz) {
+ case 20:
+ fft_sample->chan_width_mhz = 22;
+ break;
+ case 40:
+ fft_sample->chan_width_mhz = 44;
+ break;
+ case 80:
+ /* TODO: As experiments with an analogue sender and various
+ * configurations (FFT sizes of 64/128/256 and 20/40/80 MHz)
+ * show, the particular configuration of 80 MHz/64 bins does
+ * not match the other samples at all. Until the reason
+ * for that is found, don't report these samples.
+ */
+ if (bin_len == 64)
+ return -EINVAL;
+ fft_sample->chan_width_mhz = 88;
+ break;
+ default:
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
+ }
+
+ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
+ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
+ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
+ fft_sample->rssi = phyerr->rssi_combined;
+
+ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
+ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
+ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
+ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
+
+ freq1 = __le16_to_cpu(phyerr->freq1);
+ freq2 = __le16_to_cpu(phyerr->freq2);
+ fft_sample->freq1 = __cpu_to_be16(freq1);
+ fft_sample->freq2 = __cpu_to_be16(freq2);
+
+ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
+
+ fft_sample->noise = __cpu_to_be16(
+ __le16_to_cpu(phyerr->nf_chains[chain_idx]));
+
+ bins = (u8 *)fftr;
+ bins += sizeof(*fftr);
+
+ fft_sample->tsf = __cpu_to_be64(tsf);
+
+ /* max_exp has been directly reported by previous hardware (ath9k),
+ * maybe it's possible to get it by other means?
+ */
+ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
+ bin_len, bins);
+
+ memcpy(fft_sample->data, bins, bin_len);
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and invalid, interpolate it.
+ */
+ dc_pos = bin_len / 2;
+ fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
+ fft_sample->data[dc_pos - 1]) / 2;
+
+ send_fft_sample(ar, &fft_sample->tlv);
+
+ return 0;
+}
+
+static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (list_empty(&ar->arvifs))
+ return NULL;
+
+ /* if there already is a vif doing spectral, return that. */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->spectral_enabled)
+ return arvif;
+
+ /* otherwise, return the first vif. */
+ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath10k_spectral_scan_trigger(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int res;
+ int vdev_id;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+ vdev_id = arvif->vdev_id;
+
+ if (ar->spectral.mode == SPECTRAL_DISABLED)
+ return 0;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath10k_spectral_scan_config(struct ath10k *ar,
+ enum ath10k_spectral_mode mode)
+{
+ struct wmi_vdev_spectral_conf_arg arg;
+ struct ath10k_vif *arvif;
+ int vdev_id, count, res = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ vdev_id = arvif->vdev_id;
+
+ arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
+ ar->spectral.mode = mode;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
+ return res;
+ }
+
+ if (mode == SPECTRAL_DISABLED)
+ return 0;
+
+ if (mode == SPECTRAL_BACKGROUND)
+ count = WMI_SPECTRAL_COUNT_DEFAULT;
+ else
+ count = max_t(u8, 1, ar->spectral.config.count);
+
+ arg.vdev_id = vdev_id;
+ arg.scan_count = count;
+ arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
+ arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
+ arg.scan_fft_size = ar->spectral.config.fft_size;
+ arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
+ arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+ arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+ arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+ arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+ arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+ arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+ arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+ arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
+ arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+ arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
+ arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+ arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+ res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
+ return res;
+ }
+
+ return 0;
+}
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *mode = "";
+ unsigned int len;
+ enum ath10k_spectral_mode spectral_mode;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_mode = ar->spectral.mode;
+ mutex_unlock(&ar->conf_mutex);
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ int res;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ if (ar->spectral.mode == SPECTRAL_MANUAL ||
+ ar->spectral.mode == SPECTRAL_BACKGROUND) {
+ /* reset the configuration to adopt possibly changed
+ * debugfs parameters
+ */
+ res = ath10k_spectral_scan_config(ar,
+ ar->spectral.mode);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
+ res);
+ }
+ res = ath10k_spectral_scan_trigger(ar);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
+ res);
+ }
+ } else {
+ res = -EINVAL;
+ }
+ } else if (strncmp("background", buf, 9) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
+ } else if (strncmp("manual", buf, 6) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
+ } else if (strncmp("disable", buf, 7) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
+ } else {
+ res = -EINVAL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ if (res < 0)
+ return res;
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+ u8 spectral_count;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_count = ar->spectral.config.count;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", spectral_count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.count = val;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_bins(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int len, bins, fft_size, bin_scale;
+
+ mutex_lock(&ar->conf_mutex);
+
+ fft_size = ar->spectral.config.fft_size;
+ bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ bins = 1 << (fft_size - bin_scale);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", bins);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_bins(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ if (!is_power_of_2(val))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.fft_size = ilog2(val);
+ ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_bins = {
+ .read = read_file_spectral_bins,
+ .write = write_file_spectral_bins,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+int ath10k_spectral_start(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ arvif->spectral_enabled = 0;
+
+ ar->spectral.mode = SPECTRAL_DISABLED;
+ ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
+ ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+
+ return 0;
+}
+
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ if (!arvif->spectral_enabled)
+ return 0;
+
+ return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
+}
+
+int ath10k_spectral_create(struct ath10k *ar)
+{
+ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
+ ar->debug.debugfs_phy,
+ 1024, 256,
+ &rfs_spec_scan_cb, NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_count",
+ S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_bins",
+ S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_bins);
+
+ return 0;
+}
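+
+/* Illustrative only, not part of the original patch: the files created above
+ * form the user-space interface of the spectral scan. Assuming the usual
+ * ath10k debugfs layout (debugfs_phy typically sits under
+ * ieee80211/phyX/ath10k/), a user would write "background" or "manual" to
+ * spectral_scan_ctl to pick a mode, write "trigger" to start sampling, read
+ * the binary FFT samples from the relay file (typically spectral_scan0) and
+ * write "disable" to stop the scan. spectral_count and spectral_bins tune the
+ * sample count and the number of bins before triggering.
+ */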
+
+void ath10k_spectral_destroy(struct ath10k *ar)
+{
+ if (ar->spectral.rfs_chan_spec_scan) {
+ relay_close(ar->spectral.rfs_chan_spec_scan);
+ ar->spectral.rfs_chan_spec_scan = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
new file mode 100644
index 000000000..042f5b302
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+#include "../spectral_common.h"
+
+/**
+ * struct ath10k_spec_scan - parameters for Atheros spectral scan
+ *
+ * @count: number of scan results requested for manual mode
+ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
+ */
+struct ath10k_spec_scan {
+ u8 count;
+ u8 fft_size;
+};
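+
+/* Illustrative only, not part of the original patch: with the default bin
+ * scale used by this driver (WMI_SPECTRAL_BIN_SCALE_DEFAULT, assumed here to
+ * be 1), fft_size = 7 requests 2^(7 - 1) = 64 bins and fft_size = 9 requests
+ * 2^(9 - 1) = 256 bins, which matches the 64..SPECTRAL_ATH10K_MAX_NUM_BINS
+ * range accepted by the spectral_bins debugfs file.
+ */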
+
+/* enum ath10k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ */
+enum ath10k_spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+};
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf);
+int ath10k_spectral_start(struct ath10k *ar);
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
+int ath10k_spectral_create(struct ath10k *ar);
+void ath10k_spectral_destroy(struct ath10k *ar);
+
+#else
+
+static inline int
+ath10k_spectral_process_fft(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_spectral_destroy(struct ath10k *ar)
+{
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 000000000..a417aae52
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+#include "hw.h"
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure. It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
+#define HOST_INTEREST_MAX_SIZE 0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused0c; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to SOC_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /* Host uses BE CPU or not */
+ u32 hi_be; /* 0x44 */
+
+ u32 hi_stack; /* normal stack */ /* 0x48 */
+ u32 hi_err_stack; /* error stack */ /* 0x4c */
+ u32 hi_desired_cpu_speed_hz; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+
+ /*
+ * Indication of Board Data state:
+ * 0: board data is not yet initialized.
+ * 1: board data is initialized; unknown size
+ * >1: number of bytes of initialized board data
+ */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_table; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+ /* Pointer to extended board Data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+ /*
+ * Bit [0]: valid
+ * Bit [31:16]: size
+ */
+ /*
+ * hi_reset_flag is used to control what happens at target reset,
+ * such as restoring app_start after a warm reset, or preserving
+ * the host interest area, ROM data, literals, etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /* 0xbc - [31:0]: idle timeout in ms */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+ /* wow extension configuration */
+ u32 hi_wow_ext_config; /* 0xec */
+ u32 hi_pwr_save_flags; /* 0xf0 */
+
+ /* Spatial Multiplexing Power Save (SMPS) options */
+ u32 hi_smps_options; /* 0xf4 */
+
+ /* Interconnect-specific state */
+ u32 hi_interconnect_state; /* 0xf8 */
+
+ /* Coex configuration flags */
+ u32 hi_coex_config; /* 0xfc */
+
+ /* Early allocation support */
+ u32 hi_early_alloc; /* 0x100 */
+ /* FW swap field */
+ /*
+ * Bits of this 32bit word will be used to pass specific swap
+ * instruction to FW
+ */
+ /*
+ * Bit 0 -- AP Nart descriptor no swap. When this bit is set
+ * the FW will not swap the TX descriptor, meaning packets are
+ * formed on the target processor.
+ */
+ /* Bit 1 - unused */
+ u32 hi_fw_swap; /* 0x104 */
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
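+
+/* Illustrative only, not part of the original patch: a sketch of how HI_ITEM
+ * is meant to be combined with the host_interest base address above to form
+ * the Target RAM address of an individual item, e.g. for a BMI read. The
+ * helper name is hypothetical and the block is not compiled.
+ */
+#if 0
+static u32 example_hi_item_addr_qca988x(size_t item_offset)
+{
+ /* e.g. example_hi_item_addr_qca988x(HI_ITEM(hi_board_data)) */
+ return QCA988X_HOST_INTEREST_ADDRESS + (u32)item_offset;
+}
+#endif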
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR 0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT 0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR 0x04
+/* MAC addr method: 0 - locally administered, 1 - globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD 0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE 0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE 0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG 0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE 0x100
+#define HI_OPTION_NUM_DEV_LSB 0x200
+#define HI_OPTION_NUM_DEV_MSB 0x800
+#define HI_OPTION_DEV_MODE_LSB 0x1000
+#define HI_OPTION_DEV_MODE_MSB 0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL 0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN 0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN 0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP 0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent the 4 firmware modes */
+#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK 0x7
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+| SUB | SUB | SUB | SUB | | | | |
+|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
+| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_MASK 0x3
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_MASK 0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
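+
+/*
+ * Illustrative sketch only (an assumption based on the _BITS and _SHIFT
+ * macros above, not stated in the original header): the firmware mode
+ * and submode of device 'i' could be extracted from hi_option_flag as
+ *
+ *   mode_i    = (opt >> (HI_OPTION_FW_MODE_SHIFT +
+ *                        i * HI_OPTION_FW_MODE_BITS)) &
+ *               HI_OPTION_FW_MODE_MASK;
+ *   submode_i = (opt >> (HI_OPTION_FW_SUBMODE_SHIFT +
+ *                        i * HI_OPTION_FW_SUBMODE_BITS)) &
+ *               HI_OPTION_FW_SUBMODE_MASK;
+ */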
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU 0x01
+#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT 0x2
+#define HI_OPTION_RF_KILL_MASK 0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
+#define HI_RESET_FLAG_WARM_RESET 0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT 0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID 0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 2..0 UART ID (0 = Default)
+ * 3 Baud Select (0 = 9600, 1 = 115200)
+ * 30..4 Reserved
+ * 31 Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT 0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
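+
+/*
+ * Illustrative example (hypothetical value, not from the original
+ * header): enabling the console on UART 1 at 115200 baud would be
+ * encoded as
+ *
+ *   HI_CONSOLE_FLAGS_ENABLE | HI_CONSOLE_FLAGS_BAUD_SELECT |
+ *   ((1 << HI_CONSOLE_FLAGS_UART_SHIFT) & HI_CONSOLE_FLAGS_UART_MASK)
+ *
+ * i.e. 0x80000009.
+ */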
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK (0x00000001)
+#define HI_SMPS_MODE_MASK (0x00000002)
+#define HI_SMPS_MODE_STATIC (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT (3)
+#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT (11)
+#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT (15)
+#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 8..0 Size of each WOW pattern (max 511)
+ * 15..9 Number of patterns per list (max 127)
+ * 17..16 Number of lists (max 4)
+ * 30..18 Reserved
+ * 31 Enabled
+ *
+ * set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT 16
+#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+ ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+ HI_WOW_EXT_NUM_LIST_MASK) | \
+ (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+ HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+ (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+ HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+ (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+ (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+ HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+ (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+ HI_WOW_EXT_PATTERN_SIZE_SHIFT)
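+
+/*
+ * Illustrative example (the numbers are hypothetical): packing two
+ * pattern lists with 16 patterns of 256 bytes each gives
+ *
+ *   HI_WOW_EXT_MAKE_CONFIG(2, 16, 256) == 0x22100
+ *
+ * and the HI_WOW_EXT_GET accessors above recover 2, 16 and 256 from
+ * that value. The enable bit is set separately via
+ * HI_WOW_EXT_ENABLED_MASK.
+ */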
+
+/*
+ * Early allocation configuration
+ * Support RAM bank configuration before BMI done and this eases the memory
+ * allocation at very early stage
+ * Bit Range Meaning
+ * --------- ----------------------------------
+ * [0:3] number of bank assigned to be IRAM
+ * [4:15] reserved
+ * [16:31] magic number
+ *
+ * Note:
+ * 1. target firmware would check magic number and if it's a match, firmware
+ * would consider the bits[0:15] are valid and base on that to calculate
+ * the end of DRAM. Early allocation would be located at that area and
+ * may be reclaimed when necesary
+ * 2. if no magic number is found, early allocation would happen at "_end"
+ * symbol of ROM which is located before the app-data and might NOT be
+ * re-claimable. If this is adopted, link script should keep this in
+ * mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC 0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
+
+#define HI_EARLY_ALLOC_VALID() \
+ ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+ HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+ (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+ >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
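+
+/*
+ * Illustrative sketch only (an assumption, not part of the original
+ * header): the host side would typically program hi_early_alloc with
+ * the magic in the upper half-word and the IRAM bank count in the low
+ * bits, e.g.
+ *
+ *   (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) |
+ *   (nbanks & HI_EARLY_ALLOC_IRAM_BANKS_MASK)
+ *
+ * where nbanks is the desired bank count, so that
+ * HI_EARLY_ALLOC_VALID() evaluates true on the target.
+ */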
+
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED 0x1
+/* b1-b3: reserved */
+/*
+ * b4-b5: dev0 LPL type: 0 - none,
+ * 1 - reduce pwr search,
+ * 2 - reduce pwr listen
+ */
+/* b6-b7: dev1 LPL type, and so on for a max of 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB 4
+#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+ ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
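+/*
+ * Illustrative note (a reading of the macro above, not from the
+ * original header): HI_DEV_LPL_TYPE_GET() returns the masked field
+ * still in place; shifting the result right by
+ * (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2) yields the LPL type
+ * (0 - none, 1 - reduce pwr search, 2 - reduce pwr listen).
+ */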
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ 7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#define QCA6174_BOARD_DATA_SZ 8192
+#define QCA6174_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
new file mode 100644
index 000000000..c03c4ac51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "testmode.h"
+
+#include <net/netlink.h>
+#include <linux/firmware.h>
+
+#include "debug.h"
+#include "wmi.h"
+#include "hif.h"
+#include "hw.h"
+
+#include "testmode_i.h"
+
+static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
+ [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH10K_TM_DATA_MAX_LEN },
+ [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ bool consumed;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+ /* Only testmode.c should be handling events from utf firmware,
+ * otherwise all sorts of problems will arise as mac80211 operations
+ * are not initialised.
+ */
+ consumed = true;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar,
+ "failed to allocate skb for testmode wmi event\n");
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to to put testmode wmi even cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH10K_TESTMODE_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+{
+ char filename[100];
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_UTF) {
+ ret = -EALREADY;
+ goto err;
+ }
+
+ /* start utf only when the driver is not in use */
+ if (ar->state != ATH10K_STATE_OFF) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (WARN_ON(ar->testmode.utf != NULL)) {
+ /* utf image is already downloaded, but it shouldn't be */
+ ret = -EEXIST;
+ goto err;
+ }
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+ /* load utf firmware image */
+ ret = reject_firmware(&ar->testmode.utf, filename, ar->dev);
+ if (ret) {
+ ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+ filename, ret);
+ goto err;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->testmode.utf_monitor = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ BUILD_BUG_ON(sizeof(ar->fw_features) !=
+ sizeof(ar->testmode.orig_fw_features));
+
+ memcpy(ar->testmode.orig_fw_features, ar->fw_features,
+ sizeof(ar->fw_features));
+ ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
+
+ /*(DEBLOBBED)*/
+ /* firmware image does not advertise firmware features. Do
+ * an ugly hack where we force the firmware features so that wmi.c
+ * will use the correct WMI interface.
+ */
+ memset(ar->fw_features, 0, sizeof(ar->fw_features));
+ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_fw_features;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+ if (ret) {
+ ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_power_down;
+ }
+
+ ar->state = ATH10K_STATE_UTF;
+
+ ath10k_info(ar, "UTF firmware started\n");
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_fw_features:
+ /* return the original firmware features */
+ memcpy(ar->fw_features, ar->testmode.orig_fw_features,
+ sizeof(ar->fw_features));
+ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+
+ release_firmware(ar->testmode.utf);
+ ar->testmode.utf = NULL;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->testmode.utf_monitor = false;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* return the original firmware features */
+ memcpy(ar->fw_features, ar->testmode.orig_fw_features,
+ sizeof(ar->fw_features));
+ ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+
+ release_firmware(ar->testmode.utf);
+ ar->testmode.utf = NULL;
+
+ ar->state = ATH10K_STATE_OFF;
+}
+
+static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+ ret = 0;
+
+ ath10k_info(ar, "UTF firmware stopped\n");
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret, buf_len;
+ u32 cmd_id;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath10k *ar = hw->priv;
+ struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
+ ath10k_tm_policy);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH10K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
+ case ATH10K_TM_CMD_GET_VERSION:
+ return ath10k_tm_cmd_get_version(ar, tb);
+ case ATH10K_TM_CMD_UTF_START:
+ return ath10k_tm_cmd_utf_start(ar, tb);
+ case ATH10K_TM_CMD_UTF_STOP:
+ return ath10k_tm_cmd_utf_stop(ar, tb);
+ case ATH10K_TM_CMD_WMI:
+ return ath10k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void ath10k_testmode_destroy(struct ath10k *ar)
+{
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ /* utf firmware is not running, nothing to do */
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+}
diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h
new file mode 100644
index 000000000..9cdd15081
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath10k_testmode_destroy(struct ath10k *ar);
+
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath10k_testmode_destroy(struct ath10k *ar)
+{
+}
+
+static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
new file mode 100644
index 000000000..ba81bf66c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* "API" level of the ath10k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH10K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH10K_TESTMODE_VERSION_MINOR 0
+
+#define ATH10K_TM_DATA_MAX_LEN 5000
+
+enum ath10k_tm_attr {
+ __ATH10K_TM_ATTR_INVALID = 0,
+ ATH10K_TM_ATTR_CMD = 1,
+ ATH10K_TM_ATTR_DATA = 2,
+ ATH10K_TM_ATTR_WMI_CMDID = 3,
+ ATH10K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH10K_TM_ATTR_VERSION_MINOR = 5,
+
+ /* keep last */
+ __ATH10K_TM_ATTR_AFTER_LAST,
+ ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath10k testmode interface commands specified in
+ * ATH10K_TM_ATTR_CMD
+ */
+enum ath10k_tm_cmd {
+ /* Returns the supported ath10k testmode interface version in
+ * ATH10K_TM_ATTR_VERSION_MAJOR and ATH10K_TM_ATTR_VERSION_MINOR.
+ * Always guaranteed to work. User space uses this to verify that
+ * it's using the correct version of the testmode interface.
+ */
+ ATH10K_TM_CMD_GET_VERSION = 0,
+
+ /* Boots the UTF firmware; the netdev interface must be down at the
+ * time.
+ */
+ ATH10K_TM_CMD_UTF_START = 1,
+
+ /* Shuts down the UTF firmware and puts the driver back into OFF
+ * state.
+ */
+ ATH10K_TM_CMD_UTF_STOP = 2,
+
+ /* The command used to transmit a WMI command to the firmware and
+ * the event used to receive WMI events from the firmware. The data
+ * is carried without the struct wmi_cmd_hdr header, only the WMI
+ * payload. The command id is provided with ATH10K_TM_ATTR_WMI_CMDID
+ * and the payload in ATH10K_TM_ATTR_DATA.
+ */
+ ATH10K_TM_CMD_WMI = 3,
+};
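+
+/*
+ * Illustrative usage sketch (an assumption, not part of the original
+ * interface description): a user space tool would typically drive this
+ * interface through nl80211 testmode, i.e. send NL80211_CMD_TESTMODE
+ * with the following attributes nested inside NL80211_ATTR_TESTDATA:
+ *
+ *   ATH10K_TM_ATTR_CMD       = ATH10K_TM_CMD_WMI
+ *   ATH10K_TM_ATTR_WMI_CMDID = <WMI command id>
+ *   ATH10K_TM_ATTR_DATA      = <raw WMI payload>
+ *
+ * which matches how ath10k_tm_cmd() parses the attributes in
+ * testmode.c.
+ */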
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 000000000..aede75080
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
+ enum wmi_vdev_type type)
+{
+ struct ath10k_vif *arvif;
+ int count = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (!arvif->is_up)
+ continue;
+
+ if (arvif->vdev_type != type)
+ continue;
+
+ count++;
+ }
+ return count;
+}
+
+static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+
+ return 0;
+}
+
+static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.duty_cycle;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
+ unsigned long duty_cycle)
+{
+ struct ath10k *ar = cdev->devdata;
+ u32 period, duration, enabled;
+ int num_bss, ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
+ ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
+ duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+ /* TODO: Right now, thermal mitigation is handled only for single/multi
+ * vif AP mode. Since quiet param is not validated in STA mode, it needs
+ * to be investigated further to handle multi STA and multi-vif (AP+STA)
+ * mode properly.
+ */
+ num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
+ if (!num_bss) {
+ ath10k_warn(ar, "no active AP interfaces\n");
+ ret = -ENETDOWN;
+ goto out;
+ }
+ period = max(ATH10K_QUIET_PERIOD_MIN,
+ (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
+ duration = (period * duty_cycle) / 100;
+ enabled = duration ? 1 : 0;
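+ /* Worked example (illustrative numbers, not from the original code):
+ * with one active AP vif and a requested duty_cycle of 40,
+ * period = max(25, 100 / 1) = 100, duration = (100 * 40) / 100 = 40
+ * and enabled = 1, i.e. the firmware is asked to stay quiet for 40
+ * out of every 100 quiet-period units.
+ */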
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ goto out;
+ }
+ ar->thermal.duty_cycle = duty_cycle;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops ath10k_thermal_ops = {
+ .get_max_state = ath10k_thermal_get_max_dutycycle,
+ .get_cur_state = ath10k_thermal_get_cur_dutycycle,
+ .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+};
+
+static ssize_t ath10k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath10k_wmi_pdev_get_temperature(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegrees Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath10k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath10k_hwmon);
+
+int ath10k_thermal_register(struct ath10k *ar)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ int ret;
+
+ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+ &ath10k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath10k_err(ar, "failed to create thermal symlink\n");
+ goto err_cooling_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+
+ /* Do not register hwmon device when temperature reading is not
+ * supported by firmware
+ */
+ if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
+ return 0;
+
+ /* Avoid a linking error on devm_hwmon_device_register_with_groups();
+ * linux/hwmon.h seems to be missing proper stubs when CONFIG_HWMON
+ * is disabled.
+ */
+ if (!config_enabled(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+ "ath10k_hwmon", ar,
+ ath10k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath10k_err(ar, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_remove_link;
+ }
+ return 0;
+
+err_remove_link:
+ sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+err_cooling_destroy:
+ thermal_cooling_device_unregister(cdev);
+ return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000..bccc17ae0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _THERMAL_
+#define _THERMAL_
+
+#define ATH10K_QUIET_PERIOD_DEFAULT 100
+#define ATH10K_QUIET_PERIOD_MIN 25
+#define ATH10K_QUIET_START_OFFSET 10
+#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
+#define ATH10K_HWMON_NAME_LEN 15
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+
+struct ath10k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 duty_cycle;
+ /* temperature value in degrees Celsius,
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#ifdef CONFIG_THERMAL
+int ath10k_thermal_register(struct ath10k *ar);
+void ath10k_thermal_unregister(struct ath10k *ar);
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+#else
+static inline int ath10k_thermal_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_thermal_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 000000000..4a31e2c6f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 000000000..540788738
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf)
+{
+ const struct ieee80211_hdr *hdr = buf;
+
+ return ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+ TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
+ TP_ARGS(ar, level, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+ TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
+ int ret),
+
+ TP_ARGS(ar, id, buf, buf_len, ret),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ __entry->ret = ret;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu ret %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_dbglog,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_pktlog,
+ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_tx,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+ u8 vdev_id, u8 tid),
+
+ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ __field(u16, msdu_len)
+ __field(u8, vdev_id)
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ __entry->msdu_len = msdu_len;
+ __entry->vdev_id = vdev_id;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id,
+ __entry->msdu_len,
+ __entry->vdev_id,
+ __entry->tid
+ )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+ TP_ARGS(ar, msdu_id),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(data), data, __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len - ath10k_frm_hdr_len(data);
+ memcpy(__get_dynamic_array(payload),
+ data + ath10k_frm_hdr_len(data), __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+TRACE_EVENT(ath10k_htt_rx_desc,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag_container,
+ TP_PROTO(struct ath10k *ar,
+ u8 type,
+ u32 timestamp,
+ u32 code,
+ u16 len,
+ const void *data),
+
+ TP_ARGS(ar, type, timestamp, code, len, data),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, type)
+ __field(u32, timestamp)
+ __field(u32, code)
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ __entry->code = code;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s diag container type %hhu timestamp %u code %u len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->type,
+ __entry->timestamp,
+ __entry->code,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 000000000..3f00cec8a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+ return;
+
+ /* If the original wait_for_completion() timed out before
+ * {data,mgmt}_tx_completed() was called then we could complete
+ * offchan_tx_completed for a different skb. Prevent this by using
+ * offchan_tx_skb. */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->offchan_tx_skb != skb) {
+ ath10k_warn(ar, "completed old offchannel frame\n");
+ goto out;
+ }
+
+ complete(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = NULL; /* just for sanity */
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_tx_info *info;
+ struct ath10k_skb_cb *skb_cb;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
+ return;
+ }
+
+ msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+ if (!msdu) {
+ ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+ tx_done->msdu_id);
+ return;
+ }
+
+ skb_cb = ATH10K_SKB_CB(msdu);
+
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ if (skb_cb->htt.txbuf)
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
+
+ ath10k_report_offchan_tx(htt->ar, msdu);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
+ if (tx_done->discard) {
+ ieee80211_free_txskb(htt->ar->hw, msdu);
+ goto exit;
+ }
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (tx_done->no_ack)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status(htt->ar->hw, msdu);
+ /* we do not own the msdu anymore */
+
+exit:
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ __ath10k_htt_tx_dec_pending(htt);
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (memcmp(peer->addr, addr, ETH_ALEN))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (test_bit(peer_id, peer->peer_ids))
+ return peer;
+
+ return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ar->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ar->data_lock);
+ mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
+ }), 3*HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer->addr, ev->addr);
+ list_add(&peer->list, &ar->peers);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ ev->vdev_id, ev->addr, ev->peer_id);
+
+ set_bit(ev->peer_id, peer->peer_ids);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
+ ev->peer_id);
+ goto exit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, ev->peer_id);
+
+ clear_bit(ev->peer_id, peer->peer_ids);
+
+ if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 000000000..a90e09f5c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000..c8b64e7a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1063 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
+struct wmi_ops {
+ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+
+ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg);
+ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg);
+ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg);
+ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg);
+ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg);
+ int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg);
+ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg);
+ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg);
+ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+
+ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
+ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+ u32 value);
+ struct sk_buff *(*gen_init)(struct ath10k *ar);
+ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg);
+ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg);
+ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN]);
+ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart);
+ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ u32 tid_bitmap);
+ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value);
+ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg);
+ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode);
+ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg);
+ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab);
+ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms);
+ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+ u32 log_level);
+ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+ u32 period, u32 duration,
+ u32 next_offset,
+ u32 enabled);
+ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac);
+ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 buf_size);
+ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid,
+ u32 status);
+ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 initiator,
+ u32 reason);
+ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len);
+ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *bcn);
+ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac);
+ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+};
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->rx(ar, skb);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_scan)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_rx)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_ch_info)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_vdev_start)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_peer_kick)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_swba)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (!ar->wmi.ops->pull_fw_stats)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret)
+ return ret;
+
+ /* FIXME There's no ACK event for Management Tx. This probably
+ * shouldn't be called here either. */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_rd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+ dfs_reg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_suspend)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_resume)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_resume(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_init)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_init(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_start_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_start_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_stop_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_stop)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_up)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_down)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+ u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_install_key)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+ enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+ num_ac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_flush)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+ enum wmi_peer_param param_id, u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_psmode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_sta_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_ap_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_scan_chan_list)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_assoc)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_beacon_dma)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+ dtim_zero, deliver_cab);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_wmm)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_stats)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_force_fw_hang)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_dbglog_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_disable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_disable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+ u32 next_offset, u32 enabled)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+ next_offset, enabled);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_temperature)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_clear_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_set_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_delba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+ reason);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+ struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bcn_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+ prb_caps, prb_erp, prb_ies,
+ prb_ies_len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_prb_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_sta_keepalive)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+#endif
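Every inline wrapper in wmi-ops.h above follows the same dispatch shape: check that the firmware-specific ops table provides the generator, build the command buffer, then hand it to ath10k_wmi_cmd_send() with the matching command id. A minimal, self-contained sketch of that pattern, using hypothetical names (struct msg, struct fw_ops, do_ping, the 0x9001 id) rather than the kernel types, could look like this:

/*
 * Standalone illustration (not kernel code) of the ops-table dispatch used
 * above: a per-firmware ops table supplies "gen_*" builders and a thin
 * wrapper checks for the op, builds the message and sends it with the
 * matching command id. All names here are hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	unsigned int len;
	unsigned char data[64];
};

struct fw_ops {
	/* left NULL when this firmware flavour has no such command */
	struct msg *(*gen_ping)(unsigned int seq);
};

static struct msg *gen_ping_v1(unsigned int seq)
{
	struct msg *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->len = snprintf((char *)m->data, sizeof(m->data), "ping %u", seq);
	return m;
}

static int send_cmd(struct msg *m, unsigned int cmd_id)
{
	printf("sending cmd 0x%x: %s\n", cmd_id, (const char *)m->data);
	free(m);
	return 0;
}

/* analogue of the ath10k_wmi_*() inline wrappers */
static int do_ping(const struct fw_ops *ops, unsigned int seq)
{
	struct msg *m;

	if (!ops->gen_ping)
		return -EOPNOTSUPP;

	m = ops->gen_ping(seq);
	if (!m)
		return -ENOMEM;

	return send_cmd(m, 0x9001);
}

int main(void)
{
	const struct fw_ops v1 = { .gen_ping = gen_ping_v1 };
	const struct fw_ops v2 = { 0 };	/* command not implemented */

	printf("v1: %d\n", do_ping(&v1, 1));
	printf("v2: %d\n", do_ping(&v2, 1));
	return 0;
}

Firmware variants that lack a given command simply leave the pointer NULL and callers get -EOPNOTSUPP back, which mirrors how the wrappers above report unsupported commands.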
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 000000000..ee0c5f602
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,2796 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "core.h"
+#include "debug.h"
+#include "hw.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+#include "wmi-tlv.h"
+
+/***************/
+/* TLV helpers */
+/***************/
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TLV_TAG_ARRAY_BYTE]
+ = { .min_len = sizeof(u8) },
+ [WMI_TLV_TAG_ARRAY_UINT32]
+ = { .min_len = sizeof(u32) },
+ [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
+ [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_response_event) },
+ [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
+ = { .min_len = sizeof(struct wmi_host_swba_event) },
+ [WMI_TLV_TAG_STRUCT_TIM_INFO]
+ = { .min_len = sizeof(struct wmi_tim_info) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
+ = { .min_len = sizeof(struct wmi_p2p_noa_info) },
+ [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct hal_reg_capabilities) },
+ [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
+ = { .min_len = sizeof(struct wlan_host_mem_req) },
+ [WMI_TLV_TAG_STRUCT_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
+ [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+};
+
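+/* Walk a buffer of TLVs. Each element is a 16-bit tag and 16-bit length
+ * (struct wmi_tlv) followed by tlv_len bytes of value; the callback is
+ * invoked once per element and known tags are checked against the minimum
+ * lengths in wmi_tlv_policies.
+ */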
+static int
+ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
+ int (*iter)(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = __le16_to_cpu(tlv->tag);
+ tlv_len = __le16_to_cpu(tlv->len);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ar, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TLV_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
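+/* Value pointers collected in the tb[] lookup table point just past their
+ * TLV header, so the element length can be read back from the preceding
+ * struct wmi_tlv.
+ */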
+static u16 ath10k_wmi_tlv_len(const void *ptr)
+{
+ return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
+}
+
+/**************/
+/* TLV events */
+/**************/
+static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_bcn_tx_status_ev *ev;
+ u32 vdev_id, tx_status;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ tx_status = __le32_to_cpu(ev->tx_status);
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ switch (tx_status) {
+ case WMI_TLV_BCN_TX_STATUS_OK:
+ break;
+ case WMI_TLV_BCN_TX_STATUS_XRETRY:
+ case WMI_TLV_BCN_TX_STATUS_DROP:
+ case WMI_TLV_BCN_TX_STATUS_FILTERED:
+ /* FIXME: It's probably worth telling mac80211 to stop the
+ * interface as it is crippled.
+ */
+ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
+ vdev_id, tx_status);
+ break;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_diag_data_ev *ev;
+ const struct wmi_tlv_diag_item *item;
+ const void *data;
+ int ret, num_items, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_items = __le32_to_cpu(ev->num_items);
+ len = ath10k_wmi_tlv_len(data);
+
+ while (num_items--) {
+ if (len == 0)
+ break;
+ if (len < sizeof(*item)) {
+ ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
+ break;
+ }
+
+ item = data;
+
+ if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
+ ath10k_warn(ar, "failed to parse diag data: item is too long\n");
+ break;
+ }
+
+ trace_ath10k_wmi_diag_container(ar,
+ item->type,
+ __le32_to_cpu(item->timestamp),
+ __le32_to_cpu(item->code),
+ __le16_to_cpu(item->len),
+ item->payload);
+
+ len -= sizeof(*item);
+ len -= roundup(__le16_to_cpu(item->len), 4);
+
+ data += sizeof(*item);
+ data += roundup(__le16_to_cpu(item->len), 4);
+ }
+
+ if (num_items != -1 || len != 0)
+ ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
+ num_items, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const void *data;
+ int ret, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+ len = ath10k_wmi_tlv_len(data);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
+ trace_ath10k_wmi_diag(ar, data, len);
+
+ kfree(tb);
+ return 0;
+}
+
+/***********/
+/* TLV ops */
+/***********/
+
+static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_TLV_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_TLV_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_TLV_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_TLV_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_TLV_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_TLV_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_TLV_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_TLV_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_TLV_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_TLV_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_TLV_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_TLV_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_TLV_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_TLV_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_TLV_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
+ break;
+ case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
+ ath10k_wmi_tlv_event_diag_data(ar, skb);
+ break;
+ case WMI_TLV_DIAG_EVENTID:
+ ath10k_wmi_tlv_event_diag(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_rx_ev *ev;
+ const u8 *frame;
+ u32 msdu_len;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
+ frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->channel = ev->channel;
+ arg->buf_len = ev->buf_len;
+ arg->status = ev->status;
+ arg->snr = ev->snr;
+ arg->phy_mode = ev->phy_mode;
+ arg->rate = ev->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+
+ if (skb->len < (frame - skb->data) + msdu_len) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, msdu_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_start_response_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+struct wmi_tlv_swba_parse {
+ const struct wmi_host_swba_event *ev;
+ bool tim_done;
+ bool noa_done;
+ size_t n_tim;
+ size_t n_noa;
+ struct wmi_swba_ev_arg *arg;
+};
+
+static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
+ return -EPROTO;
+
+ if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
+ return -ENOBUFS;
+
+ swba->arg->tim_info[swba->n_tim++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
+ return -EPROTO;
+
+ if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
+ return -ENOBUFS;
+
+ swba->arg->noa_info[swba->n_noa++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
+ swba->ev = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ if (!swba->tim_done) {
+ swba->tim_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_tim_parse,
+ swba);
+ if (ret)
+ return ret;
+ } else if (!swba->noa_done) {
+ swba->noa_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_noa_parse,
+ swba);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_tlv_swba_parse swba = { .arg = arg };
+ u32 map;
+ size_t n_vdevs;
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_swba_parse, &swba);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!swba.ev)
+ return -EPROTO;
+
+ arg->vdev_map = swba.ev->vdev_map;
+
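+ /* each bit set in vdev_map must have a matching TIM and NoA TLV */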
+ for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
+ if (map & BIT(0))
+ n_vdevs++;
+
+ if (n_vdevs != swba.n_tim ||
+ n_vdevs != swba.n_noa)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_phyerr_ev *ev;
+ const void *phyerrs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
+ phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !phyerrs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->num_phyerrs = ev->num_phyerrs;
+ arg->tsf_l32 = ev->tsf_l32;
+ arg->tsf_u32 = ev->tsf_u32;
+ arg->buf_len = ev->buf_len;
+ arg->phyerrs = phyerrs;
+
+ kfree(tb);
+ return 0;
+}
+
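+/* TLV ABI version the host advertises in the init command; the service
+ * ready handler below rejects firmware that reports different values.
+ */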
+#define WMI_TLV_ABI_VER_NS0 0x5F414351
+#define WMI_TLV_ABI_VER_NS1 0x00004C4D
+#define WMI_TLV_ABI_VER_NS2 0x00000000
+#define WMI_TLV_ABI_VER_NS3 0x00000000
+
+#define WMI_TLV_ABI_VER0_MAJOR 1
+#define WMI_TLV_ABI_VER0_MINOR 0
+#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
+ (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
+#define WMI_TLV_ABI_VER1 53
+
+static int
+ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_rdy_ev_arg *arg = data;
+ int i;
+
+ if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
+ return -EPROTO;
+
+ for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
+ if (!arg->mem_reqs[i]) {
+ arg->mem_reqs[i] = ptr;
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
+ reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
+ svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
+ mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+ if (!ev || !reg || !svc_bmap || !mem_reqs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* This is an internal ABI compatibility check for WMI TLV so check it
+ * here instead of the generic WMI code.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
+ __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
+ __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
+ __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
+ __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
+ __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
+
+ if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
+ kfree(tb);
+ return -ENOTSUPP;
+ }
+
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->abi.abi_ver0;
+ arg->sw_ver1 = ev->abi.abi_ver1;
+ arg->fw_build = ev->fw_build_vers;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = reg->eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = svc_bmap;
+ arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+
+ ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
+ ath10k_wmi_tlv_parse_mem_reqs, arg);
+ if (ret) {
+ kfree(tb);
+ ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
+ return ret;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_rdy_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->sw_version = ev->abi.abi_ver0;
+ arg->abi_version = ev->abi.abi_ver1;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+ struct ath10k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+ dst->data_snr = __le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] =
+ __le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] =
+ __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] =
+ __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] =
+ __le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] =
+ __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_tlv_stats_ev *ev;
+ const void *data;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ size_t data_len;
+ int ret;
+ int i;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data_len = ath10k_wmi_tlv_len(data);
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+ num_pdev_stats, num_vdev_stats, num_peer_stats,
+ num_bcnflt_stats, num_chan_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_tlv_vdev_stats *src;
+ struct ath10k_fw_stats_vdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
+{
+ struct wmi_tlv_pdev_suspend *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->opt = __cpu_to_le32(opt);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct wmi_tlv_resume_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->reserved = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
+ u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_tlv_pdev_set_rd_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->regd = __cpu_to_le32(rd);
+ cmd->regd_2ghz = __cpu_to_le32(rd2g);
+ cmd->regd_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
+ u32 param_value)
+{
+ struct wmi_tlv_pdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ struct wmi_tlv_init_cmd *cmd;
+ struct wmi_tlv_resource_config *cfg;
+ struct wmi_host_mem_chunks *chunks;
+ size_t len, chunks_len;
+ void *ptr;
+
+ chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*cfg)) +
+ (sizeof(*tlv) + chunks_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
+ tlv->len = __cpu_to_le16(sizeof(*cfg));
+ cfg = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cfg);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chunks_len);
+ chunks = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += chunks_len;
+
+ cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
+ cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
+ cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
+ cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
+ cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
+ cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+ if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+ cfg->num_offload_peers = __cpu_to_le32(3);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+ } else {
+ cfg->num_offload_peers = __cpu_to_le32(0);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+ }
+
+ cfg->num_peer_keys = __cpu_to_le32(2);
+ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+ cfg->ast_skid_limit = __cpu_to_le32(0x10);
+ cfg->tx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
+ cfg->rx_decap_mode = __cpu_to_le32(1);
+ cfg->scan_max_pending_reqs = __cpu_to_le32(4);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+ cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
+ cfg->num_mcast_groups = __cpu_to_le32(0);
+ cfg->num_mcast_table_elems = __cpu_to_le32(0);
+ cfg->mcast2ucast_mode = __cpu_to_le32(0);
+ cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
+ cfg->num_wds_entries = __cpu_to_le32(0x20);
+ cfg->dma_burst_size = __cpu_to_le32(0);
+ cfg->mac_aggr_delim = __cpu_to_le32(0);
+ cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
+ cfg->vow_config = __cpu_to_le32(0);
+ cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
+ cfg->max_frag_entries = __cpu_to_le32(2);
+ cfg->num_tdls_vdevs = __cpu_to_le32(1);
+ cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
+ cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_multicast_filter_entries = __cpu_to_le32(5);
+ cfg->num_wow_filters = __cpu_to_le32(0x16);
+ cfg->num_keep_alive_pattern = __cpu_to_le32(6);
+ cfg->keep_alive_pattern_size = __cpu_to_le32(0);
+ cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
+ cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+
+ ath10k_wmi_put_host_mem_chunks(ar, chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_tlv_start_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, chan_len, ssid_len, bssid_len, ie_len;
+ __le32 *chans;
+ struct wmi_ssid *ssids;
+ struct wmi_mac_addr *addrs;
+ void *ptr;
+ int i, ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ chan_len = arg->n_channels * sizeof(__le32);
+ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
+ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+ ie_len = roundup(arg->ie_len, 4);
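+ /* A TLV header is emitted for every array below even when the array
+ * itself is empty, so all four headers are accounted for here.
+ */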
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chan_len) +
+ (sizeof(*tlv) + ssid_len) +
+ (sizeof(*tlv) + bssid_len) +
+ (sizeof(*tlv) + ie_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+ cmd->num_channels = __cpu_to_le32(arg->n_channels);
+ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
+ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
+ cmd->ie_len = __cpu_to_le32(arg->ie_len);
+ cmd->num_probes = __cpu_to_le32(3);
+
+ /* FIXME: There are some scan flag inconsistencies across firmwares,
+ * e.g. WMI-TLV inverts the logic behind the following flag.
+ */
+ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(chan_len);
+ chans = (void *)tlv->value;
+ for (i = 0; i < arg->n_channels; i++)
+ chans[i] = __cpu_to_le32(arg->channels[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += chan_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(ssid_len);
+ ssids = (void *)tlv->value;
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
+ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += ssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(bssid_len);
+ addrs = (void *)tlv->value;
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
+
+ ptr += sizeof(*tlv);
+ ptr += bssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ie_len);
+ memcpy(tlv->value, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*tlv);
+ ptr += ie_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_vdev_type vdev_type,
+ enum wmi_vdev_subtype vdev_subtype,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(vdev_type);
+ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_tlv_vdev_start_cmd *cmd;
+ struct wmi_channel *ch;
+ struct wmi_p2p_noa_descriptor *noa;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*ch)) +
+ (sizeof(*tlv) + 0);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ch));
+ ch = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*ch);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = 0;
+ noa = (void *)tlv->value;
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+ ptr += 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
+ if (arg->key_data)
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(arg->key_len, sizeof(__le32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
+ return skb;
+}
+
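+/* Emit a single STA_UAPSD_AUTO_TRIG_PARAM TLV at @ptr and return the
+ * pointer advanced past it; used below to build the per-AC array.
+ */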
+static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
+ const struct wmi_sta_uapsd_auto_trig_arg *arg)
+{
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
+ tlv->len = __cpu_to_le16(sizeof(*ac));
+ ac = (void *)tlv->value;
+
+ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
+ ac->user_priority = __cpu_to_le32(arg->user_priority);
+ ac->service_interval = __cpu_to_le32(arg->service_interval);
+ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
+ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
+ arg->wmm_ac, arg->user_priority, arg->service_interval,
+ arg->suspend_interval, arg->delay_interval);
+
+ return ptr + sizeof(*tlv) + sizeof(*ac);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ size_t ac_tlv_len;
+ void *ptr;
+ int i;
+
+ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + ac_tlv_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->num_ac = __cpu_to_le32(num_ac);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(ac_tlv_len);
+ ac = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ for (i = 0; i < num_ac; i++)
+ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
+ return skb;
+}
+
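+/* Emit one WMM_PARAMS TLV at @ptr and return the pointer advanced past
+ * it; the pdev WMM setup below chains four of these (BE, BK, VI, VO).
+ */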
+static void *ath10k_wmi_tlv_put_wmm(void *ptr,
+ const struct wmi_wmm_params_arg *arg)
+{
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
+ tlv->len = __cpu_to_le16(sizeof(*wmm));
+ wmm = (void *)tlv->value;
+ ath10k_wmi_set_wmm_param(wmm, arg);
+
+ return ptr + sizeof(*tlv) + sizeof(*wmm);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_tlv_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*arp);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->enabled = __cpu_to_le32(arg->enabled);
+ cmd->method = __cpu_to_le32(arg->method);
+ cmd->interval = __cpu_to_le32(arg->interval);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
+ tlv->len = __cpu_to_le16(sizeof(*arp));
+ arp = (void *)tlv->value;
+
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_tlv_peer_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_tlv_peer_assoc_cmd *cmd;
+ struct wmi_vht_rate_set *vht_rate;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, legacy_rate_len, ht_rate_len;
+ void *ptr;
+
+ if (arg->peer_mpdu_density > 16)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+
+ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(__le32));
+ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + legacy_rate_len) +
+ (sizeof(*tlv) + ht_rate_len) +
+ (sizeof(*tlv) + sizeof(*vht_rate));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
+ cmd->flags = __cpu_to_le32(arg->peer_flags);
+ cmd->caps = __cpu_to_le32(arg->peer_caps);
+ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
+ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
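+ /* The rate arrays are byte TLVs: each TLV length is rounded up to a
+ * 4-byte boundary while only num_rates bytes of rate data are copied.
+ */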
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(legacy_rate_len);
+ memcpy(tlv->value, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += legacy_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ht_rate_len);
+ memcpy(tlv->value, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += ht_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
+ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
+ vht_rate = (void *)tlv->value;
+
+ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*vht_rate);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 param_value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_tlv_scan_chan_list_cmd *cmd;
+ struct wmi_channel *ci;
+ struct wmi_channel_arg *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t chans_len, len;
+ int i;
+ void *ptr, *chans;
+
+ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chans_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chans_len);
+ chans = (void *)tlv->value;
+
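+ /* The outer ARRAY_STRUCT TLV carries one nested CHANNEL TLV per
+ * channel in the list.
+ */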
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+
+ tlv = chans;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ci));
+ ci = (void *)tlv->value;
+
+ ath10k_wmi_put_wmi_channel(ci, ch);
+
+ chans += sizeof(*tlv);
+ chans += sizeof(*ci);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += chans_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (4 * (sizeof(*tlv) + sizeof(*wmm)));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* nothing to set here */
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ u32 log_level)
+{
+ struct wmi_tlv_dbglog_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, bmap_len;
+ u32 value;
+ void *ptr;
+
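+ /* Either enable verbose logging for the requested module, or fall
+ * back to warnings for all modules when no module is given.
+ */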
+ if (module_enable) {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ module_enable,
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
+ } else {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ WMI_TLV_DBGLOG_ALL_MODULES,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
+ }
+
+ bmap_len = 0;
+ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+ cmd->value = __cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(bmap_len);
+
+ /* nothing to do here */
+
+ ptr += sizeof(*tlv);
+ ptr += bmap_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct wmi_tlv_pktlog_enable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->filter = __cpu_to_le32(filter);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
+ filter);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct wmi_tlv_pktlog_disable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp, void *prb_ies,
+ size_t prb_ies_len)
+{
+ struct wmi_tlv_bcn_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+ sizeof(*tlv) + roundup(bcn->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+ cmd->buf_len = __cpu_to_le32(bcn->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary,
+ * but then it would be impossible to pass the original ie len.
+ * This chunk is not used yet, so if setting the probe resp template
+ * yields problems with beaconing or crashes the firmware, look here.
+ */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+ info = (void *)tlv->value;
+ info->caps = __cpu_to_le32(prb_caps);
+ info->erp = __cpu_to_le32(prb_erp);
+ memcpy(info->ies, prb_ies, prb_ies_len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+ ptr += prb_ies_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ /* FIXME: Adjust TSF? */
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *prb)
+{
+ struct wmi_tlv_prb_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) +
+ sizeof(*tlv) + roundup(prb->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->buf_len = __cpu_to_le32(prb->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info));
+ info = (void *)tlv->value;
+ info->caps = 0;
+ info->erp = 0;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+ memcpy(tlv->value, prb->data, prb->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct wmi_tlv_p2p_go_bcn_ie *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
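+ /* p2p_ie[1] is the IE payload length; adding 2 covers the element id
+ * and length octets of the 802.11 IE header.
+ */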
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(p2p_ie[1] + 2, 4);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+ vdev_id);
+ return skb;
+}
+
+/****************/
+/* TLV mappings */
+/****************/
+
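+/* Translation tables from the abstract ath10k WMI interface to the
+ * TLV command and parameter IDs; entries the TLV firmware does not
+ * provide are mapped to the corresponding *_UNSUPPORTED value.
+ */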
+static struct wmi_cmd_map wmi_tlv_cmd_map = {
+ .init_cmdid = WMI_TLV_INIT_CMDID,
+ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
+ .echo_cmdid = WMI_TLV_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
+ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+};
+
+static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
+ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_TLV_PDEV_PARAM_DCS,
+ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
+ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_TLV_VDEV_PARAM_WDS,
+ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_TLV_VDEV_PARAM_SGI,
+ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_TLV_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+};
+
+static const struct wmi_ops wmi_tlv_ops = {
+ .rx = ath10k_wmi_tlv_op_rx,
+ .map_svc = wmi_tlv_svc_map,
+
+ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+
+ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_tlv_op_gen_init,
+ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
+ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
+ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+ /* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+ /* .gen_pdev_set_quiet_mode not implemented */
+ /* .gen_pdev_get_temperature not implemented */
+ /* .gen_addba_clear_resp not implemented */
+ /* .gen_addba_send not implemented */
+ /* .gen_addba_set_resp not implemented */
+ /* .gen_delba_send not implemented */
+ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
+ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
+ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
+ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
+ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+};
+
+/************/
+/* TLV init */
+/************/
+
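+/* Hook the TLV command map, parameter maps and op table into the
+ * ath10k instance.
+ */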
+void ath10k_wmi_tlv_attach(struct ath10k *ar)
+{
+ ar->wmi.cmd = &wmi_tlv_cmd_map;
+ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.ops = &wmi_tlv_ops;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 000000000..a6c8280cc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,1459 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WMI_TLV_H
+#define _WMI_TLV_H
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
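+/* Command and event IDs are group based: the first ID of each group is
+ * ((grp_id) << 12) | 0x1 and the remaining IDs in a group simply
+ * increment from there in the enums below.
+ */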
+enum wmi_tlv_grp_id {
+ WMI_TLV_GRP_START = 0x3,
+ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
+ WMI_TLV_GRP_PDEV,
+ WMI_TLV_GRP_VDEV,
+ WMI_TLV_GRP_PEER,
+ WMI_TLV_GRP_MGMT,
+ WMI_TLV_GRP_BA_NEG,
+ WMI_TLV_GRP_STA_PS,
+ WMI_TLV_GRP_DFS,
+ WMI_TLV_GRP_ROAM,
+ WMI_TLV_GRP_OFL_SCAN,
+ WMI_TLV_GRP_P2P,
+ WMI_TLV_GRP_AP_PS,
+ WMI_TLV_GRP_RATECTL,
+ WMI_TLV_GRP_PROFILE,
+ WMI_TLV_GRP_SUSPEND,
+ WMI_TLV_GRP_BCN_FILTER,
+ WMI_TLV_GRP_WOW,
+ WMI_TLV_GRP_RTT,
+ WMI_TLV_GRP_SPECTRAL,
+ WMI_TLV_GRP_STATS,
+ WMI_TLV_GRP_ARP_NS_OFL,
+ WMI_TLV_GRP_NLO_OFL,
+ WMI_TLV_GRP_GTK_OFL,
+ WMI_TLV_GRP_CSA_OFL,
+ WMI_TLV_GRP_CHATTER,
+ WMI_TLV_GRP_TID_ADDBA,
+ WMI_TLV_GRP_MISC,
+ WMI_TLV_GRP_GPIO,
+ WMI_TLV_GRP_FWTEST,
+ WMI_TLV_GRP_TDLS,
+ WMI_TLV_GRP_RESMGR,
+ WMI_TLV_GRP_STA_SMPS,
+ WMI_TLV_GRP_WLAN_HB,
+ WMI_TLV_GRP_RMC,
+ WMI_TLV_GRP_MHF_OFL,
+ WMI_TLV_GRP_LOCATION_SCAN,
+ WMI_TLV_GRP_OEM,
+ WMI_TLV_GRP_NAN,
+ WMI_TLV_GRP_COEX,
+ WMI_TLV_GRP_OBSS_OFL,
+ WMI_TLV_GRP_LPI,
+ WMI_TLV_GRP_EXTSCAN,
+ WMI_TLV_GRP_DHCP_OFL,
+ WMI_TLV_GRP_IPA,
+ WMI_TLV_GRP_MDNS_OFL,
+ WMI_TLV_GRP_SAP_OFL,
+};
+
+enum wmi_tlv_cmd_id {
+ WMI_TLV_INIT_CMDID = 0x1,
+ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
+ WMI_TLV_STOP_SCAN_CMDID,
+ WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
+ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ WMI_TLV_PDEV_SET_PARAM_CMDID,
+ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_TLV_PDEV_DUMP_CMDID,
+ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_DELETE_CMDID,
+ WMI_TLV_VDEV_START_REQUEST_CMDID,
+ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ WMI_TLV_VDEV_UP_CMDID,
+ WMI_TLV_VDEV_STOP_CMDID,
+ WMI_TLV_VDEV_DOWN_CMDID,
+ WMI_TLV_VDEV_SET_PARAM_CMDID,
+ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
+ WMI_TLV_VDEV_WMM_DELTS_CMDID,
+ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_TLV_VDEV_PLMREQ_START_CMDID,
+ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
+ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_DELETE_CMDID,
+ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ WMI_TLV_PEER_SET_PARAM_CMDID,
+ WMI_TLV_PEER_ASSOC_CMDID,
+ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ WMI_TLV_PEER_INFO_REQ_CMDID,
+ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
+ WMI_TLV_PDEV_SEND_BCN_CMDID,
+ WMI_TLV_BCN_TMPL_CMDID,
+ WMI_TLV_BCN_FILTER_RX_CMDID,
+ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ WMI_TLV_MGMT_TX_CMDID,
+ WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_ADDBA_SEND_CMDID,
+ WMI_TLV_ADDBA_STATUS_CMDID,
+ WMI_TLV_DELBA_SEND_CMDID,
+ WMI_TLV_ADDBA_SET_RESP_CMDID,
+ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
+ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
+ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
+ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_ROAM_SCAN_PERIOD,
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_ROAM_AP_PROFILE,
+ WMI_TLV_ROAM_CHAN_LIST,
+ WMI_TLV_ROAM_SCAN_CMD,
+ WMI_TLV_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_TLV_ROAM_INVOKE_CMDID,
+ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
+ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_TLV_OFL_SCAN_PERIOD,
+ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_TLV_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
+ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
+ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_PDEV_RESUME_CMDID,
+ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
+ WMI_TLV_RMV_BCN_FILTER_CMDID,
+ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
+ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_TLV_WOW_ENABLE_CMDID,
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_TLV_EXTWOW_ENABLE_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
+ WMI_TLV_RTT_TSF_CMDID,
+ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
+ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
+ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_TLV_REQUEST_STATS_EXT_CMDID,
+ WMI_TLV_REQUEST_LINK_STATS_CMDID,
+ WMI_TLV_START_LINK_STATS_CMDID,
+ WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
+ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_APFIND_CMDID,
+ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
+ WMI_TLV_PEER_TID_DELBA_CMDID,
+ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_TLV_STA_KEEPALIVE_CMDID,
+ WMI_TLV_BA_REQ_SSN_CMDID,
+ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_CMDID,
+ WMI_TLV_DBGLOG_CFG_CMDID,
+ WMI_TLV_PDEV_QVIT_CMDID,
+ WMI_TLV_PDEV_FTM_INTG_CMDID,
+ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_TLV_FORCE_FW_HANG_CMDID,
+ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_TLV_THERMAL_MGMT_CMDID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
+ WMI_TLV_GPIO_OUTPUT_CMDID,
+ WMI_TLV_TXBF_CMDID,
+ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
+ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_TLV_UNIT_TEST_CMDID,
+ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
+ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
+ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
+ WMI_TLV_STA_SMPS_PARAM_CMDID,
+ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
+ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
+ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_TLV_RMC_CONFIG_CMDID,
+ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
+ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
+ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
+ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
+ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
+ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
+ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
+ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
+ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_START_SCAN_CMDID,
+ WMI_TLV_LPI_STOP_SCAN_CMDID,
+ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_STOP_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
+ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
+ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_MDNS_SET_FQDN_CMDID,
+ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
+ WMI_TLV_MDNS_GET_STATS_CMDID,
+ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
+};
+
+enum wmi_tlv_event_id {
+ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
+ WMI_TLV_READY_EVENTID,
+ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
+ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
+ WMI_TLV_CHAN_INFO_EVENTID,
+ WMI_TLV_PHYERR_EVENTID,
+ WMI_TLV_PDEV_DUMP_EVENTID,
+ WMI_TLV_TX_PAUSE_EVENTID,
+ WMI_TLV_DFS_RADAR_EVENTID,
+ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
+ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
+ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_STOPPED_EVENTID,
+ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_INFO_EVENTID,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_TLV_PEER_STATE_EVENTID,
+ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
+ WMI_TLV_HOST_SWBA_EVENTID,
+ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_TLV_BA_RSP_SSN_EVENTID,
+ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
+ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
+ WMI_TLV_PROFILE_MATCH,
+ WMI_TLV_ROAM_SYNCH_EVENTID,
+ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_NOA_EVENTID,
+ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
+ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
+ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
+ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
+ WMI_TLV_IFACE_LINK_STATS_EVENTID,
+ WMI_TLV_PEER_LINK_STATS_EVENTID,
+ WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_TLV_APFIND_EVENTID,
+ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
+ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_EVENTID,
+ WMI_TLV_DEBUG_MESG_EVENTID,
+ WMI_TLV_UPDATE_STATS_EVENTID,
+ WMI_TLV_DEBUG_PRINT_EVENTID,
+ WMI_TLV_DCS_INTERFERENCE_EVENTID,
+ WMI_TLV_PDEV_QVIT_EVENTID,
+ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
+ WMI_TLV_PDEV_FTM_INTG_EVENTID,
+ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_TLV_THERMAL_MGMT_EVENTID,
+ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_TLV_DIAG_EVENTID,
+ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
+ WMI_TLV_UPLOADH_EVENTID,
+ WMI_TLV_CAPTUREH_EVENTID,
+ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
+ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
+ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
+ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
+ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
+ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
+ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_STATUS_EVENTID,
+ WMI_TLV_LPI_HANDOFF_EVENTID,
+ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
+ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
+ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
+};
+
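+/*
+ * TLV pdev/vdev parameter IDs are numbered sequentially from 1 in the
+ * firmware's own order; the value 0 (the *_PARAM_UNSUPPORTED defines
+ * above) is used to mark parameters that have no TLV equivalent.
+ */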
+enum wmi_tlv_pdev_param {
+ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PMF_QOS,
+ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_DCS,
+ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ WMI_TLV_PDEV_PARAM_PROXY_STA,
+ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_TLV_PDEV_PARAM_BURST_DUR,
+ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
+ WMI_TLV_PDEV_PARAM_HYST_EN,
+ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
+ WMI_TLV_PDEV_PARAM_LED_ENABLE,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ WMI_TLV_VDEV_PARAM_PREAMBLE,
+ WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_TLV_VDEV_PARAM_WDS,
+ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ WMI_TLV_VDEV_PARAM_CHWIDTH,
+ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ WMI_TLV_VDEV_PARAM_SGI,
+ WMI_TLV_VDEV_PARAM_LDPC,
+ WMI_TLV_VDEV_PARAM_TX_STBC,
+ WMI_TLV_VDEV_PARAM_RX_STBC,
+ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ WMI_TLV_VDEV_PARAM_NSS,
+ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_TLV_VDEV_PARAM_TXBF,
+ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
+ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_TLV_VDEV_PARAM_MAX_RATE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+};
+
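+/*
+ * TLV tags identify the structure carried in each TLV element.  The
+ * numbering mirrors the firmware definitions: values up to 15 are
+ * reserved, 16-31 cover the array tags, and the struct tags follow from
+ * 32 onwards.
+ */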
+enum wmi_tlv_tag {
+ WMI_TLV_TAG_LAST_RESERVED = 15,
+
+ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_BYTE,
+ WMI_TLV_TAG_ARRAY_STRUCT,
+ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
+
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
+ WMI_TLV_TAG_STRUCT_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
+ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
+ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
+ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
+ WMI_TLV_TAG_STRUCT_CSA_EVENT,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_IGTK_INFO,
+ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
+ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
+ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
+ WMI_TLV_TAG_STRUCT_TIM_INFO,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
+ WMI_TLV_TAG_STRUCT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
+ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
+ WMI_TLV_TAG_STRUCT_INIT_CMD,
+ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
+ WMI_TLV_TAG_STRUCT_CHANNEL,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
+ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
+ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
+ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ECHO_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
+ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
+ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
+ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TXBF_CMD,
+ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_EVENT,
+ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
+ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
+ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
+ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
+ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
+ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
+ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_RATE_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
+ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
+ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
+ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+
+ WMI_TLV_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_TLV_SERVICE_AP_DFS,
+ WMI_TLV_SERVICE_11AC,
+ WMI_TLV_SERVICE_BLOCKACK,
+ WMI_TLV_SERVICE_PHYERR,
+ WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_TLV_SERVICE_RTT,
+ WMI_TLV_SERVICE_WOW,
+ WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_NLO,
+ WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_TLV_SERVICE_CHATTER,
+ WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_TLV_SERVICE_GPIO,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_TLV_SERVICE_EARLY_RX,
+ WMI_TLV_SERVICE_STA_SMPS,
+ WMI_TLV_SERVICE_FWTEST,
+ WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_TLV_SERVICE_TDLS,
+ WMI_TLV_SERVICE_BURST,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_TLV_SERVICE_WLAN_HB,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_TLV_SERVICE_QPOWER,
+ WMI_TLV_SERVICE_PLMREQ,
+ WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_TLV_SERVICE_RMC,
+ WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_TLV_SERVICE_COEX_SAR,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_TLV_SERVICE_NAN,
+ WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_TLV_SERVICE_LPASS,
+ WMI_TLV_SERVICE_EXTSCAN,
+ WMI_TLV_SERVICE_D0WOW,
+ WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+};
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+ BIT((svc_id)%(sizeof(u32))))
+
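+/*
+ * SVCMAP() copies one firmware service bit into the driver's generic
+ * service bitmap.  Note that it refers to the 'in' and 'out' parameters
+ * of wmi_tlv_svc_map() below by name, which is also why it is #undef'd
+ * again right after that function.
+ */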
+#define SVCMAP(x, y, len) \
+ do { \
+ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+ __set_bit(y, out); \
+ } while (0)
+
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_TLV_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_TLV_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_TLV_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_TLV_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_TLV_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+ WMI_SERVICE_STA_SMPS, len);
+ SVCMAP(WMI_TLV_SERVICE_FWTEST,
+ WMI_SERVICE_FWTEST, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_SERVICE_STA_WMMAC, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_TLV_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_ADAPTIVE_OCS, len);
+ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_BA_SSN_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+ WMI_SERVICE_WLAN_HB, len);
+ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_BATCH_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_QPOWER,
+ WMI_SERVICE_QPOWER, len);
+ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+ WMI_SERVICE_PLMREQ, len);
+ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_THERMAL_MGMT, len);
+ SVCMAP(WMI_TLV_SERVICE_RMC,
+ WMI_SERVICE_RMC, len);
+ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_MHF_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+ WMI_SERVICE_COEX_SAR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+ SVCMAP(WMI_TLV_SERVICE_NAN,
+ WMI_SERVICE_NAN, len);
+ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_SERVICE_L1SS_STAT, len);
+ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_OBSS_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_IBSS_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_LPASS,
+ WMI_SERVICE_LPASS, len);
+ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+ WMI_SERVICE_EXTSCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_D0WOW,
+ WMI_SERVICE_D0WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_HSOFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_RX_FULL_REORDER, len);
+ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_DHCP_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_MDNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+}
+
+#undef SVCMAP
+
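+/*
+ * Common header preceding every TLV-encoded WMI element: a 16-bit length
+ * and a 16-bit tag (see enum wmi_tlv_tag) followed by the value bytes.
+ */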
+struct wmi_tlv {
+ __le16 len;
+ __le16 tag;
+ u8 value[0];
+} __packed;
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+ __le32 abi_ver0;
+ __le32 abi_ver1;
+ __le32 abi_ver_ns0;
+ __le32 abi_ver_ns1;
+ __le32 abi_ver_ns2;
+ __le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+ WMI_TLV_HW_BD_LEGACY = 0,
+ WMI_TLV_HW_BD_QCA6174 = 1,
+ WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+ u8 rev;
+ u8 project_id;
+ u8 custom_id;
+ u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+ __le32 fw_build_vers;
+ struct wmi_tlv_abi_version abi;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_chans;
+ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+ struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+ struct wmi_tlv_abi_version abi;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_tlv_resource_config {
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_bufs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_reqs;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+} __packed;
+
+struct wmi_tlv_init_cmd {
+ struct wmi_tlv_abi_version abi;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_tlv_pdev_set_param_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_tlv_pdev_set_rd_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 regd;
+ __le32 regd_2ghz;
+ __le32 regd_5ghz;
+ __le32 conform_limit_2ghz;
+ __le32 conform_limit_5ghz;
+} __packed;
+
+struct wmi_tlv_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+} __packed;
+
+struct wmi_tlv_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ __le32 num_channels;
+ __le32 num_bssids;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 num_probes;
+} __packed;
+
+struct wmi_tlv_vdev_start_cmd {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 bcn_intval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct wmi_ssid ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_tx_power;
+ __le32 num_noa_descr;
+ __le32 disable_hw_ack;
+} __packed;
+
+enum {
+ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
+ WMI_TLV_PEER_TYPE_BSS = 1,
+ WMI_TLV_PEER_TYPE_TDLS = 2,
+ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
+ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
+};
+
+struct wmi_tlv_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+ __le32 peer_type;
+} __packed;
+
+struct wmi_tlv_peer_assoc_cmd {
+ struct wmi_mac_addr mac_addr;
+ __le32 vdev_id;
+ __le32 new_assoc;
+ __le32 assoc_id;
+ __le32 flags;
+ __le32 caps;
+ __le32 listen_intval;
+ __le32 ht_caps;
+ __le32 max_mpdu;
+ __le32 mpdu_density;
+ __le32 rate_caps;
+ __le32 nss;
+ __le32 vht_caps;
+ __le32 phy_mode;
+ __le32 ht_info[2];
+ __le32 num_legacy_rates;
+ __le32 num_ht_rates;
+} __packed;
+
+struct wmi_tlv_pdev_suspend {
+ __le32 pdev_id; /* not used yet */
+ __le32 opt;
+} __packed;
+
+struct wmi_tlv_pdev_set_wmm_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 dg_type; /* purpose unknown */
+} __packed;
+
+struct wmi_tlv_vdev_wmm_params {
+ __le32 dummy;
+ struct wmi_wmm_params params;
+} __packed;
+
+struct wmi_tlv_vdev_set_wmm_cmd {
+ __le32 vdev_id;
+ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
+} __packed;
+
+struct wmi_tlv_phyerr_ev {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+} __packed;
+
+enum wmi_tlv_dbglog_param {
+ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
+};
+
+enum wmi_tlv_dbglog_log_level {
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
+ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
+};
+
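+/*
+ * The dbglog configuration value packs a module id into the upper 16 bits
+ * and the log level into the low byte (see WMI_TLV_DBGLOG_LOG_LEVEL_VALUE()
+ * below); WMI_TLV_DBGLOG_ALL_MODULES addresses every module at once.
+ */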
+#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
+#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
+ sizeof(__le32))
+#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
+#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
+ (((module_id << 16) & 0xffff0000) | \
+ ((log_level << 0) & 0x000000ff))
+
+struct wmi_tlv_dbglog_cmd {
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_tlv_resume_cmd {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_req_stats_cmd {
+ __le32 stats_id; /* wmi_stats_id */
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_tlv_vdev_stats {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[4]; /* per-AC */
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[4];
+ __le32 num_tx_frames_failures[4];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[10];
+ __le32 beacon_rssi_history[10];
+} __packed;
+
+struct wmi_tlv_pktlog_enable {
+ __le32 reserved;
+ __le32 filter;
+} __packed;
+
+struct wmi_tlv_pktlog_disable {
+ __le32 reserved;
+} __packed;
+
+enum wmi_tlv_bcn_tx_status {
+ WMI_TLV_BCN_TX_STATUS_OK,
+ WMI_TLV_BCN_TX_STATUS_XRETRY,
+ WMI_TLV_BCN_TX_STATUS_DROP,
+ WMI_TLV_BCN_TX_STATUS_FILTERED,
+};
+
+struct wmi_tlv_bcn_tx_status_ev {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_tlv_bcn_prb_info {
+ __le32 caps;
+ __le32 erp;
+ u8 ies[0];
+} __packed;
+
+struct wmi_tlv_bcn_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_prb_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_p2p_go_bcn_ie {
+ __le32 vdev_id;
+ __le32 ie_len;
+} __packed;
+
+enum wmi_tlv_diag_item_type {
+ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
+};
+
+struct wmi_tlv_diag_item {
+ u8 type;
+ u8 reserved;
+ __le16 len;
+ __le32 timestamp;
+ __le32 code;
+ u8 payload[0];
+} __packed;
+
+struct wmi_tlv_diag_data_ev {
+ __le32 num_items;
+} __packed;
+
+struct wmi_tlv_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+} __packed;
+
+struct wmi_tlv_stats_ev {
+ __le32 stats_id; /* WMI_STAT_ */
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+} __packed;
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 000000000..c7ea77edc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,5513 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "mac.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+
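+/*
+ * Each firmware branch numbers its WMI commands differently, so the driver
+ * keeps one wmi_cmd_map per branch that translates the abstract command
+ * names used throughout the code into the branch-specific IDs; entries set
+ * to WMI_CMD_UNSUPPORTED mark commands that firmware does not implement.
+ */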
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.2.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
+static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+};
+
+static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+};
+
+/* firmware 10.2 specific mappings */
+static struct wmi_cmd_map wmi_10_2_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
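+/* Translate host channel arguments into the little-endian wmi_channel
+ * layout that is passed to the firmware.
+ */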
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
+{
+ u32 flags = 0;
+
+ memset(ch, 0, sizeof(*ch));
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ ch->mhz = __cpu_to_le32(arg->freq);
+ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+ ch->band_center_freq2 = 0;
+ ch->min_power = arg->min_power;
+ ch->max_power = arg->max_power;
+ ch->reg_power = arg->max_reg_power;
+ ch->antenna_max = arg->max_antenna_gain;
+
+ /* mode & flags share storage */
+ ch->mode = arg->mode;
+ ch->flags |= __cpu_to_le32(flags);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ return ret;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ return ret;
+}
+
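+/* Allocate a WMI command buffer with HTC headroom reserved and the
+ * payload length rounded up to a 4-byte boundary.
+ */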
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
+{
+ struct sk_buff *skb;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned WMI skb\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
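+/* Prepend the WMI command header and hand the buffer to HTC without
+ * waiting for tx credits; retries are left to the caller (see
+ * ath10k_wmi_cmd_send()).
+ */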
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
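+/* Send a scheduled beacon for the given vif. The data lock is dropped
+ * around the WMI call and the beacon state is updated according to the
+ * send result.
+ */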
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *bcn;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ bcn = arvif->beacon;
+
+ if (!bcn)
+ goto unlock;
+
+ cb = ATH10K_SKB_CB(bcn);
+
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENDING:
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ arvif->beacon_state = ATH10K_BEACON_SENDING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
+ arvif->vdev_id,
+ bcn->data, bcn->len,
+ cb->paddr,
+ cb->bcn.dtim_zero,
+ cb->bcn.deliver_cab);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (ret == 0)
+ arvif->beacon_state = ATH10K_BEACON_SENT;
+ else
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
+ cmd_id);
+ return ret;
+ }
+
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), 3*HZ);
+
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int len;
+ u32 buf_len = msdu->len;
+ u16 fc;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(cmd->hdr) + msdu->len;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ len = round_up(len, 4);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+
+ ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
+ memcpy(cmd->buf, msdu->data, msdu->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_tx_payload(ar, skb->data, skb->len);
+
+ return skb;
+}
+
+static void ath10k_wmi_event_scan_started(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ ar->scan.state = ATH10K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+ break;
+ }
+}
+
+static const char *
+ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ struct wmi_scan_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ return 0;
+}
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_scan_ev_arg arg = {};
+ enum wmi_scan_event_type event_type;
+ enum wmi_scan_completion_reason reason;
+ u32 freq;
+ u32 req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ int ret;
+
+ ret = ath10k_wmi_pull_scan(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
+ return ret;
+ }
+
+ event_type = __le32_to_cpu(arg.event_type);
+ reason = __le32_to_cpu(arg.reason);
+ freq = __le32_to_cpu(arg.channel_freq);
+ req_id = __le32_to_cpu(arg.scan_req_id);
+ scan_id = __le32_to_cpu(arg.scan_id);
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath10k_wmi_event_scan_type_str(event_type, reason),
+ event_type, reason, freq, req_id, scan_id, vdev_id,
+ ath10k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath10k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath10k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath10k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ ath10k_wmi_event_scan_foreign_chan(ar, freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath10k_warn(ar, "received scan start failure event\n");
+ ath10k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+ return 0;
+}
+
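+/* Derive the ieee80211 band from the WMI phy mode. Unknown or 2.4 GHz
+ * modes fall back to IEEE80211_BAND_2GHZ.
+ */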
+static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
+{
+ enum ieee80211_band band;
+
+ switch (phy_mode) {
+ case MODE_11A:
+ case MODE_11NA_HT20:
+ case MODE_11NA_HT40:
+ case MODE_11AC_VHT20:
+ case MODE_11AC_VHT40:
+ case MODE_11AC_VHT80:
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ case MODE_11G:
+ case MODE_11B:
+ case MODE_11GONLY:
+ case MODE_11NG_HT20:
+ case MODE_11NG_HT40:
+ case MODE_11AC_VHT20_2G:
+ case MODE_11AC_VHT40_2G:
+ case MODE_11AC_VHT80_2G:
+ default:
+ band = IEEE80211_BAND_2GHZ;
+ }
+
+ return band;
+}
+
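+/* Map a legacy rate reported by firmware (in kbps) to an index into the
+ * band's bitrate table. The table is assumed to list the four CCK rates
+ * first, which is why they are skipped on 5 GHz.
+ */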
+static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
+{
+ u8 rate_idx = 0;
+
+ /* rate in Kbps */
+ switch (rate) {
+ case 1000:
+ rate_idx = 0;
+ break;
+ case 2000:
+ rate_idx = 1;
+ break;
+ case 5500:
+ rate_idx = 2;
+ break;
+ case 11000:
+ rate_idx = 3;
+ break;
+ case 6000:
+ rate_idx = 4;
+ break;
+ case 9000:
+ rate_idx = 5;
+ break;
+ case 12000:
+ rate_idx = 6;
+ break;
+ case 18000:
+ rate_idx = 7;
+ break;
+ case 24000:
+ rate_idx = 8;
+ break;
+ case 36000:
+ rate_idx = 9;
+ break;
+ case 48000:
+ rate_idx = 10;
+ break;
+ case 54000:
+ rate_idx = 11;
+ break;
+ default:
+ break;
+ }
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (rate_idx > 3)
+ /* Omit CCK rates */
+ rate_idx -= 4;
+ else
+ rate_idx = 0;
+ }
+
+ return rate_idx;
+}
+
+/* If keys are configured, HW decrypts all frames
+ * with protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ unsigned int hdrlen;
+ bool peer_key;
+ u8 *addr, keyidx;
+
+ if (!ieee80211_is_auth(hdr->frame_control) ||
+ !ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+ return;
+
+ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+ addr = ieee80211_get_SA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer_key) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac wep key present for peer %pM\n", addr);
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+}
+
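+/* Parse the management rx event header. Firmware advertising
+ * ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX uses the extended v2 layout,
+ * otherwise the v1 layout is used.
+ */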
+static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
+ * trailer with credit update. Trim the excess garbage.
+ */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_mgmt_rx_ev_arg arg = {};
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u32 rx_status;
+ u32 channel;
+ u32 phy_mode;
+ u32 snr;
+ u32 rate;
+ u32 buf_len;
+ u16 fc;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+ return ret;
+ }
+
+ channel = __le32_to_cpu(arg.channel);
+ buf_len = __le32_to_cpu(arg.buf_len);
+ rx_status = __le32_to_cpu(arg.status);
+ snr = __le32_to_cpu(arg.snr);
+ phy_mode = __le32_to_cpu(arg.phy_mode);
+ rate = __le32_to_cpu(arg.rate);
+
+ memset(status, 0, sizeof(*status));
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx status %08x\n", rx_status);
+
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ * MODE_11B. This means phy_mode is not a reliable source for the band
+ * of mgmt rx.
+ */
+ if (channel >= 1 && channel <= 14) {
+ status->band = IEEE80211_BAND_2GHZ;
+ } else if (channel >= 36 && channel <= 165) {
+ status->band = IEEE80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ status->freq = ieee80211_channel_to_frequency(channel, status->band);
+ status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = get_rate_idx(rate, status->band);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
+	/* FW delivers WEP Shared Auth frames with the Protected bit set and
+	 * an encrypted payload. However, in case of PMF it delivers decrypted
+	 * frames with the Protected bit set.
+	 */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !ieee80211_is_auth(hdr->frame_control)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_action(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx(ar->hw, skb);
+ return 0;
+}
+
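+/* Map a channel center frequency to a flat index spanning all advertised
+ * bands, matching the layout of the ar->survey[] array used in
+ * ath10k_wmi_event_chan_info().
+ */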
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ return 0;
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_ch_info_ev_arg arg = {};
+ struct survey_info *survey;
+ u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+ int idx, ret;
+
+ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ return;
+ }
+
+ err_code = __le32_to_cpu(arg.err_code);
+ freq = __le32_to_cpu(arg.freq);
+ cmd_flags = __le32_to_cpu(arg.cmd_flags);
+ noise_floor = __le32_to_cpu(arg.noise_floor);
+ rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+ cycle_count = __le32_to_cpu(arg.cycle_count);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+ cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+		/* During scanning chan info is reported twice for each
+		 * visited channel. The reported cycle count is global, so the
+		 * per-channel cycle count must be calculated.
+		 */
+
+ cycle_count -= ar->survey_last_cycle_count;
+ rx_clear_count -= ar->survey_last_rx_clear_count;
+
+ survey = &ar->survey[idx];
+ survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
+ survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->noise = noise_floor;
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_NOISE_DBM;
+ }
+
+ ar->survey_last_rx_clear_count = rx_clear_count;
+ ar->survey_last_cycle_count = cycle_count;
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+}
+
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ skb->len);
+
+ trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
+
+ return 0;
+}
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(src->r0_frags);
+ dst->r1_frags = __le32_to_cpu(src->r1_frags);
+ dst->r2_frags = __le32_to_cpu(src->r2_frags);
+ dst->r3_frags = __le32_to_cpu(src->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
+}
+
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+}
+
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10x_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+		 * Note: even though this loop seems to do nothing, it is
+		 * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+		 * Note: even though this loop seems to do nothing, it is
+		 * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_4_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+ ath10k_debug_fw_stats_process(ar, skb);
+}
+
+static int
+ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ return 0;
+}
+
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_ev_arg arg = {};
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+ return;
+ }
+
+ if (WARN_ON(__le32_to_cpu(arg.status)))
+ return;
+
+ complete(&ar->vdev_setup_done);
+}
+
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+ complete(&ar->vdev_setup_done);
+}
+
+static int
+ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ return 0;
+}
+
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_kick_ev_arg arg = {};
+ struct ieee80211_sta *sta;
+ int ret;
+
+ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
+ ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ arg.mac_addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
+ if (!sta) {
+ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
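+	/* tell mac80211 that frames to this station are no longer being acked */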
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
+}
+
+/*
+ * FIXME
+ *
+ * We don't report to mac80211 sleep state of connected
+ * stations. Due to this mac80211 can't fill in TIM IE
+ * correctly.
+ *
+ * I know of no way of getting nullfunc frames that contain
+ * sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is calling mac80211 functions so many times
+ * could take too long and make us miss the time to submit
+ * the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there
+ * is unicast buffered for stations with aid > 7 and fill it
+ * in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_tim_info *tim_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+ struct ieee80211_tim_ie *tim;
+ u8 *ies, *ie;
+ u8 ie_len, pvm_len;
+ __le32 t;
+ u32 v;
+
+	/* If the next SWBA has tim_changed unset, the tim_bitmap is garbage;
+	 * we must copy the bitmap when it changes and reuse it later */
+ if (__le32_to_cpu(tim_info->tim_changed)) {
+ int i;
+
+ BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
+ sizeof(tim_info->tim_bitmap));
+
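+		/* unpack the firmware's little-endian 32-bit words into a byte bitmap */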
+ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+ t = tim_info->tim_bitmap[i / 4];
+ v = __le32_to_cpu(t);
+ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+ }
+
+		/* FW reports either length 0 or 16,
+		 * so we calculate the length on our own */
+ arvif->u.ap.tim_len = 0;
+ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+ if (arvif->u.ap.tim_bitmap[i])
+ arvif->u.ap.tim_len = i;
+
+ arvif->u.ap.tim_len++;
+ }
+
+ ies = bcn->data;
+ ies += ieee80211_hdrlen(hdr->frame_control);
+ ies += 12; /* fixed parameters */
+
+ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+ (u8 *)skb_tail_pointer(bcn) - ies);
+ if (!ie) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ ath10k_warn(ar, "no tim ie found;\n");
+ return;
+ }
+
+ tim = (void *)ie + 2;
+ ie_len = ie[1];
+ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
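+	/* The partial virtual bitmap built by mac80211 may be shorter than the
+	 * one firmware reported, so grow the TIM IE in place and shift the
+	 * following IEs back. */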
+ if (pvm_len < arvif->u.ap.tim_len) {
+ int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+ void *next_ie = ie + 2 + ie_len;
+
+ if (skb_put(bcn, expand_size)) {
+ memmove(next_ie + expand_size, next_ie, move_size);
+
+ ie[1] += expand_size;
+ ie_len += expand_size;
+ pvm_len += expand_size;
+ } else {
+ ath10k_warn(ar, "tim expansion failed\n");
+ }
+ }
+
+ if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
+ return;
+ }
+
+ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
+ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+ if (tim->dtim_count == 0) {
+ ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+
+ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
+ ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+ tim->dtim_count, tim->dtim_period,
+ tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
+{
+ u32 len = 0;
+ u8 noa_descriptors = noa->num_descriptors;
+ u8 opp_ps_info = noa->ctwindow_oppps;
+ bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
+
+ if (!noa_descriptors && !opps_enabled)
+ return len;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_p2p_noa_info *noa)
+{
+ u8 *new_data, *old_data = arvif->u.ap.noa_data;
+ u32 new_len;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
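+	/* rebuild the cached NOA IE only when firmware flags a change */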
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
+ new_len = ath10k_p2p_calc_noa_ie_len(noa);
+ if (!new_len)
+ goto cleanup;
+
+ new_data = kmalloc(new_len, GFP_ATOMIC);
+ if (!new_data)
+ goto cleanup;
+
+ ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
+
+ spin_lock_bh(&ar->data_lock);
+ arvif->u.ap.noa_data = new_data;
+ arvif->u.ap.noa_len = new_len;
+ spin_unlock_bh(&ar->data_lock);
+ kfree(old_data);
+ }
+
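+	/* append the cached NOA IE to the beacon template */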
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+ memcpy(skb_put(bcn, arvif->u.ap.noa_len),
+ arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ return;
+
+cleanup:
+ spin_lock_bh(&ar->data_lock);
+ arvif->u.ap.noa_data = NULL;
+ arvif->u.ap.noa_len = 0;
+ spin_unlock_bh(&ar->data_lock);
+ kfree(old_data);
+}
+
+static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
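+	/* each set bit in vdev_map carries tim/p2p noa info for one beaconing vdev */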
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+		/* If this happens, the firmware has changed and ath10k should
+		 * update the maximum size of the tim_info array.
+		 */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
+ i++;
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_swba_ev_arg arg = {};
+ u32 map;
+ int i = -1;
+ const struct wmi_tim_info *tim_info;
+ const struct wmi_p2p_noa_info *noa_info;
+ struct ath10k_vif *arvif;
+ struct sk_buff *bcn;
+ dma_addr_t paddr;
+ int ret, vdev_id = 0;
+
+ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+ return;
+ }
+
+ map = __le32_to_cpu(arg.vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+ map);
+
+ for (; map; map >>= 1, vdev_id++) {
+ if (!(map & 0x1))
+ continue;
+
+ i++;
+
+ if (i >= WMI_MAX_AP_VDEV) {
+ ath10k_warn(ar, "swba has corrupted vdev map\n");
+ break;
+ }
+
+ tim_info = arg.tim_info[i];
+ noa_info = arg.noa_info[i];
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
+ i,
+ __le32_to_cpu(tim_info->tim_len),
+ __le32_to_cpu(tim_info->tim_mcast),
+ __le32_to_cpu(tim_info->tim_changed),
+ __le32_to_cpu(tim_info->tim_num_ps_pending),
+ __le32_to_cpu(tim_info->tim_bitmap[3]),
+ __le32_to_cpu(tim_info->tim_bitmap[2]),
+ __le32_to_cpu(tim_info->tim_bitmap[1]),
+ __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif == NULL) {
+ ath10k_warn(ar, "no vif for vdev_id %d found\n",
+ vdev_id);
+ continue;
+ }
+
+		/* There are no completions for beacons, so wait for the next
+		 * SWBA before telling mac80211 to decrement the CSA counter.
+		 *
+		 * Once the CSA countdown has completed, stop sending beacons
+		 * until the actual channel switch is done */
+ if (arvif->vif->csa_active &&
+ ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_csa_finish(arvif->vif);
+ continue;
+ }
+
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ if (!bcn) {
+ ath10k_warn(ar, "could not get mac80211 beacon\n");
+ continue;
+ }
+
+ ath10k_tx_h_seq_no(arvif->vif, bcn);
+ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
+ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->beacon) {
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+ arvif->vdev_id);
+ break;
+ case ATH10K_BEACON_SENDING:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
+ arvif->vdev_id);
+ dev_kfree_skb(bcn);
+ goto skip;
+ }
+
+ ath10k_mac_vif_beacon_free(arvif);
+ }
+
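+		/* Map the beacon for DMA unless the vif has a preallocated
+		 * beacon buffer, in which case copy the beacon into it
+		 * (trimming it if it does not fit). */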
+ if (!arvif->beacon_buf) {
+ paddr = dma_map_single(arvif->ar->dev, bcn->data,
+ bcn->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
+ goto skip;
+ }
+
+ ATH10K_SKB_CB(bcn)->paddr = paddr;
+ } else {
+ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+ bcn->len, IEEE80211_MAX_FRAME_LEN);
+ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+ }
+ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
+ }
+
+ arvif->beacon = bcn;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+
+ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
+skip:
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ath10k_wmi_tx_beacons_nowait(ar);
+}
+
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct pulse_event pe;
+ u64 tsf64;
+ u8 rssi, width;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ /* report event to DFS pattern detector */
+ tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
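+	/* splice the pulse's lower 32 TSF bits into the event's 64-bit TSF */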
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = phyerr->rssi_combined;
+
+	/* hardware stores this as an 8-bit signed value;
+	 * clamp it to zero if it is negative
+	 */
+ if (rssi & 0x80)
+ rssi = 0;
+
+ pe.ts = tsf64;
+ pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.width = width;
+ pe.rssi = rssi;
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+	/* Radar event reporting can be suppressed via the debugfs file
+	 * dfs_block_radar_events */
+ if (ar->dfs_block_radar_events) {
+ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
+ return;
+ }
+
+ ieee80211_radar_detected(ar->hw);
+}
+
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ const struct phyerr_fft_report *fftr,
+ u64 tsf)
+{
+ u32 reg0, reg1;
+ u8 rssi, peak_mag;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+ rssi = phyerr->rssi_combined;
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+ /* false event detection */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ const struct phyerr_tlv *tlv;
+ const struct phyerr_radar_report *rr;
+ const struct phyerr_fft_report *fftr;
+ const u8 *tlv_buf;
+
+ buf_len = __le32_to_cpu(phyerr->buf_len);
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+ phyerr->phy_err_code, phyerr->rssi_combined,
+ __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ return;
+
+ ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+ ath10k_warn(ar, "too short fft report (%d)\n",
+ i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
+ if (res)
+ return;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ struct phyerr_tlv *tlv;
+ const void *tlv_buf;
+ const struct phyerr_fft_report *fftr;
+ size_t fftr_len;
+
+ buf_len = __le32_to_cpu(phyerr->buf_len);
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+
+ if (i + sizeof(*tlv) + tlv_len > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
+ i);
+ return;
+ }
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (sizeof(*fftr) > tlv_len) {
+ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
+ i);
+ return;
+ }
+
+ fftr_len = tlv_len - sizeof(*fftr);
+ fftr = tlv_buf;
+ res = ath10k_spectral_process_fft(ar, phyerr,
+ fftr, fftr_len,
+ tsf);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to process fft report: %d\n",
+ res);
+ return;
+ }
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ struct wmi_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->num_phyerrs = ev->num_phyerrs;
+ arg->tsf_l32 = ev->tsf_l32;
+ arg->tsf_u32 = ev->tsf_u32;
+ arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
+ arg->phyerrs = ev->phyerrs;
+
+ return 0;
+}
+
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_phyerr_ev_arg arg = {};
+ const struct wmi_phyerr *phyerr;
+ u32 count, i, buf_len, phy_err_code;
+ u64 tsf;
+ int left_len, ret;
+
+ ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+ ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
+ return;
+ }
+
+ left_len = __le32_to_cpu(arg.buf_len);
+
+ /* Check number of included events */
+ count = __le32_to_cpu(arg.num_phyerrs);
+
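+	/* reassemble the 64-bit TSF from its two 32-bit halves */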
+ tsf = __le32_to_cpu(arg.tsf_u32);
+ tsf <<= 32;
+ tsf |= __le32_to_cpu(arg.tsf_l32);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event phyerr count %d tsf64 0x%llX\n",
+ count, tsf);
+
+ phyerr = arg.phyerrs;
+ for (i = 0; i < count; i++) {
+ /* Check if we can read event header */
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "single event (%d) wrong head len\n",
+ i);
+ return;
+ }
+
+ left_len -= sizeof(*phyerr);
+
+ buf_len = __le32_to_cpu(phyerr->buf_len);
+ phy_err_code = phyerr->phy_err_code;
+
+ if (left_len < buf_len) {
+ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+ left_len -= buf_len;
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+ ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ break;
+ default:
+ break;
+ }
+
+ phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
+ }
+}
+
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+}
+
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
+{
+ char buf[101], c;
+ int i;
+
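+	/* copy the printable part of the message, replacing other bytes with dots */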
+ for (i = 0; i < sizeof(buf) - 1; i++) {
+ if (i >= skb->len)
+ break;
+
+ c = skb->data[i];
+
+ if (c == '\0')
+ break;
+
+ if (isascii(c) && isprint(c))
+ buf[i] = c;
+ else
+ buf[i] = '.';
+ }
+
+ if (i == sizeof(buf) - 1)
+ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (skb->data[i - 1] == '\n')
+ i--;
+
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
+}
+
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+}
+
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+}
+
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+
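+	/* each unit is rounded up to a 4-byte boundary */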
+ pool_size = num_units * round_up(unit_len, 4);
+
+ if (!pool_size)
+ return -EINVAL;
+
+ ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+ pool_size,
+ &paddr,
+ GFP_ATOMIC);
+ if (!ar->wmi.mem_chunks[idx].vaddr) {
+ ath10k_warn(ar, "failed to allocate memory chunk\n");
+ return -ENOMEM;
+ }
+
+ memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return 0;
+}
+
+static int
+ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ int i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_svc_rdy_ev_arg arg = {};
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+
+ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
+ return;
+ }
+
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+
+ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->fw_version_major =
+ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
+ ar->fw_version_release =
+ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+ ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+ arg.service_map, arg.service_map_len);
+
+ /* only manually set fw features when not using FW IE format */
+ if (ar->fw_api == 1 && ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
+
+ ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+ ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build);
+ }
+
+ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
+ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+			/* the number of units to allocate is the number of
+			 * peers, plus 1 extra for the self peer on the target */
+			/* this needs to stay tied to the target configuration,
+			 * otherwise host and target can get out of sync */
+ num_units = TARGET_10X_NUM_PEERS + 1;
+ else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+ num_units = TARGET_10X_NUM_VDEVS + 1;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(arg.mem_reqs[i]->num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ __le32_to_cpu(arg.min_tx_power),
+ __le32_to_cpu(arg.max_tx_power),
+ __le32_to_cpu(arg.ht_cap),
+ __le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.sw_ver0),
+ __le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.fw_build),
+ __le32_to_cpu(arg.phy_capab),
+ __le32_to_cpu(arg.num_rf_chains),
+ __le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.num_mem_reqs));
+
+ complete(&ar->wmi.service_ready);
+}
+
+static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ struct wmi_ready_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->sw_version = ev->sw_version;
+ arg->abi_version = ev->abi_version;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ return 0;
+}
+
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_rdy_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ __le32_to_cpu(arg.sw_version),
+ __le32_to_cpu(arg.abi_version),
+ arg.mac_addr,
+ __le32_to_cpu(arg.status));
+
+ ether_addr_copy(ar->mac_addr, arg.mac_addr);
+ complete(&ar->wmi.unified_ready);
+ return 0;
+}
+
+static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
+{
+ const struct wmi_pdev_temperature_event *ev;
+
+ ev = (struct wmi_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally even in UTF mode so that we
+	 * know the UTF firmware has booted; all other WMI events are simply
+	 * passed through to testmode.
+	 */
+ if (consumed && id != WMI_10X_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_10X_PDEV_UTF_EVENTID:
+ /* ignore utf events */
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_2_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_10_2_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_2_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_10_2_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_2_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_10_2_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_10_2_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_2_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_2_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_2_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_10_2_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_10_2_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10_2_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10_2_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10_2_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_10_2_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_2_RTT_KEEPALIVE_EVENTID:
+ case WMI_10_2_GPIO_INPUT_EVENTID:
+ case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_2_GENERIC_BUFFER_EVENTID:
+ case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
+ case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
+ case WMI_10_2_WDS_PEER_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = ath10k_wmi_rx(ar, skb);
+ if (ret)
+ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
+}
+
+int ath10k_wmi_connect(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ar->wmi.eid = conn_resp.eid;
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
+ rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
+ id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->param_id = __cpu_to_le32(id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ id, value);
+ return skb;
+}
+
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
+{
+ struct host_memory_chunk *chunk;
+ int i;
+
+ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ chunk = &chunks->items[i];
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+ }
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config config = {};
+ u32 len, val;
+
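+	/* resource config for the main firmware, using the default target limits */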
+ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
+ config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+ config.num_offload_reorder_bufs =
+ __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+ config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+ val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_2 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val, features;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_2 *)buf->data;
+
+ features = WMI_10_2_RX_BATCH_MODE;
+ cmd->resource_config.feature_mask = __cpu_to_le32(features);
+
+ memcpy(&cmd->resource_config.common, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
+ return buf;
+}
+
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+{
+ if (arg->ie_len && !arg->ie)
+ return -EINVAL;
+ if (arg->n_channels && !arg->channels)
+ return -EINVAL;
+ if (arg->n_ssids && !arg->ssids)
+ return -EINVAL;
+ if (arg->n_bssids && !arg->bssids)
+ return -EINVAL;
+
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
+
+ return 0;
+}
+
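+/* Compute the space needed for the optional TLVs (IE data, channel list,
+ * SSID list and BSSID list) appended to the start scan command.
+ */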
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+ int len = 0;
+
+ if (arg->ie_len) {
+ len += sizeof(struct wmi_ie_data);
+ len += roundup(arg->ie_len, 4);
+ }
+
+ if (arg->n_channels) {
+ len += sizeof(struct wmi_chan_list);
+ len += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ len += sizeof(struct wmi_ssid_list);
+ len += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ len += sizeof(struct wmi_bssid_list);
+ len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ return len;
+}
+
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
+{
+ u32 scan_id;
+ u32 scan_req_id;
+
+ scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+ scan_id |= arg->scan_id;
+
+ scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+ scan_req_id |= arg->scan_req_id;
+
+ cmn->scan_id = __cpu_to_le32(scan_id);
+ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmn->idle_time = __cpu_to_le32(arg->idle_time);
+ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
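+/* Pack the optional TLVs back to back; their sizes must stay in sync with
+ * ath10k_wmi_start_scan_tlvs_len().
+ */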
+static void
+ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ void *ptr = tlvs->tlvs;
+ int i;
+
+ if (arg->n_channels) {
+ channels = ptr;
+ channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++)
+ channels->channel_list[i].freq =
+ __cpu_to_le16(arg->channels[i]);
+
+ ptr += sizeof(*channels);
+ ptr += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ ssids = ptr;
+ ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+ ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids->ssids[i].ssid_len =
+ __cpu_to_le32(arg->ssids[i].len);
+ memcpy(&ssids->ssids[i].ssid,
+ arg->ssids[i].ssid,
+ arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*ssids);
+ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ bssids = ptr;
+ bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+ bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+ for (i = 0; i < arg->n_bssids; i++)
+ memcpy(&bssids->bssid_list[i],
+ arg->bssids[i].bssid,
+ ETH_ALEN);
+
+ ptr += sizeof(*bssids);
+ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ if (arg->ie_len) {
+ ie = ptr;
+ ie->tag = __cpu_to_le32(WMI_IE_TAG);
+ ie->ie_len = __cpu_to_le32(arg->ie_len);
+ memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*ie);
+ ptr += roundup(arg->ie_len, 4);
+ }
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_10x_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
+ return skb;
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+ struct wmi_start_scan_arg *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+ | WMI_SCAN_EVENT_COMPLETED
+ | WMI_SCAN_EVENT_BSS_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+ | WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->n_bssids = 1;
+ arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
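+ /* Host-initiated scans carry well-known id prefixes (see
+ * ath10k_wmi_put_start_scan_common()), so tag the ids here as well.
+ */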
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+ arg->req_id, arg->req_type, arg->u.scan_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(type);
+ cmd->vdev_subtype = __cpu_to_le32(subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+ vdev_id, type, subtype, macaddr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev delete id %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ const char *cmdname;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ if (restart)
+ cmdname = "restart";
+ else
+ cmdname = "start";
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+ cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
+ cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ cmd->chan.flags, arg->channel.max_power);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev down id 0x%x\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ if (arg->key_data)
+ memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer create vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, tid_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi set powersave id 0x%x mode %d\n",
+ vdev_id, psmode);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+ vdev_id, param_id, value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+ vdev_id, param_id, value, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel_arg *ch;
+ struct wmi_channel *ci;
+ int len;
+ int i;
+
+ len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+ ci = &cmd->chan_info[i];
+
+ ath10k_wmi_put_wmi_channel(ci, ch);
+ }
+
+ return skb;
+}
+
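+/* Fill the fields shared by all peer assoc command layouts
+ * (main, 10.1 and 10.2).
+ */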
+static void
+ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
+ cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
+ cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+
+ cmd->peer_legacy_rates.num_rates =
+ __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ cmd->peer_ht_rates.num_rates =
+ __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ cmd->peer_vht_rates.rx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ cmd->peer_vht_rates.rx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ cmd->peer_vht_rates.tx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ cmd->peer_vht_rates.tx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
+ int max_mcs, max_nss;
+ u32 info0;
+
+ /* TODO: Is using max values okay with firmware? */
+ max_mcs = 0xf;
+ max_nss = 0xf;
+
+ info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
+ SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ cmd->info0 = __cpu_to_le32(info0);
+}
+
+static int
+ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
+{
+ if (arg->peer_mpdu_density > 16)
+ return -EINVAL;
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
+ return skb;
+}
+
+/* This function assumes the beacon is already DMA mapped */
+static struct sk_buff *
+ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
+ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+ cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ return skb;
+}
+
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg)
+{
+ params->cwmin = __cpu_to_le32(arg->cwmin);
+ params->cwmax = __cpu_to_le32(arg->cwmax);
+ params->aifs = __cpu_to_le32(arg->aifs);
+ params->txop = __cpu_to_le32(arg->txop);
+ params->acm = __cpu_to_le32(arg->acm);
+ params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_pdev_set_wmm_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+ stats_mask);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ u32 log_level)
+{
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le32(module_enable);
+ cmd->module_valid = __cpu_to_le32(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+ struct wmi_pdev_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
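+ /* Limit the filter to event types covered by ATH10K_PKTLOG_ANY. */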
+ ev_bitmap &= ATH10K_PKTLOG_ANY;
+
+ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
+ ev_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_pdev_set_quiet_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac)
+{
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->buffersize = __cpu_to_le32(buf_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->statuscode = __cpu_to_le32(status);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->initiator = __cpu_to_le32(initiator);
+ cmd->reasoncode = __cpu_to_le32(reason);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+ return skb;
+}
+
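+/* Per-firmware-branch op tables; ops a branch does not implement are
+ * simply left unset.
+ */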
+static const struct wmi_ops wmi_ops = {
+ .rx = ath10k_wmi_op_rx,
+ .map_svc = wmi_main_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ /* .gen_pdev_get_temperature not implemented */
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
+static const struct wmi_ops wmi_10_1_ops = {
+ .rx = ath10k_wmi_10_1_op_rx,
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_1_op_gen_init,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with main branch */
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+};
+
+static const struct wmi_ops wmi_10_2_4_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+};
+
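+/* Select the command, op and parameter maps matching the firmware's WMI
+ * interface revision.
+ */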
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
+ ar->wmi.ops = &wmi_10_2_4_ops;
+ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ ar->wmi.ops = &wmi_10_2_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.ops = &wmi_10_1_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.ops = &wmi_ops;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ath10k_wmi_tlv_attach(ar);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "unsupported WMI op version: %d\n",
+ ar->wmi.op_version);
+ return -EINVAL;
+ }
+
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
+
+ ar->wmi.num_mem_chunks = 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 000000000..adf935bf0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,4948 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness with regard to WMI commands belongs to the host
+ * driver; the target is not required to validate parameters for value,
+ * range, or consistency.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range, 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ * command/event structures. Do not use u8, u16, bool or
+ * enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ * using masks if necessary. Do not use the programming language's bit
+ * field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ * the u32 variables. Use these macros for set/get of these fields.
+ * Try to use this to optimize the structure without bloating it with
+ * u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ * variable is already 4-byte aligned by virtue of being a u32
+ * type.
+ *
+ * 6. Comment each parameter of a WMI command/event structure by
+ * starting the C comment with two stars instead of one, so that
+ * HTML documentation can be generated with Doxygen.
+ *
+ */
+
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB 0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB 24
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+
+enum wmi_service {
+ WMI_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_DFS,
+ WMI_SERVICE_11AC,
+ WMI_SERVICE_BLOCKACK,
+ WMI_SERVICE_PHYERR,
+ WMI_SERVICE_BCN_FILTER,
+ WMI_SERVICE_RTT,
+ WMI_SERVICE_RATECTRL,
+ WMI_SERVICE_WOW,
+ WMI_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_NLO,
+ WMI_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_SCAN_SCH,
+ WMI_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CHATTER,
+ WMI_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_GPIO,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_TX_ENCAP,
+ WMI_SERVICE_BURST,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_EARLY_RX,
+ WMI_SERVICE_STA_SMPS,
+ WMI_SERVICE_FWTEST,
+ WMI_SERVICE_STA_WMMAC,
+ WMI_SERVICE_TDLS,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_WLAN_HB,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_QPOWER,
+ WMI_SERVICE_PLMREQ,
+ WMI_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_RMC,
+ WMI_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_COEX_SAR,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_NAN,
+ WMI_SERVICE_L1SS_STAT,
+ WMI_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_LPASS,
+ WMI_SERVICE_EXTSCAN,
+ WMI_SERVICE_D0WOW,
+ WMI_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD,
+
+ /* keep last */
+ WMI_SERVICE_MAX,
+};
+
+enum wmi_10x_service {
+ WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10X_SERVICE_AP_UAPSD,
+ WMI_10X_SERVICE_AP_DFS,
+ WMI_10X_SERVICE_11AC,
+ WMI_10X_SERVICE_BLOCKACK,
+ WMI_10X_SERVICE_PHYERR,
+ WMI_10X_SERVICE_BCN_FILTER,
+ WMI_10X_SERVICE_RTT,
+ WMI_10X_SERVICE_RATECTRL,
+ WMI_10X_SERVICE_WOW,
+ WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_10X_SERVICE_BURST,
+
+ /* introduced in 10.2 */
+ WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+};
+
+enum wmi_main_service {
+ WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_MAIN_SERVICE_AP_DFS,
+ WMI_MAIN_SERVICE_11AC,
+ WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_MAIN_SERVICE_PHYERR,
+ WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_MAIN_SERVICE_RTT,
+ WMI_MAIN_SERVICE_RATECTRL,
+ WMI_MAIN_SERVICE_WOW,
+ WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_MAIN_SERVICE_NLO,
+ WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_MAIN_SERVICE_CHATTER,
+ WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_MAIN_SERVICE_GPIO,
+ WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_MAIN_SERVICE_TX_ENCAP,
+};
+
+static inline char *wmi_service_name(int service_id)
+{
+#define SVCSTR(x) case x: return #x
+
+ switch (service_id) {
+ SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
+ SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_PWRSAVE);
+ SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ SVCSTR(WMI_SERVICE_AP_UAPSD);
+ SVCSTR(WMI_SERVICE_AP_DFS);
+ SVCSTR(WMI_SERVICE_11AC);
+ SVCSTR(WMI_SERVICE_BLOCKACK);
+ SVCSTR(WMI_SERVICE_PHYERR);
+ SVCSTR(WMI_SERVICE_BCN_FILTER);
+ SVCSTR(WMI_SERVICE_RTT);
+ SVCSTR(WMI_SERVICE_RATECTRL);
+ SVCSTR(WMI_SERVICE_WOW);
+ SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
+ SVCSTR(WMI_SERVICE_IRAM_TIDS);
+ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_NLO);
+ SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_SCH);
+ SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
+ SVCSTR(WMI_SERVICE_CHATTER);
+ SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
+ SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
+ SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
+ SVCSTR(WMI_SERVICE_GPIO);
+ SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
+ SVCSTR(WMI_SERVICE_TX_ENCAP);
+ SVCSTR(WMI_SERVICE_BURST);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
+ SVCSTR(WMI_SERVICE_EARLY_RX);
+ SVCSTR(WMI_SERVICE_STA_SMPS);
+ SVCSTR(WMI_SERVICE_FWTEST);
+ SVCSTR(WMI_SERVICE_STA_WMMAC);
+ SVCSTR(WMI_SERVICE_TDLS);
+ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
+ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
+ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
+ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
+ SVCSTR(WMI_SERVICE_WLAN_HB);
+ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
+ SVCSTR(WMI_SERVICE_BATCH_SCAN);
+ SVCSTR(WMI_SERVICE_QPOWER);
+ SVCSTR(WMI_SERVICE_PLMREQ);
+ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
+ SVCSTR(WMI_SERVICE_RMC);
+ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
+ SVCSTR(WMI_SERVICE_COEX_SAR);
+ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
+ SVCSTR(WMI_SERVICE_NAN);
+ SVCSTR(WMI_SERVICE_L1SS_STAT);
+ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
+ SVCSTR(WMI_SERVICE_OBSS_SCAN);
+ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
+ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
+ SVCSTR(WMI_SERVICE_LPASS);
+ SVCSTR(WMI_SERVICE_EXTSCAN);
+ SVCSTR(WMI_SERVICE_D0WOW);
+ SVCSTR(WMI_SERVICE_HSOFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
+ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
+ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
+ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ default:
+ return NULL;
+ }
+
+#undef SVCSTR
+}
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+ BIT((svc_id)%(sizeof(u32))))
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+ __set_bit(y, out); \
+ } while (0)
+
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10X_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10X_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10X_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10X_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10X_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10X_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10X_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+}
+
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_MAIN_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_MAIN_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_MAIN_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_MAIN_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+}
+
+#undef SVCMAP
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
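+/* Illustrative sketch only (assumed helper name): thanks to the union above
+ * the host can simply fill the byte view of the address; word0/word1 is how
+ * the same six bytes travel over WMI. memcpy() is assumed to be reachable
+ * through the existing kernel includes.
+ */
+static inline void wmi_example_set_mac_addr(struct wmi_mac_addr *dst,
+ const u8 *mac)
+{
+ memcpy(dst->addr, mac, sizeof(dst->addr));
+}
+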
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+ u32 pdev_get_temperature_cmdid;
+ u32 vdev_set_wmm_params_cmdid;
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV,
+ WMI_GRP_VDEV,
+ WMI_GRP_PEER,
+ WMI_GRP_MGMT,
+ WMI_GRP_BA_NEG,
+ WMI_GRP_STA_PS,
+ WMI_GRP_DFS,
+ WMI_GRP_ROAM,
+ WMI_GRP_OFL_SCAN,
+ WMI_GRP_P2P,
+ WMI_GRP_AP_PS,
+ WMI_GRP_RATE_CTRL,
+ WMI_GRP_PROFILE,
+ WMI_GRP_SUSPEND,
+ WMI_GRP_BCN_FILTER,
+ WMI_GRP_WOW,
+ WMI_GRP_RTT,
+ WMI_GRP_SPECTRAL,
+ WMI_GRP_STATS,
+ WMI_GRP_ARP_NS_OFL,
+ WMI_GRP_NLO_OFL,
+ WMI_GRP_GTK_OFL,
+ WMI_GRP_CSA_OFL,
+ WMI_GRP_CHATTER,
+ WMI_GRP_TID_ADDBA,
+ WMI_GRP_MISC,
+ WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
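+/* Worked example of the encoding above: WMI_GRP_SCAN is 0x3, so
+ * WMI_CMD_GRP(WMI_GRP_SCAN) = (0x3 << 12) | 0x1 = 0x3001, which is the
+ * value WMI_START_SCAN_CMDID takes below; each group therefore owns its
+ * own 0x1000-wide id range starting at offset 1.
+ */
+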
+#define WMI_CMD_UNSUPPORTED 0
+
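+/* Illustrative sketch only: the driver keeps one wmi_cmd_map per firmware
+ * branch (typically filled in wmi.c) so shared code can use an abstract
+ * command name and have it translated to that branch's numeric id;
+ * commands a branch does not implement are expected to stay at
+ * WMI_CMD_UNSUPPORTED (0). The helper name is made up for illustration.
+ */
+static inline bool wmi_example_cmd_supported(u32 cmdid)
+{
+ return cmdid != WMI_CMD_UNSUPPORTED;
+}
+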
+/* Command IDs and command events for MAIN FW. */
+enum wmi_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+
+ /* Scan specific commands */
+ WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+ /* PDEV (physical device) specific commands */
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+ /* VDEV (virtual device) specific commands */
+ WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+ WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+
+ /* commands to control BA negotiation directly from the host. */
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+
+ /** DFS-specific commands */
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+
+ /* Roaming specific commands */
+ WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+
+ /* offload scan specific commands */
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+ /* AP power save specific config */
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_PEER_RATE_RETRY_SCHED_CMDID =
+ WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+ /* WLAN Profiling commands. */
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+
+ /* WOW Specific WMI commands*/
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+
+ /* spectral scan commands */
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+ /* F/W stats */
+ WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+ /* ARP OFFLOAD REQUEST*/
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+ /* network list offload (NLO) config */
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+ /* GTK offload Specific WMI commands*/
+ WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+ /* CSA offload Specific WMI commands*/
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+ /* Chatter commands*/
+ WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+ /* addba specific commands */
+ WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+
+ /* set station DTIM power save method */
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ /* Configure the Station UAPSD AC Auto Trigger Parameters */
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+ /* STA keep alive parameter configuration,
+ * requires WMI_SERVICE_STA_KEEP_ALIVE
+ */
+ WMI_STA_KEEPALIVE_CMD,
+
+ /* misc command group */
+ WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+
+ /* GPIO Configuration */
+ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+
+ /* Scan specific events */
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+ /* PDEV specific events */
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+
+ /* VDEV specific events */
+ WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+ /* peer specific events */
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+ /* beacon/mgmt specific events */
+ WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+ /* ADDBA Related WMI Events*/
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+
+ /* WoW */
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+ /* RTT */
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+
+ /* GTK offload */
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+
+ /* CSA IE received event */
+ WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+ /* Misc events */
+ WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+ /* GPIO Event */
+ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+ /* initialize the wlan sub system */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+ /* PDEV(physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+ /* VDEV(virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+ /* commands to control ba negotiation directly from the host. */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+ /* WOW Specific WMI commands*/
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+ /* VI specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+ /*RTT related event ID*/
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
+enum wmi_10_2_cmd_id {
+ WMI_10_2_START_CMDID = 0x9000,
+ WMI_10_2_END_CMDID = 0x9FFF,
+ WMI_10_2_INIT_CMDID,
+ WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
+ WMI_10_2_STOP_SCAN_CMDID,
+ WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ WMI_10_2_ECHO_CMDID,
+ WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_2_PDEV_SET_PARAM_CMDID,
+ WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_2_VDEV_CREATE_CMDID,
+ WMI_10_2_VDEV_DELETE_CMDID,
+ WMI_10_2_VDEV_START_REQUEST_CMDID,
+ WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_2_VDEV_UP_CMDID,
+ WMI_10_2_VDEV_STOP_CMDID,
+ WMI_10_2_VDEV_DOWN_CMDID,
+ WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_2_VDEV_SET_PARAM_CMDID,
+ WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_2_PEER_CREATE_CMDID,
+ WMI_10_2_PEER_DELETE_CMDID,
+ WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_2_PEER_SET_PARAM_CMDID,
+ WMI_10_2_PEER_ASSOC_CMDID,
+ WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ WMI_10_2_BCN_TX_CMDID,
+ WMI_10_2_BCN_PRB_TMPL_CMDID,
+ WMI_10_2_BCN_FILTER_RX_CMDID,
+ WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_2_MGMT_TX_CMDID,
+ WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_2_ADDBA_SEND_CMDID,
+ WMI_10_2_ADDBA_STATUS_CMDID,
+ WMI_10_2_DELBA_SEND_CMDID,
+ WMI_10_2_ADDBA_SET_RESP_CMDID,
+ WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_2_DBGLOG_CFG_CMDID,
+ WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_2_PDEV_QVIT_CMDID,
+ WMI_10_2_ROAM_SCAN_MODE,
+ WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_2_ROAM_SCAN_PERIOD,
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_2_ROAM_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_PERIOD,
+ WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_2_P2P_GO_SET_BEACON_IE,
+ WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_2_PDEV_SUSPEND_CMDID,
+ WMI_10_2_PDEV_RESUME_CMDID,
+ WMI_10_2_ADD_BCN_FILTER_CMDID,
+ WMI_10_2_RMV_BCN_FILTER_CMDID,
+ WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_2_WOW_ENABLE_CMDID,
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_2_RTT_MEASREQ_CMDID,
+ WMI_10_2_RTT_TSF_CMDID,
+ WMI_10_2_RTT_KEEPALIVE_CMDID,
+ WMI_10_2_PDEV_SEND_BCN_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_2_REQUEST_STATS_CMDID,
+ WMI_10_2_GPIO_CONFIG_CMDID,
+ WMI_10_2_GPIO_OUTPUT_CMDID,
+ WMI_10_2_VDEV_RATEMASK_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_2_FORCE_FW_HANG_CMDID,
+ WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_2_PDEV_GET_INFO,
+ WMI_10_2_VDEV_GET_INFO,
+ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_2_PEER_ATF_REQUEST_CMDID,
+ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
+};
+
+enum wmi_10_2_event_id {
+ WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_2_READY_EVENTID,
+ WMI_10_2_DEBUG_MESG_EVENTID,
+ WMI_10_2_START_EVENTID = 0x9000,
+ WMI_10_2_END_EVENTID = 0x9FFF,
+ WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
+ WMI_10_2_ECHO_EVENTID,
+ WMI_10_2_UPDATE_STATS_EVENTID,
+ WMI_10_2_INST_RSSI_STATS_EVENTID,
+ WMI_10_2_VDEV_START_RESP_EVENTID,
+ WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_2_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_2_VDEV_STOPPED_EVENTID,
+ WMI_10_2_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_2_HOST_SWBA_EVENTID,
+ WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_2_MGMT_RX_EVENTID,
+ WMI_10_2_CHAN_INFO_EVENTID,
+ WMI_10_2_PHYERR_EVENTID,
+ WMI_10_2_ROAM_EVENTID,
+ WMI_10_2_PROFILE_MATCH,
+ WMI_10_2_DEBUG_PRINT_EVENTID,
+ WMI_10_2_PDEV_QVIT_EVENTID,
+ WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_2_RTT_KEEPALIVE_EVENTID,
+ WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_2_DCS_INTERFERENCE_EVENTID,
+ WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_2_GPIO_INPUT_EVENTID,
+ WMI_10_2_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_2_GENERIC_BUFFER_EVENTID,
+ WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_2_WDS_PEER_EVENTID,
+ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ /* MODE_11AC_VHT160 = 11, */
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_UNKNOWN = 14,
+ MODE_MAX = 14
+};
+
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled */
+ };
+
+ return "<unknown>";
+}
+
+#define WMI_CHAN_LIST_TAG 0x1
+#define WMI_SSID_LIST_TAG 0x2
+#define WMI_BSSID_LIST_TAG 0x3
+#define WMI_IE_TAG 0x4
+
+struct wmi_channel {
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+ union {
+ __le32 flags; /* WMI_CHAN_FLAG_ */
+ struct {
+ u8 mode; /* only 6 LSBs */
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo0;
+ struct {
+ /* note: power unit is 0.5 dBm */
+ u8 min_power;
+ u8 max_power;
+ u8 reg_power;
+ u8 reg_classid;
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo1;
+ struct {
+ u8 antenna_max;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ /* note: power unit is 0.5 dBm */
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ u32 reg_class_id;
+ enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+ WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+ WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
+#define WMI_CHAN_FLAG_DFS (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
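+/* Illustrative sketch only (assumed helper name): folding a host-order
+ * wmi_channel_arg into the on-the-wire flags word of wmi_channel. The low
+ * six bits carry the phy mode (see the union above); only some of the
+ * boolean flags are shown.
+ */
+static inline __le32 wmi_example_channel_flags(const struct wmi_channel_arg *arg)
+{
+ u32 flags = arg->mode & 0x3f; /* mode occupies the 6 LSBs */
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ return __cpu_to_le32(flags);
+}
+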
+#define WMI_MAX_SPATIAL_STREAM 3
+
+/* HT Capabilities*/
+#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
+#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+/*
+ * The WMI_VHT_CAP_* masks map to the IEEE 802.11ac VHT capability
+ * information field. Fields not defined here are unsupported or reserved.
+ * Do not change these masks; if a new one has to be added, follow the
+ * bit layout specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+/* The following also refer for max HT AMSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS Map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
+
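+/* Worked example of the MCS map macro above: for spatial stream 3 the 2-bit
+ * field sits at bit offset (3 - 1) << 1 = 4, so
+ * WMI_VHT_MAX_MCS_4_SS_MASK(2, 3) = (3 & 2) << 4 = 0x20, i.e. the
+ * "MCS 0-9" code (2) placed in the third stream's slot of the 802.11ac map.
+ */
+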
+enum {
+ REGDMN_MODE_11A = 0x00001, /* 11a channels */
+ REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
+ REGDMN_MODE_11B = 0x00004, /* 11b channels */
+ REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
+ REGDMN_MODE_11G = 0x00008, /* XXX historical */
+ REGDMN_MODE_108G = 0x00020, /* 11a+Turbo channels */
+ REGDMN_MODE_108A = 0x00040, /* 11g+Turbo channels */
+ REGDMN_MODE_XR = 0x00100, /* XR channels */
+ REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
+ REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+ REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
+ REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
+ REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
+ REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
+ REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
+ REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
+ REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */
+ REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
+ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
+ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
+ REGDMN_MODE_ALL = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+struct hal_reg_capabilities {
+ /* regdomain value specified in EEPROM */
+ __le32 eeprom_rd;
+ /* extended regdomain value specified in EEPROM */
+ __le32 eeprom_rd_ext;
+ /* CAP1 capabilities bit map. */
+ __le32 regcap1;
+ /* REGDMN EEPROM CAP. */
+ __le32 regcap2;
+ /* REGDMN MODE */
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+ WHAL_WLAN_11A_CAPABILITY = 0x1,
+ WHAL_WLAN_11G_CAPABILITY = 0x2,
+ WHAL_WLAN_11AG_CAPABILITY = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+ /* ID of the request */
+ __le32 req_id;
+ /* size of each unit */
+ __le32 unit_size;
+ /* flags to indicate that
+ * the number of units is dependent
+ * on the number of resources (num vdevs, num peers, etc.)
+ */
+ __le32 num_unit_info;
+ /*
+ * actual number of units to allocate. If the flags in num_unit_info
+ * indicate that the number of units is tied to the number of a
+ * particular resource, then the num_units field is set to 0 and the
+ * host will derive the number of units from the number of resources
+ * it is requesting.
+ */
+ __le32 num_units;
+} __packed;
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event, e.g. 802.11ac passes some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+ __le32 sw_version;
+ __le32 sw_version_1;
+ __le32 abi_version;
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ struct hal_reg_capabilities hal_reg_capabilities;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+ /*
+ * Max beacon and Probe Response IE offload size
+ * (includes optional P2P IEs)
+ */
+ __le32 max_bcn_ie_size;
+ /*
+ * request to host to allocate a chunk of memory and pass it down to FW
+ * via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+ struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_service_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to host to allocate a chunk of memory and pass it down to FW
+ * via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
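+/* Illustrative sketch only: these timeouts pair with the completions set up
+ * in ath10k_wmi_attach() earlier in this patch. Waiting for the service
+ * ready event would look roughly like this; <linux/completion.h> and
+ * <linux/errno.h> are assumed to be reachable where such code lives (wmi.c).
+ */
+static inline int wmi_example_wait_ready(struct completion *ready)
+{
+ unsigned long left;
+
+ left = wait_for_completion_timeout(ready, WMI_SERVICE_READY_TIMEOUT_HZ);
+ return left ? 0 : -ETIMEDOUT;
+}
+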
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /*
+ * In offload mode target supports features like WOW, chatter and
+ * other protocol offloads. In order to support them some
+ * functionalities like reorder buffering, PN checking need to be
+ * done in target. This determines the maximum number of peers supported
+ * by the target in offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* For target-based RX reordering */
+ __le32 num_offload_reorder_bufs;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g. for the target PCI limit this can be
+ * 0 - default, 1 - 256 bytes
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* maximum VDEV that could use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g. for the target PCI limit this can be
+ * 0 - default, 1 - 256 bytes
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
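+
+/*
+ * Illustrative sketch, not part of the original header: how a host might
+ * fill a few of the 10.X resource config fields documented above. The
+ * helper name and the chosen values are examples only, not recommended
+ * defaults.
+ */
+static inline void
+wmi_10x_resource_config_fill_example(struct wmi_resource_config_10x *cfg)
+{
+	/* per-class rx reorder timeouts, in milliseconds */
+	cfg->rx_timeout_pri_vo = __cpu_to_le32(40);
+	cfg->rx_timeout_pri_be = __cpu_to_le32(100);
+
+	/* mode 2: convert known groups, send unknown groups as multicast */
+	cfg->mcast2ucast_mode = __cpu_to_le32(2);
+}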
+
+enum wmi_10_2_feature_mask {
+ WMI_10_2_RX_BATCH_MODE = BIT(0),
+ WMI_10_2_ATF_CONFIG = BIT(1),
+};
+
+struct wmi_resource_config_10_2 {
+ struct wmi_resource_config_10x common;
+ __le32 max_peer_ext_stats;
+ __le32 smart_ant_cap; /* 0-disable, 1-enable */
+ __le32 bk_min_free;
+ __le32 be_min_free;
+ __le32 vi_min_free;
+ __le32 vo_min_free;
+ __le32 feature_mask;
+} __packed;
+
+#define NUM_UNITS_IS_NUM_VDEVS 0x1
+#define NUM_UNITS_IS_NUM_PEERS 0x2
+
+/* structure describing a host memory chunk. */
+struct host_memory_chunk {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+ /* size of the chunk */
+ __le32 size;
+} __packed;
+
+struct wmi_host_mem_chunks {
+ __le32 count;
+ /* some fw revisions require at least 1 chunk regardless of count */
+ struct host_memory_chunk items[1];
+} __packed;
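+
+/*
+ * Illustrative sketch, not part of the original header: byte size of a
+ * wmi_host_mem_chunks block carrying 'count' entries, honoring the note
+ * above that some fw revisions expect at least one chunk. Helper name is
+ * hypothetical.
+ */
+static inline size_t wmi_host_mem_chunks_len_example(u32 count)
+{
+	u32 n = count ? count : 1;
+
+	/* items[1] already accounts for one chunk */
+	return sizeof(struct wmi_host_mem_chunks) +
+	       (n - 1) * sizeof(struct host_memory_chunk);
+}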
+
+struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_2 {
+ struct wmi_resource_config_10_2 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_chan_list_entry {
+ __le16 freq;
+ u8 phy_mode; /* valid for 10.2 only */
+ u8 reserved;
+} __packed;
+
+/* TLV for channel list */
+struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+ struct wmi_chan_list_entry channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+ __le32 tag; /* WMI_BSSID_LIST_TAG */
+ __le32 num_bssid;
+ struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+ __le32 tag; /* WMI_IE_TAG */
+ __le32 ie_len;
+ u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+ __le32 ssid_len;
+ u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+ __le32 tag; /* WMI_SSID_LIST_TAG */
+ __le32 num_ssids;
+ struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions, which
+ * then complete the scan with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+struct wmi_start_scan_common {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+ /* VDEV id(interface) that is requesting scan */
+ __le32 vdev_id;
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ *
+ * The scanner will rest on the bss channel at least min_rest_time.
+ * After min_rest_time the scanner will start checking for tx/rx
+ * activity on all VDEVs. If there is no activity the scanner will
+ * switch to off channel. If there is activity the scanner will let
+ * the radio stay on the bss channel until max_rest_time expires. At
+ * max_rest_time the scanner will switch to off channel irrespective
+ * of activity. Activity is determined by the idle_time parameter.
+ */
+ __le32 max_rest_time;
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe request transmission with
+ * the period specified by repeat_probe_time.
+ * The number of probe requests sent depends on the ssid_list
+ * and bssid_list.
+ */
+ __le32 repeat_probe_time;
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+} __packed;
+
+struct wmi_start_scan_tlvs {
+ /* TLV parameters. These include the channel list, ssid list, bssid list,
+ * extra ies.
+ */
+ u8 tlvs[0];
+} __packed;
+
+struct wmi_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u16 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+/* add a wildcard ssid probe request even if ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
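+
+/*
+ * Illustrative sketch, not part of the original header: a plausible
+ * combination of the scan control flags above for an active scan that
+ * adds a broadcast probe request, both rate sets and channel stat
+ * reporting. The macro name is hypothetical.
+ */
+#define WMI_SCAN_EXAMPLE_ACTIVE_FLAGS (WMI_SCAN_ADD_BCAST_PROBE_REQ | \
+				       WMI_SCAN_ADD_CCK_RATES | \
+				       WMI_SCAN_ADD_OFDM_RATES | \
+				       WMI_SCAN_CHAN_STAT_EVENT)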
+
+enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+ WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+ u32 req_id;
+ enum wmi_stop_scan_type req_type;
+ union {
+ u32 scan_id;
+ u32 vdev_id;
+ } u;
+};
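+
+/*
+ * Illustrative sketch, not part of the original header: filling
+ * wmi_stop_scan_arg for the "stop all scans on one vdev" case. Which
+ * union member is valid depends on req_type; the helper name and the
+ * req_id value are examples only.
+ */
+static inline void wmi_stop_scan_vdev_all_example(struct wmi_stop_scan_arg *arg,
+						  u32 vdev_id)
+{
+	arg->req_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+	arg->req_type = WMI_SCAN_STOP_VDEV_ALL;
+	arg->u.vdev_id = vdev_id;	/* u.scan_id is unused for this type */
+}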
+
+struct wmi_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+ struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+ u32 n_channels;
+ struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+ WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
+ WMI_BSS_FILTER_ALL, /* all beacons forwarded */
+ WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
+ WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+ WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
+ WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
+ WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
+ WMI_BSS_FILTER_LAST_BSS, /* marker only */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = 0x1,
+ WMI_SCAN_EVENT_COMPLETED = 0x2,
+ WMI_SCAN_EVENT_BSS_CHANNEL = 0x4,
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
+ WMI_SCAN_EVENT_DEQUEUED = 0x10,
+ WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_START_FAILED = 0x40,
+ WMI_SCAN_EVENT_RESTARTED = 0x80,
+ WMI_SCAN_EVENT_MAX = 0x8000
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM 52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr_v1 {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
+ u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define PHY_ERROR_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_RADAR 0x05
+
+struct wmi_phyerr {
+ __le32 tsf_timestamp;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 buf_len;
+ u8 buf[0];
+} __packed;
+
+struct wmi_phyerr_event {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ struct wmi_phyerr phyerrs[0];
+} __packed;
+
+#define PHYERR_TLV_SIG 0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
+#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
+
+struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+ __le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0
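+
+/*
+ * Illustrative sketch, not part of the original header: extracting one of
+ * the radar report fields above from the little-endian register word
+ * using its MASK/LSB pair. Helper name is hypothetical.
+ */
+static inline u32
+phyerr_radar_report_pulse_dur_example(const struct phyerr_radar_report *rr)
+{
+	return (__le32_to_cpu(rr->reg1) & RADAR_REPORT_REG1_PULSE_DUR_MASK) >>
+	       RADAR_REPORT_REG1_PULSE_DUR_LSB;
+}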
+
+struct phyerr_fft_report {
+ __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+ __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
+
+struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+ u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE 50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40
+
+struct wmi_mgmt_tx_hdr {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+ struct wmi_mgmt_tx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+ __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+} __packed;
+
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /* Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+ /* period in TUs */
+ __le32 period;
+
+ /* duration in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+
+ /* enable/disable */
+ __le32 enabled;
+} __packed;
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+ ATH10K_PROT_NONE = 0, /* no protection */
+ ATH10K_PROT_CTSONLY = 1, /* CTS to self */
+ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
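+
+/*
+ * Illustrative sketch, not part of the original header: composing the
+ * value for the RTS/CTS vdev parameter from the enable bit and profile
+ * fields defined above. Helper name is hypothetical.
+ */
+static inline u32 wmi_rtscts_param_value_example(enum wmi_rtscts_profile profile)
+{
+	return (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) |
+	       ((u32)profile << WMI_RTSCTS_PROFILE_LSB);
+}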
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+ WMI_CSA_IE_PRESENT = 0x00000001,
+ WMI_XCSA_IE_PRESENT = 0x00000002,
+ WMI_WBW_IE_PRESENT = 0x00000004,
+ WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+ __le32 i_fc_dur;
+ /* Bit 0-15: FC */
+ /* Bit 16-31: DUR */
+ struct wmi_mac_addr i_addr1;
+ struct wmi_mac_addr i_addr2;
+ __le32 csa_ie[2];
+ __le32 xcsa_ie[2];
+ __le32 wb_ie[2];
+ __le32 cswarp_ie;
+ __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+ u32 cal_period;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
+enum wmi_pdev_param {
+ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon generation mode. 0: staggered, 1: bursted */
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off chan mode.
+ * 0: turn off off chan mode. 1: turn on offchan mode
+ */
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+ */
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ /*
+ * Dynamic bandwidth - 0: disable, 1: enable
+ *
+ * When enabled HW rate control tries different bandwidths when
+ * retransmitting frames.
+ */
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ /* RX buffering flush enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ /* RX buffering watermark */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ /* RX buffering timeout enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ /* RX buffering timeout value */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ /* pdev level stats update period in ms */
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP frames are sent */
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable proxy STA */
+ WMI_PDEV_PARAM_PROXY_STA,
+ /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ /* Enable/Disable power gating sleep */
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon generation mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off chan mode.
+ * 0: turn off off chan mode. 1: turn on offchan mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable Fast channel reset */
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set Bursting DUR */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set Bursting Enable */
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10X_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10X_PDEV_PARAM_RX_FILTER,
+ WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
+ WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_PDEV_PARAM_CAL_PERIOD
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
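+
+/*
+ * Illustrative sketch, not part of the original header: filling a
+ * wmi_pdev_set_param_cmd payload. The param id must come from the enum
+ * matching the firmware branch in use (wmi_pdev_param vs
+ * wmi_10x_pdev_param). Helper name is hypothetical.
+ */
+static inline void wmi_pdev_set_param_fill_example(struct wmi_pdev_set_param_cmd *cmd,
+						    u32 param_id, u32 value)
+{
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(value);
+}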
+
+/* valid period is 1 - 60000 ms */
+#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
+
+struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+} __packed;
+
+#define WMI_TPC_RATE_MAX 160
+#define WMI_TPC_TX_N_CHAIN 4
+
+enum wmi_tpc_config_event_flag {
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+ WMI_TP_SCALE_MAX = 0, /* no scaling (default) */
+ WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */
+ WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */
+ WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */
+ WMI_TP_SCALE_MIN = 4, /* min, but still on */
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+};
+
+struct wmi_pdev_chanlist_update_event {
+ /* number of channels */
+ __le32 num_chan;
+ /* array of channels */
+ struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+ /* message buffer, NULL terminated */
+ char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+ /* P2P device */
+ VDEV_SUBTYPE_P2PDEV = 0,
+ /* P2P client */
+ VDEV_SUBTYPE_P2PCLI,
+ /* P2P GO */
+ VDEV_SUBTYPE_P2PGO,
+ /* BT3.0 HS */
+ VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+ /* ignore power, only use flags, mode and freq */
+ struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_pktlog_enable_cmd {
+ __le32 ev_bitmap;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+ /* map indicating DSCP to TID conversion */
+ __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
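+
+/*
+ * Illustrative sketch, not part of the original header: filling the
+ * DSCP-to-TID map with a simple example policy that uses the upper three
+ * DSCP bits (IP precedence) as the TID (0-7). Helper name and policy are
+ * examples only.
+ */
+static inline void
+wmi_dscp_tid_map_fill_example(struct wmi_pdev_set_dscp_tid_map_cmd *cmd)
+{
+	int dscp;
+
+	for (dscp = 0; dscp < WMI_DSCP_MAP_MAX; dscp++)
+		cmd->dscp_to_tid_map[dscp] = __cpu_to_le32(dscp >> 3);
+}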
+
+enum mcast_bcast_rate_id {
+ WMI_SET_MCAST_RATE,
+ WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+ enum mcast_bcast_rate_id rate_id;
+ __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txop;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+ struct wmi_wmm_params ac_be;
+ struct wmi_wmm_params ac_bk;
+ struct wmi_wmm_params ac_vi;
+ struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txop;
+ u32 acm;
+ u32 no_ack;
+};
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requed by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats_peer {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_STAT_PEER = BIT(0),
+ WMI_STAT_AP = BIT(1),
+ WMI_STAT_PDEV = BIT(2),
+ WMI_STAT_VDEV = BIT(3),
+ WMI_STAT_BCNFLT = BIT(4),
+ WMI_STAT_VDEV_RATE = BIT(5),
+};
+
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
+struct wmi_request_stats_cmd {
+ __le32 stats_id;
+
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
+} __packed;
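+
+/*
+ * Illustrative sketch, not part of the original header: requesting pdev
+ * and peer statistics with wmi_request_stats_cmd. stats_id is a bitmask
+ * of the WMI_STAT_ flags above; the helper name is hypothetical.
+ */
+static inline void wmi_request_stats_fill_example(struct wmi_request_stats_cmd *cmd,
+						   u32 vdev_id)
+{
+	cmd->stats_id = __cpu_to_le32(WMI_STAT_PDEV | WMI_STAT_PEER);
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	/* peer_macaddr and inst_rssi_args are left for the caller to fill */
+}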
+
+/* Suspend option */
+enum {
+ /* suspend */
+ WMI_PDEV_SUSPEND,
+
+ /* suspend and disable all interrupts */
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+ /* suspend option sent to target */
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+ __le32 stats_id; /* WMI_STAT_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+ */
+ __le32 num_pdev_stats;
+ /*
+ * number of vdev stats event structures
+ * (wmi_vdev_stats) 0 or max vdevs
+ */
+ __le32 num_vdev_stats;
+ /*
+ * number of peer stats event structures
+ * (wmi_peer_stats) 0 or max peers
+ */
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ /*
+ * followed by
+ * num_pdev_stats * sizeof(struct wmi_pdev_stats)
+ * num_vdev_stats * sizeof(struct wmi_vdev_stats)
+ * num_peer_stats * sizeof(struct wmi_peer_stats)
+ *
+ * By having a zero sized array, the pointer to data area
+ * becomes available without increasing the struct size
+ */
+ u8 data[0];
+} __packed;
+
+struct wmi_10_2_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ __le32 num_pdev_stats;
+ __le32 num_pdev_ext_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats_base {
+ __le32 chan_nf;
+ __le32 tx_frame_count;
+ __le32 rx_frame_count;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
+} __packed;
+
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_pdev_stats_mem {
+ __le32 dram_free;
+ __le32 iram_free;
+} __packed;
+
+struct wmi_10_2_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ __le32 mc_drop;
+ struct wmi_pdev_stats_rx rx;
+ __le32 pdev_rx_timeout;
+ struct wmi_pdev_stats_mem mem;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_10x_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+} __packed;
+
+struct wmi_10_2_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+} __packed;
+
+struct wmi_10_2_4_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 unknown_value; /* FIXME: what is this word? */
+} __packed;
+
+struct wmi_10_2_pdev_ext_stats {
+ __le32 rx_rssi_comb;
+ __le32 rx_rssi[4];
+ __le32 rx_mcs[10];
+ __le32 tx_mcs[10];
+ __le32 ack_rssi;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE = 0,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+ WMI_VDEV_SUBTYPE_P2P_GO = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden ssid. Only valid for
+ * AP/GO. */
+#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ * protection is enabled. For GO/AP vdevs, it indicates that
+ * it may support station/client associations with RMF enabled.
+ * For STA/client vdevs, it indicates that sta will
+ * associate with AP with RMF enabled. */
+#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in micro seconds */
+ __le32 interval; /* Absent period interval in micro seconds */
+ __le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+ /* WMI channel */
+ struct wmi_channel chan;
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* requestor id identifying the caller module */
+ __le32 requestor_id;
+ /* beacon interval from received beacon */
+ __le32 beacon_interval;
+ /* DTIM Period from the received beacon */
+ __le32 dtim_period;
+ /* Flags */
+ __le32 flags;
+ /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+ struct wmi_ssid ssid;
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
+ __le32 bcn_tx_rate;
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
+ __le32 bcn_tx_power;
+ /* number of p2p NOA descriptor(s) from scan entry */
+ __le32 num_noa_descriptors;
+ /*
+ * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+ * During CAC, our HW shouldn't ack detected frames.
+ */
+ __le32 disable_hw_ack;
+ /* actual p2p NOA descriptor from scan entry */
+ struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+ struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+ __le32 key_seq_counter_l;
+ __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher; /* %WMI_CIPHER_ */
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+
+ /* contains key followed by tx mic followed by rx mic */
+ u8 key_data[0];
+} __packed;
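+
+/*
+ * Illustrative sketch, not part of the original header: total length of
+ * the variable key_data[] area, which per the comment above carries the
+ * key, then the tx mic, then the rx mic. Helper name is hypothetical.
+ */
+static inline u32
+wmi_vdev_install_key_data_len_example(const struct wmi_vdev_install_key_cmd *cmd)
+{
+	return __le32_to_cpu(cmd->key_len) +
+	       __le32_to_cpu(cmd->key_txmic_len) +
+	       __le32_to_cpu(cmd->key_rxmic_len);
+}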
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ const void *key_data;
+};
+
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss - b5:b4 - ss number (0 means 1ss)
+ * - rate_mcs - b3:b0 - as below
+ * CCK: 0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ * 4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ * HT/VHT: MCS index
+ */
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xff)
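+
+/*
+ * Illustrative sketch, not part of the original header: packing a vdev
+ * fixed rate value according to the bit layout documented above
+ * (preamble in b7:b6, nss in b5:b4, rate/MCS in b3:b0, nss 0 meaning
+ * 1ss). Helper name is hypothetical.
+ */
+static inline u8 wmi_vdev_fixed_rate_example(enum wmi_rate_preamble pream,
+					     u8 nss, u8 rate_mcs)
+{
+	return ((u8)pream << 6) | ((nss & 0x3) << 4) | (rate_mcs & 0xf);
+}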
+
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+ /* RTS Threshold */
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* BMISS first time */
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ /* BMISS final time */
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ /* WMM enabled/disabled */
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP begins to consider the STA inactive */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ /* Enable/Disable RTS-CTS */
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ /* Enable TX beamformee/beamformer (TXBF) */
+ WMI_VDEV_PARAM_TXBF,
+
+ /* Set packet power save */
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+ /*
+ * Drops un-encrypted packets if received in an encrypted connection,
+ * otherwise forwards to host.
+ */
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+
+ /*
+ * Set the encapsulation type for frames.
+ */
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enabled/disabled */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP begins to consider the STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10X_VDEV_PARAM_MFPTEST_SET,
+ WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+};
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_start_event_param {
+ WMI_VDEV_RESP_START_EVENT = 0,
+ WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
+/* TODO: please add more comments if you have in-depth information */
+struct wmi_vdev_spectral_conf_cmd {
+ __le32 vdev_id;
+
+ /* number of fft samples to send (0 for infinite) */
+ __le32 scan_count;
+ __le32 scan_period;
+ __le32 scan_priority;
+
+ /* number of bins in the FFT: 2^(fft_size - bin_scale) */
+ __le32 scan_fft_size;
+ __le32 scan_gc_ena;
+ __le32 scan_restart_ena;
+ __le32 scan_noise_floor_ref;
+ __le32 scan_init_delay;
+ __le32 scan_nb_tone_thr;
+ __le32 scan_str_bin_thr;
+ __le32 scan_wb_rpt_mode;
+ __le32 scan_rssi_rpt_mode;
+ __le32 scan_rssi_thr;
+ __le32 scan_pwr_format;
+
+ /* rpt_mode: Format of FFT report to software for spectral scan
+ * triggered FFTs:
+ * 0: No FFT report (only spectral scan summary report)
+ * 1: 2-dword summary of metrics for each completed FFT + spectral
+ * scan summary report
+ * 2: 2-dword summary of metrics for each completed FFT +
+ * 1x- oversampled bins (in-band) per FFT + spectral scan summary
+ * report
+ * 3: 2-dword summary of metrics for each completed FFT +
+ * 2x- oversampled bins (all) per FFT + spectral scan summary
+ * report
+ */
+ __le32 scan_rpt_mode;
+ __le32 scan_bin_scale;
+ __le32 scan_dbm_adj;
+ __le32 scan_chn_mask;
+} __packed;
+
+struct wmi_vdev_spectral_conf_arg {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+};
+
+#define WMI_SPECTRAL_ENABLE_DEFAULT 0
+#define WMI_SPECTRAL_COUNT_DEFAULT 0
+#define WMI_SPECTRAL_PERIOD_DEFAULT 35
+#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
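+
+/*
+ * Illustrative note, not part of the original header: with the defaults
+ * above, the 2^(fft_size - bin_scale) formula from
+ * wmi_vdev_spectral_conf_cmd yields 2^(7 - 1) = 64 FFT bins. The macro
+ * name is hypothetical.
+ */
+#define WMI_SPECTRAL_DEFAULT_NUM_BINS_EXAMPLE \
+	(1 << (WMI_SPECTRAL_FFT_SIZE_DEFAULT - WMI_SPECTRAL_BIN_SCALE_DEFAULT))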
+
+struct wmi_vdev_spectral_enable_cmd {
+ __le32 vdev_id;
+ __le32 trigger_cmd;
+ __le32 enable_cmd;
+} __packed;
+
+#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
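+
+/*
+ * Illustrative sketch, not part of the original header: filling
+ * wmi_vdev_spectral_enable_cmd to both enable and trigger a spectral
+ * scan on one vdev, using the values defined above. Helper name is
+ * hypothetical.
+ */
+static inline void
+wmi_spectral_enable_fill_example(struct wmi_vdev_spectral_enable_cmd *cmd,
+				 u32 vdev_id)
+{
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->trigger_cmd = __cpu_to_le32(WMI_SPECTRAL_TRIGGER_CMD_TRIGGER);
+	cmd->enable_cmd = __cpu_to_le32(WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+}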
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+ struct wmi_bcn_tx_hdr hdr;
+ u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+ u32 vdev_id;
+ u32 tx_rate;
+ u32 tx_power;
+ u32 bcn_len;
+ const void *bcn;
+};
+
+enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+/* TODO: It is unclear why "no antenna" works while any other seemingly valid
+ * chainmask yields no beacons on the air at all.
+ */
+#define WMI_BCN_TX_REF_DEF_ANTENNA 0
+
+struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+ /* physical address of the frame - dma pointer */
+ __le32 data_ptr;
+ /* id for host to track */
+ __le32 msdu_id;
+ /* frame ctrl to setup PPDU desc */
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
+ /* introduced in 10.2 */
+ __le32 antenna_mask;
+} __packed;
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* TIM IE offset from the beginning of the template. */
+ __le32 tim_ie_offset;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* variable length data */
+ u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* Variable length data */
+ u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+ /* disable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_DISABLED = 0,
+ /* enable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /*
+ * Power save mode
+ * (see enum wmi_sta_ps_mode)
+ */
+ __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+ WMI_CSA_OFFLOAD_DISABLE = 0,
+ WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+ __le32 vdev_id;
+ __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+ __le32 vdev_id;
+ struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if sta_ps_mode is enabled.
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+ /*
+ * Wake up whenever there is RX activity on the VDEV. In this mode the
+ * power save state machine (SM) comes out of sleep by sending either a
+ * null frame or a data frame (with PS == 0) in response to the TIM bit
+ * set in a beacon received from the AP.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+ /*
+ * Here the power save state machine does not wake up in response to the
+ * TIM bit; instead it sends a PS-Poll or a UAPSD trigger based on the
+ * UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI command.
+ * When all access categories are delivery-enabled, the station sends a
+ * UAPSD trigger frame, otherwise it sends a PS-Poll.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of TX frames per beacon interval that cause the power save SM to
+ * wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up, which is useful for keeping
+ * the system asleep all the time in some kind of test mode. The host can
+ * change this parameter at any time; it takes effect from the next TX frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /*
+ * Values greater than one mean the STA waits that many TX attempts per
+ * beacon interval before waking up.
+ */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve the buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /*
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+
+ /* When u-APSD is enabled the firmware will be very reluctant to exit
+ * STA PS. This could result in very poor Rx performance with STA doing
+ * PS-Poll for each and every buffered frame. This value is a bit
+ * arbitrary.
+ */
+ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
+};
+
+/*
+ * This includes the delivery and trigger enabled state for every AC. It is
+ * the state negotiated with the AP. The host MLME needs to set this based
+ * on the AP capability and the state set in the association request by the
+ * station MLME. The lower 8 bits of the value specify the UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
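+
+/*
+ * Illustrative sketch (not part of the original header): a minimal example
+ * showing that WMI_UAPSD_AC_BIT_MASK() yields the same bit positions as
+ * enum wmi_sta_ps_param_uapsd - delivery bits land on even positions and
+ * the matching trigger bits on the following odd positions. The helper
+ * name is hypothetical.
+ */
+static inline u32 wmi_uapsd_mask_example(void)
+{
+ u32 mask = 0;
+
+ /* delivery-enable AC0 (bit 0) and trigger-enable AC2 (bit 5) */
+ mask |= WMI_UAPSD_AC_BIT_MASK(0, WMI_UAPSD_AC_TYPE_DELI);
+ mask |= WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG);
+
+ /* same value as WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ * WMI_STA_PS_UAPSD_AC2_TRIGGER_EN
+ */
+ return mask;
+}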
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ __le32 wmm_ac;
+ __le32 user_priority;
+ __le32 service_interval;
+ __le32 suspend_interval;
+ __le32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrieved from the AP while the STA is sleeping
+ *
+ * (see enum wmi_sta_ps_param_rx_wake_policy)
+ */
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+ /*
+ * The STA will go active after this many TX frames
+ *
+ * (see enum wmi_sta_ps_param_tx_wake_threshold)
+ */
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+ /*
+ * Number of PS-Polls to send before the STA wakes up
+ *
+ * (see enum wmi_sta_ps_param_pspoll_count)
+ *
+ */
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+ /*
+ * TX/RX inactivity time in msec before going to sleep.
+ *
+ * The power save SM monitors TX/RX activity on the VDEV; if there is no
+ * activity for the number of msec given by this parameter, the power
+ * save SM goes to sleep.
+ */
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+ /*
+ * Set uapsd configuration.
+ *
+ * (see enum wmi_sta_ps_param_uapsd)
+ */
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+ __le32 param_value;
+} __packed;
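+
+/*
+ * Illustrative sketch (not part of the original header): one way a
+ * wmi_sta_powersave_param_cmd could be filled, e.g. to select the
+ * PS-Poll/UAPSD RX wake policy. Field values go out little-endian; the
+ * helper name is hypothetical.
+ */
+static inline void
+wmi_sta_ps_param_fill_example(struct wmi_sta_powersave_param_cmd *cmd,
+ u32 vdev_id)
+{
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(WMI_STA_PS_PARAM_RX_WAKE_POLICY);
+ cmd->param_value = __cpu_to_le32(WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD);
+}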
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* MIMO powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* MIMO powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* mimo powersave mode as defined above */
+ __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+ /* Set uapsd configuration for a given peer.
+ *
+ * Include the delivery and trigger enabled state for every AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request set in the association request received from the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /*
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period comes from the WME IE in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /* Time in seconds for aging out buffered frames for STA in PS */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+ __le32 param_id;
+
+ /* AP powersave param value */
+ __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1
+#define WMI_P2P_NOA_CHANGED_BIT BIT(0)
+
+struct wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ Bits 7-1 - Reserved */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ Bits 1-7 - Ctwindow in TUs */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
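+
+/*
+ * Illustrative sketch (not part of the original header): decoding
+ * ctwindow_oppps with the WMI_P2P_OPPPS_* definitions above - bit 0 is the
+ * opportunistic PS state, bits 7-1 the CT window in TUs. Helper names are
+ * hypothetical.
+ */
+static inline bool wmi_p2p_noa_oppps_example(const struct wmi_p2p_noa_info *noa)
+{
+ return !!(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+}
+
+static inline u8 wmi_p2p_noa_ctwindow_example(const struct wmi_p2p_noa_info *noa)
+{
+ return noa->ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+}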
+
+struct wmi_bcn_info {
+ struct wmi_tim_info tim_info;
+ struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_bcn_info bcn_info[0];
+} __packed;
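+
+/*
+ * Illustrative sketch (not part of the original header): bcn_info[] carries
+ * one entry per bit set in vdev_map, in ascending vdev id order, so a
+ * consumer can size the array by counting the set bits. The helper name is
+ * hypothetical.
+ */
+static inline int wmi_swba_num_bcn_info_example(const struct wmi_host_swba_event *ev)
+{
+ u32 map = __le32_to_cpu(ev->vdev_map);
+ int n = 0;
+
+ while (map) {
+ n += map & 1;
+ map >>= 1;
+ }
+
+ return n;
+}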
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+ __le32 vdev_map;
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+ /*
+ * rate mode. 0: disable fixed rate (auto rate)
+ * 1: legacy (non-11n) rate specified as an IEEE rate (2 * Mbps)
+ * 2: HT20 11n rate specified as an MCS index
+ * 3: HT40 11n rate specified as an MCS index
+ */
+ __le32 rate_mode;
+ /*
+ * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+ * and series 3 is stored at byte 3 (MSB)
+ */
+ __le32 rate_series;
+ /*
+ * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+ * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+ * (MSB)
+ */
+ __le32 rate_retries;
+} __packed;
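+
+/*
+ * Illustrative sketch (not part of the original header): the byte packing
+ * described above - series 0 goes into byte 0 (LSB), series 3 into byte 3
+ * (MSB), and likewise for the retry counts. The helper name is hypothetical.
+ */
+static inline void
+wmi_fixed_rate_pack_example(struct wmi_fixed_rate *fr,
+ const u8 rates[4], const u8 retries[4])
+{
+ u32 series = 0, retry = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ series |= (u32)rates[i] << (8 * i);
+ retry |= (u32)retries[i] << (8 * i);
+ }
+
+ fr->rate_series = __cpu_to_le32(series);
+ fr->rate_retries = __cpu_to_le32(retry);
+}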
+
+struct wmi_peer_fixed_rate_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* fixed rate */
+ struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID 17
+
+struct wmi_addba_clear_resp_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Buffer/Window size */
+ __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Is Initiator */
+ __le32 initiator;
+ /* Reason code */
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* status code */
+ __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
+enum wmi_peer_param {
+ WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_PEER_AMPDU = 0x2,
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
+};
+
+struct wmi_peer_set_param_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+ /* total number of rates */
+ __le32 num_rates;
+ /*
+ * rates (each 8bit value) packed into a 32 bit word.
+ * the rates are filled from least significant byte to most
+ * significant byte.
+ */
+ __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
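+
+/*
+ * Illustrative sketch (not part of the original header): packing a
+ * wmi_rate_set_arg into the layout described above - one 8-bit rate per
+ * byte, filled from the least significant byte of each 32-bit word, which
+ * on the wire is simply the rates in array order. The helper name is
+ * hypothetical.
+ */
+static inline void
+wmi_rate_set_pack_example(struct wmi_rate_set *rs,
+ const struct wmi_rate_set_arg *arg)
+{
+ u32 word = 0;
+ unsigned int i;
+
+ memset(rs->rates, 0, sizeof(rs->rates));
+ rs->num_rates = __cpu_to_le32(arg->num_rates);
+
+ for (i = 0; i < arg->num_rates && i < MAX_SUPPORTED_RATES; i++) {
+ word |= (u32)arg->rates[i] << (8 * (i % 4));
+ if ((i % 4) == 3 || i + 1 == arg->num_rates) {
+ rs->rates[i / 4] = __cpu_to_le32(word);
+ word = 0;
+ }
+ }
+}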
+
+struct wmi_rate_set_arg {
+ unsigned int num_rates;
+ u8 rates[MAX_SUPPORTED_RATES];
+};
+
+/*
+ * NOTE: It would be a good idea to represent the TX MCS
+ * info in one word and RX in another word. This is split
+ * into multiple words for convenience.
+ */
+struct wmi_vht_rate_set {
+ __le32 rx_max_rate; /* Max Rx data rate */
+ __le32 rx_mcs_set; /* Negotiated RX VHT rates */
+ __le32 tx_max_rate; /* Max Tx data rate */
+ __le32 tx_mcs_set; /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* legacy rate set */
+ struct wmi_rate_set peer_legacy_rates;
+ /* ht rate set */
+ struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_VHT_2G 0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG 0x01
+#define WMI_RC_CW40_FLAG 0x02
+#define WMI_RC_SGI_FLAG 0x04
+#define WMI_RC_HT_FLAG 0x08
+#define WMI_RC_RTSCTS_FLAG 0x10
+#define WMI_RC_TX_STBC_FLAG 0x20
+#define WMI_RC_RX_STBC_FLAG 0xC0
+#define WMI_RC_RX_STBC_FLAG_S 6
+#define WMI_RC_WEP_TKIP_FLAG 0x100
+#define WMI_RC_TS_FLAG 0x200
+#define WMI_RC_UAPSD_FLAG 0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_common_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+ __le32 peer_associd; /* 16 LSBs */
+ __le32 peer_flags;
+ __le32 peer_caps; /* 16 LSBs */
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density; /* 0..16 */
+ __le32 peer_rate_caps;
+ struct wmi_rate_set peer_legacy_rates;
+ struct wmi_rate_set peer_ht_rates;
+ __le32 peer_nss; /* num of spatial streams */
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
+};
+
+struct wmi_main_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+
+ /* HT Operation element of the peer. Five bytes packed into a
+ * two-element INT32 array, filled from LSB to MSB. */
+ __le32 peer_ht_info[2];
+} __packed;
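+
+/*
+ * Illustrative sketch (not part of the original header): packing the five
+ * HT Operation bytes into peer_ht_info[] as described above, filling from
+ * the least significant byte of word 0 upwards. The helper name is
+ * hypothetical.
+ */
+static inline void
+wmi_peer_ht_info_pack_example(struct wmi_main_peer_assoc_complete_cmd *cmd,
+ const u8 ht_op[5])
+{
+ u32 lo = ht_op[0] | (ht_op[1] << 8) | (ht_op[2] << 16) |
+ ((u32)ht_op[3] << 24);
+
+ cmd->peer_ht_info[0] = __cpu_to_le32(lo);
+ cmd->peer_ht_info[1] = __cpu_to_le32(ht_op[4]);
+}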
+
+struct wmi_10_1_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+} __packed;
+
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
+
+struct wmi_10_2_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+ __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
+} __packed;
+
+struct wmi_peer_assoc_complete_arg {
+ u8 addr[ETH_ALEN];
+ u32 vdev_id;
+ bool peer_reassoc;
+ u16 peer_aid;
+ u32 peer_flags; /* see %WMI_PEER_ */
+ u16 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density; /* 0..16 */
+ u32 peer_rate_caps; /* see %WMI_RC_ */
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 peer_num_spatial_streams;
+ u32 peer_vht_caps;
+ enum wmi_phy_mode peer_phymode;
+ struct wmi_vht_rate_set_arg peer_vht_rates;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+} __packed;
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+
+/* FIXME: empirically extrapolated */
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES 256
+#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcnsdropped;
+ __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 activefilters;
+ struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+ u32 vdev_id;
+ u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+/* Firmware crashes if keepalive interval exceeds this limit */
+#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+ struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+ WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+ WMI_FORCE_FW_HANG_EMPTY_POINT,
+ WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+ WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+ __le32 type;
+ __le32 delay_ms;
+} __packed;
+
+enum ath10k_dbglog_level {
+ ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+ ATH10K_DBGLOG_LEVEL_INFO = 1,
+ ATH10K_DBGLOG_LEVEL_WARN = 2,
+ ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000
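+
+/*
+ * Illustrative sketch (not part of the original header): composing a
+ * config_enable value from the ATH10K_DBGLOG_CFG_* fields above, e.g.
+ * enabling reporting and requesting the WARN log level. The helper name is
+ * hypothetical.
+ */
+static inline u32 ath10k_dbglog_config_example(void)
+{
+ u32 cfg = 0;
+
+ cfg |= (1 << ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB) &
+ ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK;
+ cfg |= (ATH10K_DBGLOG_LEVEL_WARN << ATH10K_DBGLOG_CFG_LOG_LVL_LSB) &
+ ATH10K_DBGLOG_CFG_LOG_LVL_MASK;
+
+ return cfg;
+}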
+
+/*
+ * Note: this is a cleaned-up version of a struct the firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config*/
+ __le32 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le32 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
+#define ATH10K_FRAGMT_THRESHOLD_MIN 540
+#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_scan_ev_arg {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+};
+
+struct wmi_mgmt_rx_ev_arg {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+};
+
+struct wmi_ch_info_ev_arg {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+};
+
+struct wmi_vdev_start_ev_arg {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+};
+
+struct wmi_peer_kick_ev_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_swba_ev_arg {
+ __le32 vdev_map;
+ const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
+};
+
+struct wmi_phyerr_ev_arg {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+ const struct wmi_phyerr *phyerrs;
+};
+
+struct wmi_svc_rdy_ev_arg {
+ __le32 min_tx_power;
+ __le32 max_tx_power;
+ __le32 ht_cap;
+ __le32 vht_cap;
+ __le32 sw_ver0;
+ __le32 sw_ver1;
+ __le32 fw_build;
+ __le32 phy_capab;
+ __le32 num_rf_chains;
+ __le32 eeprom_rd;
+ __le32 num_mem_reqs;
+ const __le32 *service_map;
+ size_t service_map_len;
+ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
+
+struct wmi_rdy_ev_arg {
+ __le32 sw_version;
+ __le32 abi_version;
+ __le32 status;
+ const u8 *mac_addr;
+};
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ __le32 temperature;
+} __packed;
+
+struct ath10k;
+struct ath10k_vif;
+struct ath10k_fw_stats_pdev;
+struct ath10k_fw_stats_peer;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
+int ath10k_wmi_connect(struct ath10k *ar);
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst);
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks);
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg);
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg);
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg);
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr, u64 tsf);
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ const struct wmi_phyerr *phyerr,
+ u64 tsf);
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+
+#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
new file mode 100644
index 000000000..2399a3921
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -0,0 +1,75 @@
+config ATH5K
+ tristate "Atheros 5xxx wireless cards support"
+ depends on (PCI || ATH25) && MAC80211
+ select ATH_COMMON
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select AVERAGE
+ select ATH5K_AHB if ATH25
+ select ATH5K_PCI if !ATH25
+ ---help---
+ This module adds support for wireless adapters based on
+ the Atheros 5xxx chipset.
+
+ Currently the following chip versions are supported:
+
+ MAC: AR5211 AR5212
+ PHY: RF5111/2111 RF5112/2112 RF5413/2413
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ If you choose to build a module, it'll be called ath5k. Say M if
+ unsure.
+
+config ATH5K_DEBUG
+ bool "Atheros 5xxx debugging"
+ depends on ATH5K
+ ---help---
+ Atheros 5xxx debugging messages.
+
+ Say Y, and you will get debug options for ath5k.
+ To use this, you need to mount debugfs:
+
+ mount -t debugfs debug /sys/kernel/debug
+
+ You will get access to files under:
+ /sys/kernel/debug/ath5k/phy0/
+
+ To enable debug, pass the debug level to the debug module
+ parameter. For example:
+
+ modprobe ath5k debug=0x00000400
+
+config ATH5K_TRACER
+ bool "Atheros 5xxx tracer"
+ depends on ATH5K
+ depends on EVENT_TRACING
+ ---help---
+ Say Y here to enable tracepoints for the ath5k driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say N.
+
+config ATH5K_AHB
+ bool "Atheros 5xxx AHB bus support"
+ depends on ATH25
+ ---help---
+ This adds support for WiSoC type chipsets of the 5xxx Atheros
+ family.
+
+config ATH5K_PCI
+ bool "Atheros 5xxx PCI bus support"
+ depends on (!ATH25 && PCI)
+ ---help---
+ This adds support for PCI type chipsets of the 5xxx Atheros
+ family.
+
+config ATH5K_TEST_CHANNELS
+ bool "Enables testing channels on ath5k"
+ depends on ATH5K && CFG80211_CERTIFICATION_ONUS
+ ---help---
+ This enables non-standard IEEE 802.11 channels on ath5k, which
+ can be used for research purposes. This option should be disabled
+ unless doing research.
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
new file mode 100644
index 000000000..1b3a34f7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -0,0 +1,22 @@
+ath5k-y += caps.o
+ath5k-y += initvals.o
+ath5k-y += eeprom.o
+ath5k-y += gpio.o
+ath5k-y += desc.o
+ath5k-y += dma.o
+ath5k-y += qcu.o
+ath5k-y += pcu.o
+ath5k-y += phy.o
+ath5k-y += reset.o
+ath5k-y += attach.o
+ath5k-y += base.o
+CFLAGS_base.o += -I$(src)
+ath5k-y += led.o
+ath5k-y += rfkill.o
+ath5k-y += ani.o
+ath5k-y += sysfs.o
+ath5k-y += mac80211-ops.o
+ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
+ath5k-$(CONFIG_ATH5K_AHB) += ahb.o
+ath5k-$(CONFIG_ATH5K_PCI) += pci.o
+obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
new file mode 100644
index 000000000..2ca88b593
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <ath25_platform.h>
+#include "ath5k.h"
+#include "debug.h"
+#include "base.h"
+#include "reg.h"
+
+/* return bus cachesize in 4B word units */
+static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath5k_hw *ah = common->priv;
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ u16 *eeprom, *eeprom_end;
+
+ eeprom = (u16 *) bcfg->radio;
+ eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
+
+ eeprom += off;
+ if (eeprom > eeprom_end)
+ return false;
+
+ *data = *eeprom;
+ return true;
+}
+
+int ath5k_hw_read_srev(struct ath5k_hw *ah)
+{
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ ah->ah_mac_srev = bcfg->devid;
+ return 0;
+}
+
+static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+{
+ struct platform_device *pdev = to_platform_device(ah->dev);
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ u8 *cfg_mac;
+
+ if (to_platform_device(ah->dev)->id == 0)
+ cfg_mac = bcfg->config->wlan0_mac;
+ else
+ cfg_mac = bcfg->config->wlan1_mac;
+
+ memcpy(mac, cfg_mac, ETH_ALEN);
+ return 0;
+}
+
+static const struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
+ .read_cachesize = ath5k_ahb_read_cachesize,
+ .eeprom_read = ath5k_ahb_eeprom_read,
+ .eeprom_read_mac = ath5k_ahb_eeprom_read_mac,
+};
+
+/*Initialization*/
+static int ath_ahb_probe(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ struct ath5k_hw *ah;
+ struct ieee80211_hw *hw;
+ struct resource *res;
+ void __iomem *mem;
+ int irq;
+ int ret = 0;
+ u32 reg;
+
+ if (!dev_get_platdata(&pdev->dev)) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource found\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ mem = ioremap_nocache(res->start, resource_size(res));
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ ret = -ENXIO;
+ goto err_iounmap;
+ }
+
+ irq = res->start;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+
+ ah = hw->priv;
+ ah->hw = hw;
+ ah->dev = &pdev->dev;
+ ah->iobase = mem;
+ ah->irq = irq;
+ ah->devid = bcfg->devid;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Enable WMAC AHB arbitration */
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+
+ /* Enable global WMAC swapping */
+ reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP);
+ reg |= AR5K_AR2315_BYTESWAP_WMAC;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
+ } else {
+ /* Enable WMAC DMA access (assuming 5312 or 231x) */
+ /* TODO: check other platforms */
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(ah->dev)->id == 0)
+ reg |= AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg |= AR5K_AR5312_ENABLE_WLAN1;
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+
+ /*
+ * On a dual-band AR5312, the multiband radio is only
+ * used as pass-through. Disable 2 GHz support in the
+ * driver for it
+ */
+ if (to_platform_device(ah->dev)->id == 0 &&
+ (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
+ (BD_WLAN1 | BD_WLAN0))
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
+ }
+
+ ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
+ ret = -ENODEV;
+ goto err_free_hw;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ return 0;
+
+ err_free_hw:
+ ieee80211_free_hw(hw);
+ err_iounmap:
+ iounmap(mem);
+ err_out:
+ return ret;
+}
+
+static int ath_ahb_remove(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct ath5k_hw *ah;
+ u32 reg;
+
+ if (!hw)
+ return 0;
+
+ ah = hw->priv;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Disable WMAC AHB arbitration */
+ reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ } else {
+ /*Stop DMA access */
+ reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(ah->dev)->id == 0)
+ reg &= ~AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg &= ~AR5K_AR5312_ENABLE_WLAN1;
+ iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ }
+
+ ath5k_deinit_ah(ah);
+ iounmap(ah->iobase);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+
+static struct platform_driver ath_ahb_driver = {
+ .probe = ath_ahb_probe,
+ .remove = ath_ahb_remove,
+ .driver = {
+ .name = "ar231x-wmac",
+ },
+};
+
+module_platform_driver(ath_ahb_driver);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 000000000..5c0087576
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+
+/**
+ * DOC: Basic ANI Operation
+ *
+ * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
+ * depending on the amount of interference in the environment, increasing
+ * or reducing sensitivity as necessary.
+ *
+ * The parameters are:
+ *
+ * - "noise immunity"
+ *
+ * - "spur immunity"
+ *
+ * - "firstep level"
+ *
+ * - "OFDM weak signal detection"
+ *
+ * - "CCK weak signal detection"
+ *
+ * Basically we look at the number of OFDM and CCK timing errors we get and then
+ * raise or lower immunity accordingly by setting one or more of these
+ * parameters.
+ *
+ * Newer chipsets have PHY error counters in hardware which will generate a MIB
+ * interrupt when they overflow. Older hardware has to enable PHY error frames
+ * by setting an RX flag and then count every single PHY error. When a specified
+ * threshold of errors has been reached we raise immunity.
+ * Also we regularly check the number of errors and lower or raise immunity as
+ * necessary.
+ */
+
+
+/***********************\
+* ANI parameter control *
+\***********************/
+
+/**
+ * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
+ * @ah: The &struct ath5k_hw
+ * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
+ */
+void
+ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
+{
+ /* TODO:
+ * ANI documents suggest the following five levels to use, but the HAL
+ * and ath9k use only the last two levels, making this
+ * essentially an on/off option. There *may* be a reason for this (???),
+ * so I stick with the HAL version for now...
+ */
+#if 0
+ static const s8 lo[] = { -52, -56, -60, -64, -70 };
+ static const s8 hi[] = { -18, -18, -16, -14, -12 };
+ static const s8 sz[] = { -34, -41, -48, -55, -62 };
+ static const s8 fr[] = { -70, -72, -75, -78, -80 };
+#else
+ static const s8 lo[] = { -64, -70 };
+ static const s8 hi[] = { -14, -12 };
+ static const s8 sz[] = { -55, -62 };
+ static const s8 fr[] = { -78, -80 };
+#endif
+ if (level < 0 || level >= ARRAY_SIZE(sz)) {
+ ATH5K_ERR(ah, "noise immunity level %d out of range",
+ level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_LO, lo[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_HI, hi[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRPWR, fr[level]);
+
+ ah->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+/**
+ * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
+ * @ah: The &struct ath5k_hw
+ * @level: level between 0 and @max_spur_level (the maximum level is dependent
+ * on the chip revision).
+ */
+void
+ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
+{
+ static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val) ||
+ level > ah->ani_state.max_spur_level) {
+ ATH5K_ERR(ah, "spur immunity level %d out of range",
+ level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
+
+ ah->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+/**
+ * ath5k_ani_set_firstep_level() - Set "firstep" level
+ * @ah: The &struct ath5k_hw
+ * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
+ */
+void
+ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
+{
+ static const int val[] = { 0, 4, 8 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val)) {
+ ATH5K_ERR(ah, "firstep level %d out of range", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRSTEP, val[level]);
+
+ ah->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+/**
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ static const int m1l[] = { 127, 50 };
+ static const int m2l[] = { 127, 40 };
+ static const int m1[] = { 127, 0x4d };
+ static const int m2[] = { 127, 0x40 };
+ static const int m2cnt[] = { 31, 16 };
+ static const int m2lcnt[] = { 63, 48 };
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
+
+ if (on)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+
+ ah->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+/**
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ static const int val[] = { 8, 6 };
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
+ AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
+ ah->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/***************\
+* ANI algorithm *
+\***************/
+
+/**
+ * ath5k_ani_raise_immunity() - Increase noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
+ * @ofdm_trigger: If this is true we are called because of too many OFDM
+ * errors; the algorithm will then tune more parameters.
+ *
+ * Try to raise noise immunity (=decrease sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
+ bool ofdm_trigger)
+{
+ int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ofdm_trigger ? "OFDM" : "CCK");
+
+ /* first: raise noise immunity */
+ if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
+ return;
+ }
+
+ /* only OFDM: raise spur immunity level */
+ if (ofdm_trigger &&
+ as->spur_level < ah->ani_state.max_spur_level) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
+ return;
+ }
+
+ /* AP mode */
+ if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+
+ /* STA and IBSS mode */
+
+ /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
+ * per neighbour node and use the minimum of these, to make sure we
+ * don't shut out a remote node by raising immunity too high. */
+
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "beacon RSSI high");
+ /* only OFDM: beacon RSSI is high, we can disable OFDM weak
+ * signal detection */
+ if (ofdm_trigger && as->ofdm_weak_sig) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ return;
+ }
+ /* as a last resort or CCK: raise firstep level */
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI in mid range, we need OFDM weak signal detect,
+ * but can raise firstep level */
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "beacon RSSI mid");
+ if (ofdm_trigger && !as->ofdm_weak_sig)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ /* beacon RSSI is low. In B/G mode turn off OFDM weak signal
+ * detection and zero the firstep level to maximize CCK sensitivity */
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "beacon RSSI low, 2GHz");
+ if (ofdm_trigger && as->ofdm_weak_sig)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ if (as->firstep_level > 0)
+ ath5k_ani_set_firstep_level(ah, 0);
+ return;
+ }
+
+ /* TODO: why not?:
+ if (as->cck_weak_sig == true) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+ */
+}
+
+/**
+ * ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
+ *
+ * Try to lower noise immunity (=increase sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
+
+ if (ah->opmode == NL80211_IFTYPE_AP) {
+ /* AP mode */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* STA and IBSS mode (see TODO above) */
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ /* beacon signal is high, leave OFDM weak signal
+ * detection off or it may oscillate
+ * TODO: who said it's off??? */
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI is mid-range: turn on OFDM weak signal
+ * detection and next, lower firstep level */
+ if (!as->ofdm_weak_sig) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah,
+ true);
+ return;
+ }
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* beacon signal is low: only reduce firstep level */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ }
+ }
+
+ /* all modes */
+ if (as->spur_level > 0) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
+ return;
+ }
+
+ /* finally, reduce noise immunity */
+ if (as->noise_imm_level > 0) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
+ return;
+ }
+}
+
+/**
+ * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
+ *
+ * Return an approximation of the time spent "listening" in milliseconds (ms)
+ * since the last call of this function.
+ * Save a snapshot of the counter values for debugging/statistics.
+ */
+static int
+ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ int listen;
+
+ spin_lock_bh(&common->cc_lock);
+
+ ath_hw_cycle_counters_update(common);
+ memcpy(&as->last_cc, &common->cc_ani, sizeof(as->last_cc));
+
+ /* clears common->cc_ani */
+ listen = ath_hw_get_listen_time(common);
+
+ spin_unlock_bh(&common->cc_lock);
+
+ return listen;
+}
+
+/**
+ * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
+ *
+ * Clear the PHY error counters as soon as possible, since this might be called
+ * from a MIB interrupt and we want to make sure we don't get interrupted again.
+ * Add the count of CCK and OFDM errors to our internal state, so it can be used
+ * by the algorithm later.
+ *
+ * Will be called from interrupt and tasklet context.
+ * Returns 0 if both counters are zero.
+ */
+static int
+ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
+ struct ath5k_ani_state *as)
+{
+ unsigned int ofdm_err, cck_err;
+
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return 0;
+
+ ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
+ cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
+
+ /* reset counters first, we might be in a hurry (interrupt) */
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+
+ ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
+ cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
+
+ /* sometimes both can be zero, especially when there is a superfluous
+ * second interrupt. Detect that here and return 0. */
+ if (ofdm_err <= 0 && cck_err <= 0)
+ return 0;
+
+ /* avoid negative values should one of the registers overflow */
+ if (ofdm_err > 0) {
+ as->ofdm_errors += ofdm_err;
+ as->sum_ofdm_errors += ofdm_err;
+ }
+ if (cck_err > 0) {
+ as->cck_errors += cck_err;
+ as->sum_cck_errors += cck_err;
+ }
+ return 1;
+}
+
+/**
+ * ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
+ *
+ * Just reset counters, so they are clear for the next "ani period".
+ */
+static void
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
+{
+ /* keep last values for debugging */
+ as->last_ofdm_errors = as->ofdm_errors;
+ as->last_cck_errors = as->cck_errors;
+ as->last_listen = as->listen_time;
+
+ as->ofdm_errors = 0;
+ as->cck_errors = 0;
+ as->listen_time = 0;
+}
+
+/**
+ * ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
+ *
+ * We count OFDM and CCK errors relative to the time where we did not send or
+ * receive ("listen" time) and raise or lower immunity accordingly.
+ * This is called regularly (every second) from the calibration timer, but also
+ * when an error threshold has been reached.
+ *
+ * In order to synchronize access from different contexts, this should be
+ * called only indirectly by scheduling the ANI tasklet!
+ */
+void
+ath5k_ani_calibration(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ani_state;
+ int listen, ofdm_high, ofdm_low, cck_high, cck_low;
+
+ /* get listen time since last call and add it to the counter because we
+ * might not have restarted the "ani period" last time.
+ * always do this to calculate the busy time also in manual mode */
+ listen = ath5k_hw_ani_get_listen_time(ah, as);
+ as->listen_time += listen;
+
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ ath5k_ani_save_and_clear_phy_errors(ah, as);
+
+ ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
+ cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
+ ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
+ cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "listen %d (now %d)", as->listen_time, listen);
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "check high ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
+
+ if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
+ /* too many PHY errors - we have to raise immunity */
+ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ ath5k_ani_raise_immunity(ah, as, ofdm_flag);
+ ath5k_ani_period_restart(as);
+
+ } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
+ /* If more than 5 (TODO: why 5?) periods have passed and we got
+ * relatively few errors we can try to lower immunity */
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "check low ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
+
+ if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
+ ath5k_ani_lower_immunity(ah, as);
+
+ ath5k_ani_period_restart(as);
+ }
+}
+
+
+/*******************\
+* Interrupt handler *
+\*******************/
+
+/**
+ * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Just read & reset the registers quickly, so they don't generate more
+ * interrupts, save the counters and schedule the tasklet to decide whether
+ * to raise immunity or not.
+ *
+ * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
+ * should take care of all "normal" MIB interrupts.
+ */
+void
+ath5k_ani_mib_intr(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ani_state;
+
+ /* nothing to do here if HW does not have PHY error counters - they
+ * can't be the reason for the MIB interrupt then */
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return;
+
+ /* not in use but clear anyway */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+
+ if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* If one of the errors triggered, we can get a superfluous second
+ * interrupt, even though we have already reset the register. The
+ * function detects that so we can return early. */
+ if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
+ return;
+
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
+ as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ani_tasklet);
+}
+
+/**
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
+ *
+ * This is used by hardware without PHY error counters to report PHY errors
+ * on a frame-by-frame basis, instead of the interrupt.
+ */
+void
+ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr)
+{
+ struct ath5k_ani_state *as = &ah->ani_state;
+
+ if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
+ as->ofdm_errors++;
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
+ tasklet_schedule(&ah->ani_tasklet);
+ } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
+ as->cck_errors++;
+ if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ani_tasklet);
+ }
+}
+
+
+/****************\
+* Initialization *
+\****************/
+
+/**
+ * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Enable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+/**
+ * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Disable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+/**
+ * ath5k_ani_init() - Initialize ANI
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
+ *
+ * Initialize ANI according to mode.
+ */
+void
+ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
+{
+ /* ANI is only possible on 5212 and newer */
+ if (ah->ah_version < AR5K_AR5212)
+ return;
+
+ if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
+ ATH5K_ERR(ah, "ANI mode %d out of range", mode);
+ return;
+ }
+
+ /* clear old state information */
+ memset(&ah->ani_state, 0, sizeof(ah->ani_state));
+
+ /* older hardware has more spur levels than newer */
+ if (ah->ah_mac_srev < AR5K_SREV_AR2414)
+ ah->ani_state.max_spur_level = 7;
+ else
+ ah->ani_state.max_spur_level = 2;
+
+ /* initial values for our ani parameters */
+ if (mode == ATH5K_ANI_MODE_OFF) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "ANI manual low -> high sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
+ "ANI manual high -> low sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ani_state.max_spur_level);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (mode == ATH5K_ANI_MODE_AUTO) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+
+	/* Newer hardware has PHY error counter registers which we can use to
+	 * get OFDM and CCK error counts. Older hardware has to set the RX
+	 * filter and report every single PHY error by calling
+	 * ath5k_ani_phy_error_report()
+ */
+ if (mode == ATH5K_ANI_MODE_AUTO) {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_enable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
+ AR5K_RX_FILTER_PHYERR);
+ } else {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_disable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
+ ~AR5K_RX_FILTER_PHYERR);
+ }
+
+ ah->ani_state.ani_mode = mode;
+}
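+/*
+ * Typical (illustrative) usage: after a chip reset the driver would re-run
+ *
+ *	ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
+ *
+ * to clear the old ANI state and let it control immunity automatically
+ * again.
+ */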
+
+
+/**************\
+* Debug output *
+\**************/
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
+void
+ath5k_ani_print_counters(struct ath5k_hw *ah)
+{
+	/* these registers are cleared on read */
+ pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+ pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+ pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+ pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+
+ /* no clear */
+ pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+ pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+ pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+ pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+
+ pr_notice("AR5K_PHYERR_CNT1\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+ pr_notice("AR5K_PHYERR_CNT2\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+ pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+ pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 000000000..21aa35546
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef ANI_H
+#define ANI_H
+
+#include "../ath.h"
+
+enum ath5k_phy_error_code;
+
+/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
+#define ATH5K_ANI_LISTEN_PERIOD 100
+#define ATH5K_ANI_OFDM_TRIG_HIGH 500
+#define ATH5K_ANI_OFDM_TRIG_LOW 200
+#define ATH5K_ANI_CCK_TRIG_HIGH 200
+#define ATH5K_ANI_CCK_TRIG_LOW 100
+
+/* average beacon RSSI thresholds */
+#define ATH5K_ANI_RSSI_THR_HIGH 40
+#define ATH5K_ANI_RSSI_THR_LOW 7
+
+/* maximum available levels */
+#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
+#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
+
+
+/**
+ * enum ath5k_ani_mode - mode for ANI / noise sensitivity
+ *
+ * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
+ */
+enum ath5k_ani_mode {
+ ATH5K_ANI_MODE_OFF = 0,
+ ATH5K_ANI_MODE_MANUAL_LOW = 1,
+ ATH5K_ANI_MODE_MANUAL_HIGH = 2,
+ ATH5K_ANI_MODE_AUTO = 3
+};
+
+
+/**
+ * struct ath5k_ani_state - ANI state and associated counters
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
+ */
+struct ath5k_ani_state {
+ enum ath5k_ani_mode ani_mode;
+
+ /* state */
+ int noise_imm_level;
+ int spur_level;
+ int firstep_level;
+ bool ofdm_weak_sig;
+ bool cck_weak_sig;
+
+ int max_spur_level;
+
+ /* used by the algorithm */
+ unsigned int listen_time;
+ unsigned int ofdm_errors;
+ unsigned int cck_errors;
+
+ /* debug/statistics only: numbers from last ANI calibration */
+ struct ath_cycle_counters last_cc;
+ unsigned int last_listen;
+ unsigned int last_ofdm_errors;
+ unsigned int last_cck_errors;
+ unsigned int sum_ofdm_errors;
+ unsigned int sum_cck_errors;
+};
+
+void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
+void ath5k_ani_mib_intr(struct ath5k_hw *ah);
+void ath5k_ani_calibration(struct ath5k_hw *ah);
+void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr);
+
+/* for manual control */
+void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
+void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
+
+void ath5k_ani_print_counters(struct ath5k_hw *ah);
+
+#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
new file mode 100644
index 000000000..7ca0d6f93
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -0,0 +1,1717 @@
+/*
+ * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ATH5K_H
+#define _ATH5K_H
+
+/* TODO: Clean up channel debugging (doesn't work anyway) and start
+ * working on reg. control code using all available eeprom information
+ * (rev. engineering needed) */
+#define CHAN_DEBUG 0
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/average.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+/* RX/TX descriptor hw structs
+ * TODO: Driver part should only see sw structs */
+#include "desc.h"
+
+/* EEPROM structs/offsets
+ * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
+ * and clean up common bits, then introduce set/get functions in eeprom.c */
+#include "eeprom.h"
+#include "debug.h"
+#include "../ath.h"
+#include "ani.h"
+
+/* PCI IDs */
+#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
+#define PCI_DEVICE_ID_ATHEROS_AR5311 0x0011 /* AR5311 */
+#define PCI_DEVICE_ID_ATHEROS_AR5211 0x0012 /* AR5211 */
+#define PCI_DEVICE_ID_ATHEROS_AR5212 0x0013 /* AR5212 */
+#define PCI_DEVICE_ID_3COM_3CRDAG675 0x0013 /* 3CRDAG675 (Atheros AR5212) */
+#define PCI_DEVICE_ID_3COM_2_3CRPAG175 0x0013 /* 3CRPAG175 (Atheros AR5212) */
+#define PCI_DEVICE_ID_ATHEROS_AR5210_AP 0x0207 /* AR5210 (Early) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_IBM 0x1014 /* AR5212 (IBM MiniPCI) */
+#define PCI_DEVICE_ID_ATHEROS_AR5210_DEFAULT 0x1107 /* AR5210 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_DEFAULT 0x1113 /* AR5212 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_DEFAULT 0x1112 /* AR5211 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_FPGA 0xf013 /* AR5212 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_LEGACY 0xff12 /* AR5211 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_FPGA11B 0xf11b /* AR5211 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV8 0x0058 /* AR5312 WMAC (AP43-030) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0014 0x0014 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0015 0x0015 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0016 0x0016 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0017 0x0017 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0018 0x0018 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0019 0x0019 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR2413 0x001a /* AR2413 (Griffin-lite) */
+#define PCI_DEVICE_ID_ATHEROS_AR5413 0x001b /* AR5413 (Eagle) */
+#define PCI_DEVICE_ID_ATHEROS_AR5424 0x001c /* AR5424 (Condor PCI-E) */
+#define PCI_DEVICE_ID_ATHEROS_AR5416 0x0023 /* AR5416 */
+#define PCI_DEVICE_ID_ATHEROS_AR5418 0x0024 /* AR5418 */
+
+/****************************\
+ GENERIC DRIVER DEFINITIONS
+\****************************/
+
+#define ATH5K_PRINTF(fmt, ...) \
+ pr_warn("%s: " fmt, __func__, ##__VA_ARGS__)
+
+void __printf(3, 4)
+_ath5k_printk(const struct ath5k_hw *ah, const char *level,
+ const char *fmt, ...);
+
+#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
+ _ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__)
+
+#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) \
+do { \
+ if (net_ratelimit()) \
+ ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ATH5K_INFO(_sc, _fmt, ...) \
+ ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__)
+
+#define ATH5K_WARN(_sc, _fmt, ...) \
+ ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__)
+
+#define ATH5K_ERR(_sc, _fmt, ...) \
+ ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
+
+/*
+ * AR5K REGISTER ACCESS
+ */
+
+/* Some macros to read/write fields */
+
+/* First shift, then mask */
+#define AR5K_REG_SM(_val, _flags) \
+ (((_val) << _flags##_S) & (_flags))
+
+/* First mask, then shift */
+#define AR5K_REG_MS(_val, _flags) \
+ (((_val) & (_flags)) >> _flags##_S)
+
+/* Some registers can hold multiple values of interest. For this
+ * reason when we want to write to these registers we must first
+ * retrieve the values which we do not want to clear (lets call this
+ * old_data) and then set the register with this and our new_value:
+ * ( old_data | new_value) */
+#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \
+ ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
+ (((_val) << _flags##_S) & (_flags)), _reg)
+
+#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \
+ ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \
+ (_mask)) | (_flags), _reg)
+
+#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \
+ ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
+
+#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
+ ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
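+/*
+ * Illustrative example of the <FIELD>/<FIELD>_S convention the macros above
+ * rely on (the field below is made up, not a real ath5k register field):
+ *
+ *	#define AR5K_EXAMPLE_FIELD	0x00000f00	// 4-bit field, bits 8..11
+ *	#define AR5K_EXAMPLE_FIELD_S	8
+ *
+ *	AR5K_REG_SM(5, AR5K_EXAMPLE_FIELD)		-> 0x00000500
+ *	AR5K_REG_MS(0x00000500, AR5K_EXAMPLE_FIELD)	-> 5
+ *
+ * AR5K_REG_WRITE_BITS() then performs a read-modify-write: it clears only
+ * bits 8..11 of the register and writes the new field value, leaving all
+ * other bits untouched.
+ */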
+
+/* Access QCU registers per queue */
+#define AR5K_REG_READ_Q(ah, _reg, _queue) \
+ (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
+
+#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \
+ ath5k_hw_reg_write(ah, (1 << _queue), _reg)
+
+#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \
+ _reg |= 1 << _queue; \
+} while (0)
+
+#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \
+ _reg &= ~(1 << _queue); \
+} while (0)
+
+/* Used while writing initvals */
+#define AR5K_REG_WAIT(_i) do { \
+ if (_i % 64) \
+ udelay(1); \
+} while (0)
+
+/*
+ * Some tunable values (these should be changeable by the user)
+ * TODO: Make use of them and add more options OR use debug/configfs
+ */
+#define AR5K_TUNE_DMA_BEACON_RESP 2
+#define AR5K_TUNE_SW_BEACON_RESP 10
+#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0
+#define AR5K_TUNE_MIN_TX_FIFO_THRES 1
+#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_FRAME_LEN / 64) + 1)
+#define AR5K_TUNE_REGISTER_TIMEOUT 20000
+/* Register for RSSI threshold has a mask of 0xff, so 255 seems to
+ * be the max value. */
+#define AR5K_TUNE_RSSI_THRES 129
+/* This must be set when setting the RSSI threshold, otherwise it can
+ * prevent a reset. If AR5K_RSSI_THR is read after writing to it,
+ * the BMISS_THRES will be seen as 0; it seems the hardware doesn't keep
+ * track of it. Max value depends on hardware. For AR5210 this is just 7.
+ * For AR5211+ this seems to be up to 255. */
+#define AR5K_TUNE_BMISS_THRES 7
+#define AR5K_TUNE_REGISTER_DWELL_TIME 20000
+#define AR5K_TUNE_BEACON_INTERVAL 100
+#define AR5K_TUNE_AIFS 2
+#define AR5K_TUNE_AIFS_11B 2
+#define AR5K_TUNE_AIFS_XR 0
+#define AR5K_TUNE_CWMIN 15
+#define AR5K_TUNE_CWMIN_11B 31
+#define AR5K_TUNE_CWMIN_XR 3
+#define AR5K_TUNE_CWMAX 1023
+#define AR5K_TUNE_CWMAX_11B 1023
+#define AR5K_TUNE_CWMAX_XR 7
+#define AR5K_TUNE_NOISE_FLOOR -72
+#define AR5K_TUNE_CCA_MAX_GOOD_VALUE -95
+#define AR5K_TUNE_MAX_TXPOWER 63
+#define AR5K_TUNE_DEFAULT_TXPOWER 25
+#define AR5K_TUNE_TPC_TXPOWER false
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
+#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
+
+#define AR5K_INIT_CARR_SENSE_EN 1
+
+/* Swap RX/TX descriptors for big endian archs */
+#if defined(__BIG_ENDIAN)
+#define AR5K_INIT_CFG ( \
+ AR5K_CFG_SWTD | AR5K_CFG_SWRD \
+)
+#else
+#define AR5K_INIT_CFG 0x00000000
+#endif
+
+/* Initial values */
+#define AR5K_INIT_CYCRSSI_THR1 2
+
+/* Tx retry limit defaults from standard */
+#define AR5K_INIT_RETRY_SHORT 7
+#define AR5K_INIT_RETRY_LONG 4
+
+/* Slot time */
+#define AR5K_INIT_SLOT_TIME_TURBO 6
+#define AR5K_INIT_SLOT_TIME_DEFAULT 9
+#define AR5K_INIT_SLOT_TIME_HALF_RATE 13
+#define AR5K_INIT_SLOT_TIME_QUARTER_RATE 21
+#define AR5K_INIT_SLOT_TIME_B 20
+#define AR5K_SLOT_TIME_MAX 0xffff
+
+/* SIFS */
+#define AR5K_INIT_SIFS_TURBO 6
+#define AR5K_INIT_SIFS_DEFAULT_BG 10
+#define AR5K_INIT_SIFS_DEFAULT_A 16
+#define AR5K_INIT_SIFS_HALF_RATE 32
+#define AR5K_INIT_SIFS_QUARTER_RATE 64
+
+/* Used to calculate tx time for non 5/10/40MHz
+ * operation */
+/* It's preamble time + signal time (16 + 4) */
+#define AR5K_INIT_OFDM_PREAMPLE_TIME 20
+/* Preamble time for 40MHz (turbo) operation (min ?) */
+#define AR5K_INIT_OFDM_PREAMBLE_TIME_MIN 14
+#define AR5K_INIT_OFDM_SYMBOL_TIME 4
+#define AR5K_INIT_OFDM_PLCP_BITS 22
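+/*
+ * Illustrative sketch of how the three constants above combine into the
+ * usual 802.11a/g duration formula. The helper name is hypothetical; the
+ * driver's real helper is ath5k_hw_get_frame_duration(), declared further
+ * down. Example: 100 bytes at 6 Mbit/s (24 data bits per OFDM symbol) need
+ * ceil((22 + 800) / 24) = 35 symbols -> 20 + 35 * 4 = 160 usec.
+ */
+static inline unsigned int example_ofdm_duration(unsigned int len_bytes,
+						 unsigned int bits_per_symbol)
+{
+	unsigned int bits = AR5K_INIT_OFDM_PLCP_BITS + 8 * len_bytes;
+	unsigned int symbols = (bits + bits_per_symbol - 1) / bits_per_symbol;
+
+	return AR5K_INIT_OFDM_PREAMPLE_TIME +
+	       symbols * AR5K_INIT_OFDM_SYMBOL_TIME;
+}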
+
+/* Rx latency for 5 and 10MHz operation (max ?) */
+#define AR5K_INIT_RX_LAT_MAX 63
+/* Tx latencies from initvals (5212 only but no problem
+ * because we only tweak them on 5212) */
+#define AR5K_INIT_TX_LAT_A 54
+#define AR5K_INIT_TX_LAT_BG 384
+/* Tx latency for 40MHz (turbo) operation (min ?) */
+#define AR5K_INIT_TX_LAT_MIN 32
+/* Default Tx/Rx latencies (same for 5211)*/
+#define AR5K_INIT_TX_LATENCY_5210 54
+#define AR5K_INIT_RX_LATENCY_5210 29
+
+/* Tx frame to Tx data start delay */
+#define AR5K_INIT_TXF2TXD_START_DEFAULT 14
+#define AR5K_INIT_TXF2TXD_START_DELAY_10MHZ 12
+#define AR5K_INIT_TXF2TXD_START_DELAY_5MHZ 13
+
+/* We need to increase PHY switch and agc settling time
+ * on turbo mode */
+#define AR5K_SWITCH_SETTLING 5760
+#define AR5K_SWITCH_SETTLING_TURBO 7168
+
+#define AR5K_AGC_SETTLING 28
+/* 38 on 5210 but shouldn't matter */
+#define AR5K_AGC_SETTLING_TURBO 37
+
+
+
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
+enum ath5k_version {
+ AR5K_AR5210 = 0,
+ AR5K_AR5211 = 1,
+ AR5K_AR5212 = 2,
+};
+
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
+enum ath5k_radio {
+ AR5K_RF5110 = 0,
+ AR5K_RF5111 = 1,
+ AR5K_RF5112 = 2,
+ AR5K_RF2413 = 3,
+ AR5K_RF5413 = 4,
+ AR5K_RF2316 = 5,
+ AR5K_RF2317 = 6,
+ AR5K_RF2425 = 7,
+};
+
+/*
+ * Common silicon revision/version values
+ */
+
+#define AR5K_SREV_UNKNOWN 0xffff
+
+#define AR5K_SREV_AR5210 0x00 /* Crete */
+#define AR5K_SREV_AR5311 0x10 /* Maui 1 */
+#define AR5K_SREV_AR5311A 0x20 /* Maui 2 */
+#define AR5K_SREV_AR5311B 0x30 /* Spirit */
+#define AR5K_SREV_AR5211 0x40 /* Oahu */
+#define AR5K_SREV_AR5212 0x50 /* Venice */
+#define AR5K_SREV_AR5312_R2 0x52 /* AP31 */
+#define AR5K_SREV_AR5212_V4 0x54 /* ??? */
+#define AR5K_SREV_AR5213 0x55 /* ??? */
+#define AR5K_SREV_AR5312_R7 0x57 /* AP30 */
+#define AR5K_SREV_AR2313_R8 0x58 /* AP43 */
+#define AR5K_SREV_AR5213A 0x59 /* Hainan */
+#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
+#define AR5K_SREV_AR2414 0x70 /* Griffin */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR5424 0x90 /* Condor */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
+#define AR5K_SREV_AR5414 0xa0 /* Eagle */
+#define AR5K_SREV_AR2415 0xb0 /* Talon */
+#define AR5K_SREV_AR5416 0xc0 /* PCI-E */
+#define AR5K_SREV_AR5418 0xca /* PCI-E */
+#define AR5K_SREV_AR2425 0xe0 /* Swan */
+#define AR5K_SREV_AR2417 0xf0 /* Nala */
+
+#define AR5K_SREV_RAD_5110 0x00
+#define AR5K_SREV_RAD_5111 0x10
+#define AR5K_SREV_RAD_5111A 0x15
+#define AR5K_SREV_RAD_2111 0x20
+#define AR5K_SREV_RAD_5112 0x30
+#define AR5K_SREV_RAD_5112A 0x35
+#define AR5K_SREV_RAD_5112B 0x36
+#define AR5K_SREV_RAD_2112 0x40
+#define AR5K_SREV_RAD_2112A 0x45
+#define AR5K_SREV_RAD_2112B 0x46
+#define AR5K_SREV_RAD_2413 0x50
+#define AR5K_SREV_RAD_5413 0x60
+#define AR5K_SREV_RAD_2316 0x70 /* Cobra SoC */
+#define AR5K_SREV_RAD_2317 0x80
+#define AR5K_SREV_RAD_5424 0xa0 /* Mostly same as 5413 */
+#define AR5K_SREV_RAD_2425 0xa2
+#define AR5K_SREV_RAD_5133 0xc0
+
+#define AR5K_SREV_PHY_5211 0x30
+#define AR5K_SREV_PHY_5212 0x41
+#define AR5K_SREV_PHY_5212A 0x42
+#define AR5K_SREV_PHY_5212B 0x43
+#define AR5K_SREV_PHY_2413 0x45
+#define AR5K_SREV_PHY_5413 0x61
+#define AR5K_SREV_PHY_2425 0x70
+
+/* TODO add support to mac80211 for vendor-specific rates and modes */
+
+/**
+ * DOC: Atheros XR
+ *
+ * Some of this information is based on Documentation from:
+ *
+ * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
+ *
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to, -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ *
+ * Please note that you can use either XR or TURBO but not both; they are
+ * mutually exclusive.
+ *
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
+ */
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput of about 40-60Mbit/s at a
+ * 108Mbit/s signaling rate, achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
+ * There is also a distinction between "static" and "dynamic" turbo modes:
+ *
+ * - Static: is the dumb version: devices set to this mode stick to it until
+ * the mode is turned off.
+ *
+ * - Dynamic: is the intelligent version, the network decides itself if it
+ * is ok to use turbo. As soon as traffic is detected on adjacent channels
+ * (which would get used in turbo mode), or when a non-turbo station joins
+ * the network, turbo mode won't be used until the situation changes again.
+ * Dynamic mode is achieved by Atheros' Adaptive Radio (AR) feature which
+ * monitors the used radio band in order to decide whether turbo mode may
+ * be used or not.
+ *
+ * This article claims Super G sticks to bonding of channels 5 and 6 for
+ * USA:
+ *
+ * http://www.pcworld.com/article/id,113428-page,1/article.html
+ *
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
+ *
+ * - Bursting: allows multiple frames to be sent at once, rather than pausing
+ * after each frame. Bursting is a standards-compliant feature that can be
+ * used with any Access Point.
+ *
+ * - Fast frames: increases the amount of information that can be sent per
+ * frame, also resulting in a reduction of transmission overhead. It is a
+ * proprietary feature that needs to be supported by the Access Point.
+ *
+ * - Compression: data frames are compressed in real time using a Lempel Ziv
+ * algorithm. This is done transparently. Once this feature is enabled,
+ * compression and decompression takes place inside the chipset, without
+ * putting additional load on the host CPU.
+ *
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
+ */
+
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and it also maps EEPROM structures.
+ */
+enum ath5k_driver_mode {
+ AR5K_MODE_11A = 0,
+ AR5K_MODE_11B = 1,
+ AR5K_MODE_11G = 2,
+ AR5K_MODE_MAX = 3
+};
+
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode (A -> Rx, B -> Tx)
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
+enum ath5k_ant_mode {
+ AR5K_ANTMODE_DEFAULT = 0,
+ AR5K_ANTMODE_FIXED_A = 1,
+ AR5K_ANTMODE_FIXED_B = 2,
+ AR5K_ANTMODE_SINGLE_AP = 3,
+ AR5K_ANTMODE_SECTOR_AP = 4,
+ AR5K_ANTMODE_SECTOR_STA = 5,
+ AR5K_ANTMODE_DEBUG = 6,
+ AR5K_ANTMODE_MAX,
+};
+
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
+enum ath5k_bw_mode {
+ AR5K_BWMODE_DEFAULT = 0,
+ AR5K_BWMODE_5MHZ = 1,
+ AR5K_BWMODE_10MHZ = 2,
+ AR5K_BWMODE_40MHZ = 3
+};
+
+
+
+/****************\
+ TX DEFINITIONS
+\****************/
+
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
+ */
+struct ath5k_tx_status {
+ u16 ts_seqnum;
+ u16 ts_tstamp;
+ u8 ts_status;
+ u8 ts_final_idx;
+ u8 ts_final_retry;
+ s8 ts_rssi;
+ u8 ts_shortretry;
+ u8 ts_virtcol;
+ u8 ts_antenna;
+};
+
+#define AR5K_TXSTAT_ALTRATE 0x80
+#define AR5K_TXERR_XRETRY 0x01
+#define AR5K_TXERR_FILT 0x02
+#define AR5K_TXERR_FIFO 0x04
+
+/**
+ * enum ath5k_tx_queue - Queue types used to classify tx queues.
+ * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
+ * @AR5K_TX_QUEUE_DATA: A normal data queue
+ * @AR5K_TX_QUEUE_BEACON: The beacon queue
+ * @AR5K_TX_QUEUE_CAB: The after-beacon queue
+ * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ */
+enum ath5k_tx_queue {
+ AR5K_TX_QUEUE_INACTIVE = 0,
+ AR5K_TX_QUEUE_DATA,
+ AR5K_TX_QUEUE_BEACON,
+ AR5K_TX_QUEUE_CAB,
+ AR5K_TX_QUEUE_UAPSD,
+};
+
+#define AR5K_NUM_TX_QUEUES 10
+#define AR5K_NUM_TX_QUEUES_NOQCU 2
+
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
+ * These are the 4 Access Categories as defined in
+ * the WME spec. 0 is the lowest priority and 3 (VO) is the
+ * highest. Normal data that hasn't been classified
+ * goes to the Best Effort AC.
+ */
+enum ath5k_tx_queue_subtype {
+ AR5K_WME_AC_BK = 0,
+ AR5K_WME_AC_BE,
+ AR5K_WME_AC_VI,
+ AR5K_WME_AC_VO,
+};
+
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
+ */
+enum ath5k_tx_queue_id {
+ AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
+ AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
+ AR5K_TX_QUEUE_ID_DATA_MIN = 0,
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3,
+ AR5K_TX_QUEUE_ID_UAPSD = 7,
+ AR5K_TX_QUEUE_ID_CAB = 8,
+ AR5K_TX_QUEUE_ID_BEACON = 9,
+};
+
+/*
+ * Flags to set hw queue's parameters...
+ */
+#define AR5K_TXQ_FLAG_TXOKINT_ENABLE 0x0001 /* Enable TXOK interrupt */
+#define AR5K_TXQ_FLAG_TXERRINT_ENABLE 0x0002 /* Enable TXERR interrupt */
+#define AR5K_TXQ_FLAG_TXEOLINT_ENABLE 0x0004 /* Enable TXEOL interrupt -not used- */
+#define AR5K_TXQ_FLAG_TXDESCINT_ENABLE 0x0008 /* Enable TXDESC interrupt -not used- */
+#define AR5K_TXQ_FLAG_TXURNINT_ENABLE 0x0010 /* Enable TXURN interrupt */
+#define AR5K_TXQ_FLAG_CBRORNINT_ENABLE 0x0020 /* Enable CBRORN interrupt */
+#define AR5K_TXQ_FLAG_CBRURNINT_ENABLE 0x0040 /* Enable CBRURN interrupt */
+#define AR5K_TXQ_FLAG_QTRIGINT_ENABLE 0x0080 /* Enable QTRIG interrupt */
+#define AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE 0x0100 /* Enable TXNOFRM interrupt */
+#define AR5K_TXQ_FLAG_BACKOFF_DISABLE 0x0200 /* Disable random post-backoff */
+#define AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE 0x0300 /* Enable ready time expiry policy (?)*/
+#define AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE 0x0800 /* Enable backoff while bursting */
+#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
+#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
+
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority. Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
+ */
+struct ath5k_txq {
+ unsigned int qnum;
+ u32 *link;
+ struct list_head q;
+ spinlock_t lock;
+ bool setup;
+ int txq_len;
+ int txq_max;
+ bool txq_poll_mark;
+ unsigned int txq_stuck;
+};
+
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
+ */
+struct ath5k_txq_info {
+ enum ath5k_tx_queue tqi_type;
+ enum ath5k_tx_queue_subtype tqi_subtype;
+ u16 tqi_flags;
+ u8 tqi_aifs;
+ u16 tqi_cw_min;
+ u16 tqi_cw_max;
+ u32 tqi_cbr_period;
+ u32 tqi_cbr_overflow_limit;
+ u32 tqi_burst_time;
+ u32 tqi_ready_time;
+};
+
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
+ */
+enum ath5k_pkt_type {
+ AR5K_PKT_TYPE_NORMAL = 0,
+ AR5K_PKT_TYPE_ATIM = 1,
+ AR5K_PKT_TYPE_PSPOLL = 2,
+ AR5K_PKT_TYPE_BEACON = 3,
+ AR5K_PKT_TYPE_PROBE_RESP = 4,
+ AR5K_PKT_TYPE_PIFS = 5,
+};
+
+/*
+ * TX power and TPC settings
+ */
+#define AR5K_TXPOWER_OFDM(_r, _v) ( \
+ ((0 & 1) << ((_v) + 6)) | \
+ (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \
+)
+
+#define AR5K_TXPOWER_CCK(_r, _v) ( \
+ (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
+)
+
+
+
+/****************\
+ RX DEFINITIONS
+\****************/
+
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dBm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
+ */
+struct ath5k_rx_status {
+ u16 rs_datalen;
+ u16 rs_tstamp;
+ u8 rs_status;
+ u8 rs_phyerr;
+ s8 rs_rssi;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+};
+
+#define AR5K_RXERR_CRC 0x01
+#define AR5K_RXERR_PHY 0x02
+#define AR5K_RXERR_FIFO 0x04
+#define AR5K_RXERR_DECRYPT 0x08
+#define AR5K_RXERR_MIC 0x10
+#define AR5K_RXKEYIX_INVALID ((u8) -1)
+#define AR5K_TXKEYIX_INVALID ((u32) -1)
+
+
+/**************************\
+ BEACON TIMERS DEFINITIONS
+\**************************/
+
+#define AR5K_BEACON_PERIOD 0x0000ffff
+#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
+#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
+
+
+/*
+ * TSF to TU conversion:
+ *
+ * TSF is a 64bit value in usec (microseconds).
+ * TU is a 32bit value and defined by IEEE802.11 (page 6) as "A measurement of
+ * time equal to 1024 usec", so it's roughly milliseconds (usec / 1024).
+ */
+#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
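+/*
+ * Illustrative example of the conversion (the helper below is hypothetical,
+ * not an ath5k interface): a TSF of 2,048,000 usec is 2000 TU, so with a
+ * beacon interval of 100 TU the next TBTT would fall at TU 2100.
+ */
+static inline u32 example_next_tbtt_tu(u64 tsf, u32 bintval_tu)
+{
+	return TSF_TO_TU(tsf) + bintval_tu;
+}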
+
+
+
+/*******************************\
+ GAIN OPTIMIZATION DEFINITIONS
+\*******************************/
+
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
+enum ath5k_rfgain {
+ AR5K_RFGAIN_INACTIVE = 0,
+ AR5K_RFGAIN_ACTIVE,
+ AR5K_RFGAIN_READ_REQUESTED,
+ AR5K_RFGAIN_NEED_CHANGE,
+};
+
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
+struct ath5k_gain {
+ u8 g_step_idx;
+ u8 g_current;
+ u8 g_target;
+ u8 g_low;
+ u8 g_high;
+ u8 g_f_corr;
+ u8 g_state;
+};
+
+
+
+/********************\
+ COMMON DEFINITIONS
+\********************/
+
+#define AR5K_SLOT_TIME_9 396
+#define AR5K_SLOT_TIME_20 880
+#define AR5K_SLOT_TIME_MAX 0xffff
+
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on 2111 frequency converter
+ * that comes together with RF5111
+ * TODO: Clean up
+ */
+struct ath5k_athchan_2ghz {
+ u32 a2_flags;
+ u16 a2_athchan;
+};
+
+/**
+ * enum ath5k_dmasize - DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes,
+ * so be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+ AR5K_DMASIZE_4B = 0,
+ AR5K_DMASIZE_8B,
+ AR5K_DMASIZE_16B,
+ AR5K_DMASIZE_32B,
+ AR5K_DMASIZE_64B,
+ AR5K_DMASIZE_128B,
+ AR5K_DMASIZE_256B,
+ AR5K_DMASIZE_512B
+};
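+/*
+ * The burst size in bytes is 2^(n + 2), where n is the enum value above,
+ * e.g. AR5K_DMASIZE_64B == 4 -> 1 << (4 + 2) == 64 bytes, and the smallest
+ * setting AR5K_DMASIZE_4B == 0 -> 1 << 2 == 4 bytes.
+ */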
+
+
+
+/******************\
+ RATE DEFINITIONS
+\******************/
+
+/**
+ * DOC: Rate codes
+ *
+ * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
+ *
+ * The rate code is used to get the RX rate or set the TX rate on the
+ * hardware descriptors. It is also used for internal modulation control
+ * and settings.
+ *
+ * This is the hardware rate map we are aware of (html unfriendly):
+ *
+ * Rate code Rate (Kbps)
+ * --------- -----------
+ * 0x01 3000 (XR)
+ * 0x02 1000 (XR)
+ * 0x03 250 (XR)
+ * 0x04 - 05 -Reserved-
+ * 0x06 2000 (XR)
+ * 0x07 500 (XR)
+ * 0x08 48000 (OFDM)
+ * 0x09 24000 (OFDM)
+ * 0x0A 12000 (OFDM)
+ * 0x0B 6000 (OFDM)
+ * 0x0C 54000 (OFDM)
+ * 0x0D 36000 (OFDM)
+ * 0x0E 18000 (OFDM)
+ * 0x0F 9000 (OFDM)
+ * 0x10 - 17 -Reserved-
+ * 0x18 11000L (CCK)
+ * 0x19 5500L (CCK)
+ * 0x1A 2000L (CCK)
+ * 0x1B 1000L (CCK)
+ * 0x1C 11000S (CCK)
+ * 0x1D 5500S (CCK)
+ * 0x1E 2000S (CCK)
+ * 0x1F -Reserved-
+ *
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
+ *
+ * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
+ * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
+ * We handle this in ath5k_setup_bands().
+ */
+#define AR5K_MAX_RATES 32
+
+/* B */
+#define ATH5K_RATE_CODE_1M 0x1B
+#define ATH5K_RATE_CODE_2M 0x1A
+#define ATH5K_RATE_CODE_5_5M 0x19
+#define ATH5K_RATE_CODE_11M 0x18
+/* A and G */
+#define ATH5K_RATE_CODE_6M 0x0B
+#define ATH5K_RATE_CODE_9M 0x0F
+#define ATH5K_RATE_CODE_12M 0x0A
+#define ATH5K_RATE_CODE_18M 0x0E
+#define ATH5K_RATE_CODE_24M 0x09
+#define ATH5K_RATE_CODE_36M 0x0D
+#define ATH5K_RATE_CODE_48M 0x08
+#define ATH5K_RATE_CODE_54M 0x0C
+
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
+#define AR5K_SET_SHORT_PREAMBLE 0x04
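+/*
+ * Example: OR-ing this flag into a CCK rate code selects the short preamble
+ * row of the table above, e.g. ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE
+ * == 0x18 | 0x04 == 0x1C, which is the 11000S entry. 1M has no short
+ * preamble variant, which is why 0x1B | 0x04 == 0x1F lands on a reserved
+ * code.
+ */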
+
+/*
+ * Crypto definitions
+ */
+
+#define AR5K_KEYCACHE_SIZE 8
+extern bool ath5k_modparam_nohwcrypt;
+
+/***********************\
+ HW RELATED DEFINITIONS
+\***********************/
+
+/*
+ * Misc definitions
+ */
+#define AR5K_RSSI_EP_MULTIPLIER (1 << 7)
+
+#define AR5K_ASSERT_ENTRY(_e, _s) do { \
+ if (_e >= _s) \
+ return false; \
+} while (0)
+
+/*
+ * Hardware interrupt abstraction
+ */
+
+/**
+ * enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ * not always fatal, on some chips we can continue operation
+ * without resetting the card, that's why %AR5K_INT_FATAL is not
+ * common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a
+ * descriptor's LinkPtr is NULL. For more details, refer to:
+ * "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
+ * increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
+ *
+ * @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
+ * one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
+ * @AR5K_INT_RXPHY: RX PHY Error
+ * @AR5K_INT_RXKCM: RX Key cache miss
+ * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates it's time to send a
+ * beacon that must be handled in software. The alternative is
+ * VEOL support, in which case you let the hardware deal
+ * with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
+ * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
+ * beacons from the AP we have associated with; we should probably
+ * try to reassociate. When in IBSS mode this might mean we have
+ * not received any beacons from any local stations. Note that
+ * every station in an IBSS schedules to send beacons at the
+ * Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ * our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ * nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue's CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ * errors. Indicates we need to reset the card.
+ * @AR5K_INT_GLOBAL: Used to clear and set the IER
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ * bit value
+ *
+ * These are mapped to take advantage of some common bits
+ * between the MACs, to be able to set intr properties
+ * more easily. Some of them are not used yet inside hw.c. Most map
+ * to the respective hw interrupt value as they are common among different
+ * MACs.
+ */
+enum ath5k_int {
+ AR5K_INT_RXOK = 0x00000001,
+ AR5K_INT_RXDESC = 0x00000002,
+ AR5K_INT_RXERR = 0x00000004,
+ AR5K_INT_RXNOFRM = 0x00000008,
+ AR5K_INT_RXEOL = 0x00000010,
+ AR5K_INT_RXORN = 0x00000020,
+ AR5K_INT_TXOK = 0x00000040,
+ AR5K_INT_TXDESC = 0x00000080,
+ AR5K_INT_TXERR = 0x00000100,
+ AR5K_INT_TXNOFRM = 0x00000200,
+ AR5K_INT_TXEOL = 0x00000400,
+ AR5K_INT_TXURN = 0x00000800,
+ AR5K_INT_MIB = 0x00001000,
+ AR5K_INT_SWI = 0x00002000,
+ AR5K_INT_RXPHY = 0x00004000,
+ AR5K_INT_RXKCM = 0x00008000,
+ AR5K_INT_SWBA = 0x00010000,
+ AR5K_INT_BRSSI = 0x00020000,
+ AR5K_INT_BMISS = 0x00040000,
+ AR5K_INT_FATAL = 0x00080000, /* Non common */
+ AR5K_INT_BNR = 0x00100000, /* Non common */
+ AR5K_INT_TIM = 0x00200000, /* Non common */
+ AR5K_INT_DTIM = 0x00400000, /* Non common */
+ AR5K_INT_DTIM_SYNC = 0x00800000, /* Non common */
+ AR5K_INT_GPIO = 0x01000000,
+ AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
+ AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
+ AR5K_INT_QCBRORN = 0x08000000, /* Non common */
+ AR5K_INT_QCBRURN = 0x10000000, /* Non common */
+ AR5K_INT_QTRIG = 0x20000000, /* Non common */
+ AR5K_INT_GLOBAL = 0x80000000,
+
+ AR5K_INT_TX_ALL = AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
+ | AR5K_INT_TXEOL
+ | AR5K_INT_TXURN,
+
+ AR5K_INT_RX_ALL = AR5K_INT_RXOK
+ | AR5K_INT_RXDESC
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXNOFRM
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN,
+
+ AR5K_INT_COMMON = AR5K_INT_RXOK
+ | AR5K_INT_RXDESC
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXNOFRM
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
+ | AR5K_INT_TXEOL
+ | AR5K_INT_TXURN
+ | AR5K_INT_MIB
+ | AR5K_INT_SWI
+ | AR5K_INT_RXPHY
+ | AR5K_INT_RXKCM
+ | AR5K_INT_SWBA
+ | AR5K_INT_BRSSI
+ | AR5K_INT_BMISS
+ | AR5K_INT_GPIO
+ | AR5K_INT_GLOBAL,
+
+ AR5K_INT_NOCARD = 0xffffffff
+};
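+/*
+ * These values are plain bitmasks, so an interrupt mask is built by OR-ing
+ * the events of interest and passing the result to ath5k_hw_set_imr()
+ * (declared further down). The combination below is only an illustrative
+ * example:
+ *
+ *	imask = AR5K_INT_RX_ALL | AR5K_INT_TX_ALL | AR5K_INT_MIB |
+ *		AR5K_INT_SWBA | AR5K_INT_FATAL | AR5K_INT_GLOBAL;
+ *	ath5k_hw_set_imr(ah, imask);
+ */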
+
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
+enum ath5k_calibration_mask {
+ AR5K_CALIBRATION_FULL = 0x01,
+ AR5K_CALIBRATION_SHORT = 0x02,
+ AR5K_CALIBRATION_NF = 0x04,
+ AR5K_CALIBRATION_ANI = 0x08,
+};
+
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are also known to have problems on some cards. This is not a big
+ * problem though because we can have almost the same effect as
+ * FULL_SLEEP by putting the card on warm reset (it's almost powered down).
+ */
+enum ath5k_power_mode {
+ AR5K_PM_UNDEFINED = 0,
+ AR5K_PM_AUTO,
+ AR5K_PM_AWAKE,
+ AR5K_PM_FULL_SLEEP,
+ AR5K_PM_NETWORK_SLEEP,
+};
+
+/*
+ * These match net80211 definitions (not used in
+ * mac80211).
+ * TODO: Clean this up
+ */
+#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/
+#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/
+#define AR5K_LED_AUTH 2 /*IEEE80211_S_AUTH*/
+#define AR5K_LED_ASSOC 3 /*IEEE80211_S_ASSOC*/
+#define AR5K_LED_RUN 4 /*IEEE80211_S_RUN*/
+
+/* GPIO-controlled software LED */
+#define AR5K_SOFTLED_PIN 0
+#define AR5K_SOFTLED_ON 0
+#define AR5K_SOFTLED_OFF 1
+
+
+/* XXX: we *may* move cap_range stuff to struct wiphy */
+struct ath5k_capabilities {
+ /*
+ * Supported PHY modes
+ * (ie. AR5K_MODE_11A, AR5K_MODE_11B, ...)
+ */
+ DECLARE_BITMAP(cap_mode, AR5K_MODE_MAX);
+
+ /*
+ * Frequency range (without regulation restrictions)
+ */
+ struct {
+ u16 range_2ghz_min;
+ u16 range_2ghz_max;
+ u16 range_5ghz_min;
+ u16 range_5ghz_max;
+ } cap_range;
+
+ /*
+ * Values stored in the EEPROM (some of them...)
+ */
+ struct ath5k_eeprom_info cap_eeprom;
+
+ /*
+ * Queue information
+ */
+ struct {
+ u8 q_tx_num;
+ } cap_queues;
+
+ bool cap_has_phyerr_counters;
+ bool cap_has_mrr_support;
+ bool cap_needs_2GHz_ovr;
+};
+
+/* size of noise floor history (keep it a power of two) */
+#define ATH5K_NF_CAL_HIST_MAX 8
+struct ath5k_nfcal_hist {
+ s16 index; /* current index into nfval */
+ s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
+};
+
+#define ATH5K_LED_MAX_NAME_LEN 31
+
+/*
+ * State for LED triggers
+ */
+struct ath5k_led {
+ char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
+ struct ath5k_hw *ah; /* driver state */
+ struct led_classdev led_dev; /* led classdev */
+};
+
+/* Rfkill */
+struct ath5k_rfkill {
+ /* GPIO PIN for rfkill */
+ u16 gpio;
+ /* polarity of rfkill GPIO PIN */
+ bool polarity;
+ /* RFKILL toggle tasklet */
+ struct tasklet_struct toggleq;
+};
+
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
+ * and the MAC headers for each packet
+ */
+ unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
+ * and the MAC headers and padding for
+ * each packet.
+ */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+ unsigned int rxeol_intr;
+};
+
+/*
+ * Misc defines
+ */
+
+#define AR5K_MAX_GPIO 10
+#define AR5K_MAX_RF_BANKS 8
+
+#if CHAN_DEBUG
+#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
+#else
+#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
+#endif
+
+#define ATH_RXBUF 40 /* number of RX buffers */
+#define ATH_TXBUF 200 /* number of TX buffers */
+#define ATH_BCBUF 4 /* number of beacon buffers */
+#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
+#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
+
+/* Driver state associated with an instance of a device */
+struct ath5k_hw {
+ struct ath_common common;
+
+ struct pci_dev *pdev;
+ struct device *dev; /* for dma mapping */
+ int irq;
+ u16 devid;
+ void __iomem *iobase; /* address of the device */
+ struct mutex lock; /* dev-level lock */
+ struct ieee80211_hw *hw; /* IEEE 802.11 common */
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_channel channels[ATH_CHAN_MAX];
+ struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ enum nl80211_iftype opmode;
+
+#ifdef CONFIG_ATH5K_DEBUG
+ struct ath5k_dbg_info debug; /* debug info */
+#endif /* CONFIG_ATH5K_DEBUG */
+
+ struct ath5k_buf *bufptr; /* allocated buffer ptr */
+ struct ath5k_desc *desc; /* TX/RX descriptors */
+ dma_addr_t desc_daddr; /* DMA (physical) address */
+ size_t desc_len; /* size of TX/RX descriptors */
+
+ DECLARE_BITMAP(status, 4);
+#define ATH_STAT_INVALID 0 /* disable hardware accesses */
+#define ATH_STAT_PROMISC 1
+#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
+#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
+#define ATH_STAT_RESET 4 /* hw reset */
+
+ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
+ unsigned int fif_filter_flags; /* Current FIF_* filter flags */
+ struct ieee80211_channel *curchan; /* current h/w channel */
+
+ u16 nvifs;
+
+ enum ath5k_int imask; /* interrupt mask copy */
+
+ spinlock_t irqlock;
+ bool rx_pending; /* rx tasklet pending */
+ bool tx_pending; /* tx tasklet pending */
+
+ u8 bssidmask[ETH_ALEN];
+
+ unsigned int led_pin, /* GPIO pin for driving LED */
+ led_on; /* pin setting for LED on */
+
+ struct work_struct reset_work; /* deferred chip reset */
+ struct work_struct calib_work; /* deferred phy calibration */
+
+ struct list_head rxbuf; /* receive buffer */
+ spinlock_t rxbuflock;
+ u32 *rxlink; /* link ptr in last RX desc */
+ struct tasklet_struct rxtq; /* rx intr tasklet */
+ struct ath5k_led rx_led; /* rx led */
+
+ struct list_head txbuf; /* transmit buffer */
+ spinlock_t txbuflock;
+ unsigned int txbuf_len; /* buf count in txbuf list */
+ struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
+ struct tasklet_struct txtq; /* tx intr tasklet */
+ struct ath5k_led tx_led; /* tx led */
+
+ struct ath5k_rfkill rf_kill;
+
+ spinlock_t block; /* protects beacon */
+ struct tasklet_struct beacontq; /* beacon intr tasklet */
+ struct list_head bcbuf; /* beacon buffer */
+ struct ieee80211_vif *bslot[ATH_BCBUF];
+ u16 num_ap_vifs;
+ u16 num_adhoc_vifs;
+ u16 num_mesh_vifs;
+ unsigned int bhalq, /* SW q for outgoing beacons */
+ bmisscount, /* missed beacon transmits */
+ bintval, /* beacon interval in TU */
+ bsent;
+ unsigned int nexttbtt; /* next beacon time in TU */
+ struct ath5k_txq *cabq; /* content after beacon */
+
+ bool assoc; /* associate state */
+ bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
+
+ struct delayed_work tx_complete_work;
+
+ struct survey_info survey; /* collected survey info */
+
+ enum ath5k_int ah_imr;
+
+ struct ieee80211_channel *ah_current_channel;
+ bool ah_iq_cal_needed;
+ bool ah_single_chip;
+
+ enum ath5k_version ah_version;
+ enum ath5k_radio ah_radio;
+ u32 ah_mac_srev;
+ u16 ah_mac_version;
+ u16 ah_phy_revision;
+ u16 ah_radio_5ghz_revision;
+ u16 ah_radio_2ghz_revision;
+
+#define ah_modes ah_capabilities.cap_mode
+#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
+
+ u8 ah_retry_long;
+ u8 ah_retry_short;
+
+ u32 ah_use_32khz_clock;
+
+ u8 ah_coverage_class;
+ bool ah_ack_bitrate_high;
+ u8 ah_bwmode;
+ bool ah_short_slot;
+
+ /* Antenna Control */
+ u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
+ u8 ah_ant_mode;
+ u8 ah_tx_ant;
+ u8 ah_def_ant;
+
+ struct ath5k_capabilities ah_capabilities;
+
+ struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
+ u32 ah_txq_status;
+ u32 ah_txq_imr_txok;
+ u32 ah_txq_imr_txerr;
+ u32 ah_txq_imr_txurn;
+ u32 ah_txq_imr_txdesc;
+ u32 ah_txq_imr_txeol;
+ u32 ah_txq_imr_cbrorn;
+ u32 ah_txq_imr_cbrurn;
+ u32 ah_txq_imr_qtrig;
+ u32 ah_txq_imr_nofrm;
+
+ u32 ah_txq_isr_txok_all;
+ u32 ah_txq_isr_txurn;
+ u32 ah_txq_isr_qcborn;
+ u32 ah_txq_isr_qcburn;
+ u32 ah_txq_isr_qtrig;
+
+ u32 *ah_rf_banks;
+ size_t ah_rf_banks_size;
+ size_t ah_rf_regs_count;
+ struct ath5k_gain ah_gain;
+ u8 ah_offset[AR5K_MAX_RF_BANKS];
+
+
+ struct {
+ /* Temporary tables used for interpolation */
+ u8 tmpL[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_POWER_TABLE_SIZE];
+ u8 tmpR[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_POWER_TABLE_SIZE];
+ u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2];
+ u16 txp_rates_power_table[AR5K_MAX_RATES];
+ u8 txp_min_idx;
+ bool txp_tpc;
+ /* Values in 0.25dB units */
+ s16 txp_min_pwr;
+ s16 txp_max_pwr;
+ s16 txp_cur_pwr;
+ /* Values in 0.5dB units */
+ s16 txp_offset;
+ s16 txp_ofdm;
+ s16 txp_cck_ofdm_gainf_delta;
+ /* Value in dB units */
+ s16 txp_cck_ofdm_pwr_delta;
+ bool txp_setup;
+ int txp_requested; /* Requested tx power in dBm */
+ } ah_txpower;
+
+ struct ath5k_nfcal_hist ah_nfcal_hist;
+
+ /* average beacon RSSI in our BSS (used by ANI) */
+ struct ewma ah_beacon_rssi_avg;
+
+ /* noise floor from last periodic calibration */
+ s32 ah_noise_floor;
+
+ /* Calibration timestamp */
+ unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_short;
+ unsigned long ah_cal_next_ani;
+
+ /* Calibration mask */
+ u8 ah_cal_mask;
+
+ /*
+ * Function pointers
+ */
+ int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
+ unsigned int, unsigned int, int, enum ath5k_pkt_type,
+ unsigned int, unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int, unsigned int);
+ int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
+ struct ath5k_tx_status *);
+ int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
+ struct ath5k_rx_status *);
+};
+
+struct ath_bus_ops {
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ int (*eeprom_read_mac)(struct ath5k_hw *ah, u8 *mac);
+};
+
+/*
+ * Prototypes
+ */
+extern const struct ieee80211_ops ath5k_hw_ops;
+
+/* Initialization and detach functions */
+int ath5k_hw_init(struct ath5k_hw *ah);
+void ath5k_hw_deinit(struct ath5k_hw *ah);
+
+int ath5k_sysfs_register(struct ath5k_hw *ah);
+void ath5k_sysfs_unregister(struct ath5k_hw *ah);
+
+/*Chip id helper functions */
+int ath5k_hw_read_srev(struct ath5k_hw *ah);
+
+/* LED functions */
+int ath5k_init_leds(struct ath5k_hw *ah);
+void ath5k_led_enable(struct ath5k_hw *ah);
+void ath5k_led_off(struct ath5k_hw *ah);
+void ath5k_unregister_leds(struct ath5k_hw *ah);
+
+
+/* Reset Functions */
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+int ath5k_hw_on_hold(struct ath5k_hw *ah);
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool fast, bool skip_pcu);
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set);
+/* Power management functions */
+
+
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
+
+
+/* DMA Related Functions */
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue);
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+ u32 phys_addr);
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
+/* Interrupt handling */
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
+/* Init/Stop functions */
+void ath5k_hw_dma_init(struct ath5k_hw *ah);
+int ath5k_hw_dma_stop(struct ath5k_hw *ah);
+
+/* EEPROM access functions */
+int ath5k_eeprom_init(struct ath5k_hw *ah);
+void ath5k_eeprom_detach(struct ath5k_hw *ah);
+int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
+
+/* Protocol Control Unit Functions */
+/* Helpers */
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ int len, struct ieee80211_rate *rate, bool shortpre);
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
+/* RX filter control */
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
+void ath5k_hw_set_bssid(struct ath5k_hw *ah);
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+/* Receive (DRU) start/stop functions */
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
+/* Beacon control functions */
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
+void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+ u32 interval);
+bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
+/* Init function */
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
+
+/* Queue Control Unit, DFS Control Unit Functions */
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info);
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+ enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue);
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time);
+/* Init function */
+int ath5k_hw_init_queues(struct ath5k_hw *ah);
+
+/* Hardware Descriptor Functions */
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ u32 size, unsigned int flags);
+int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
+ u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
+
+
+/* GPIO Functions */
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level);
+
+
+/* RFkill Functions */
+void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+
+
+/* Misc functions TODO: Cleanup */
+int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+
+
+/* Initial register settings functions */
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+
+
+/* PHY functions */
+/* Misc PHY functions */
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+/* Gain_F optimization */
+enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
+/* PHY/RF channel functions */
+bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+/* PHY calibration */
+void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
+int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
+/* Spur mitigation */
+bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
+/* Antenna control */
+void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
+void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
+/* TX power setup */
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+/* Init function */
+int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 mode, bool fast);
+
+/*
+ * Functions used internally
+ */
+
+static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
+{
+ return &ah->common;
+}
+
+static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
+{
+ return &(ath5k_hw_common(ah)->regulatory);
+}
+
+#ifdef CONFIG_ATH5K_AHB
+#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000)
+
+static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
+{
+ /* On AR2315 and AR2317 the PCI clock domain registers
+ * are outside of the WMAC register space */
+ if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
+ (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
+ return AR5K_AR2315_PCI_BASE + reg;
+
+ return ah->iobase + reg;
+}
+
+static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
+{
+ return ioread32(ath5k_ahb_reg(ah, reg));
+}
+
+static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
+{
+ iowrite32(val, ath5k_ahb_reg(ah, reg));
+}
+
+#else
+
+static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
+{
+ return ioread32(ah->iobase + reg);
+}
+
+static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
+{
+ iowrite32(val, ah->iobase + reg);
+}
+
+#endif
+
+static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah)
+{
+ return ath5k_hw_common(ah)->bus_ops->ath_bus_type;
+}
+
+static inline void ath5k_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+static inline bool ath5k_hw_nvram_read(struct ath5k_hw *ah, u32 off, u16 *data)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return common->bus_ops->eeprom_read(common, off, data);
+}
+
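+/* Reverse the order of the lowest 'bits' bits of 'val' */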
+static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
+{
+ u32 retval = 0, bit, i;
+
+ for (i = 0; i < bits; i++) {
+ bit = (val >> i) & 1;
+ retval = (retval << 1) | bit;
+ }
+
+ return retval;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
new file mode 100644
index 000000000..66b636615
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* Attach/Detach Functions and helpers *
+\*************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+/**
+ * ath5k_hw_post() - Power On Self Test helper function
+ * @ah: The &struct ath5k_hw
+ */
+static int ath5k_hw_post(struct ath5k_hw *ah)
+{
+
+ static const u32 static_pattern[4] = {
+ 0x55555555, 0xaaaaaaaa,
+ 0x66666666, 0x99999999
+ };
+ static const u16 regs[2] = { AR5K_STA_ID0, AR5K_PHY(8) };
+ int i, c;
+ u16 cur_reg;
+ u32 var_pattern;
+ u32 init_val;
+ u32 cur_val;
+
+ for (c = 0; c < 2; c++) {
+
+ cur_reg = regs[c];
+
+ /* Save previous value */
+ init_val = ath5k_hw_reg_read(ah, cur_reg);
+
+ for (i = 0; i < 256; i++) {
+ var_pattern = i << 16 | i;
+ ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+ cur_val = ath5k_hw_reg_read(ah, cur_reg);
+
+ if (cur_val != var_pattern) {
+ ATH5K_ERR(ah, "POST Failed !!!\n");
+ return -EAGAIN;
+ }
+
+ /* Found on ndiswrapper dumps */
+ var_pattern = 0x0039080f;
+ ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+ }
+
+ for (i = 0; i < 4; i++) {
+ var_pattern = static_pattern[i];
+ ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+ cur_val = ath5k_hw_reg_read(ah, cur_reg);
+
+ if (cur_val != var_pattern) {
+ ATH5K_ERR(ah, "POST Failed !!!\n");
+ return -EAGAIN;
+ }
+
+ /* Found on ndiswrapper dumps */
+ var_pattern = 0x003b080f;
+ ath5k_hw_reg_write(ah, var_pattern, cur_reg);
+ }
+
+ /* Restore previous value */
+ ath5k_hw_reg_write(ah, init_val, cur_reg);
+
+ }
+
+ return 0;
+
+}
+
+/**
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
+ * @ah: The &struct ath5k_hw associated with the device
+ *
+ * Check if the device is supported, perform a POST and initialize the needed
+ * structs. Returns -ENOMEM if we don't have memory for the needed structs,
+ * -ENODEV if the device is not supported, or another negative error code
+ * (after printing an error message) if something else went wrong.
+ */
+int ath5k_hw_init(struct ath5k_hw *ah)
+{
+ static const u8 zero_mac[ETH_ALEN] = { };
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct pci_dev *pdev = ah->pdev;
+ struct ath5k_eeprom_info *ee;
+ int ret;
+ u32 srev;
+
+ /*
+ * HW information
+ */
+ ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
+ ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+ ah->ah_imr = 0;
+ ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+ ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
+ ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
+ ah->ah_noise_floor = -95; /* until first NF calibration is run */
+ ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
+ ah->ah_current_channel = &ah->channels[0];
+
+ /*
+ * Find the mac version
+ */
+ ath5k_hw_read_srev(ah);
+ srev = ah->ah_mac_srev;
+ if (srev < AR5K_SREV_AR5311)
+ ah->ah_version = AR5K_AR5210;
+ else if (srev < AR5K_SREV_AR5212)
+ ah->ah_version = AR5K_AR5211;
+ else
+ ah->ah_version = AR5K_AR5212;
+
+ /* Get the MAC version */
+ ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
+
+ /* Fill the ath5k_hw struct with the needed functions */
+ ret = ath5k_hw_init_desc_functions(ah);
+ if (ret)
+ goto err;
+
+ /* Bring device out of sleep and reset its units */
+ ret = ath5k_hw_nic_wakeup(ah, NULL);
+ if (ret)
+ goto err;
+
+ /* Get PHY and RADIO revisions */
+ ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
+ 0xffffffff;
+ ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
+ IEEE80211_BAND_5GHZ);
+
+ /* Try to identify radio chip based on its srev */
+ switch (ah->ah_radio_5ghz_revision & 0xf0) {
+ case AR5K_SREV_RAD_5111:
+ ah->ah_radio = AR5K_RF5111;
+ ah->ah_single_chip = false;
+ ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+ IEEE80211_BAND_2GHZ);
+ break;
+ case AR5K_SREV_RAD_5112:
+ case AR5K_SREV_RAD_2112:
+ ah->ah_radio = AR5K_RF5112;
+ ah->ah_single_chip = false;
+ ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+ IEEE80211_BAND_2GHZ);
+ break;
+ case AR5K_SREV_RAD_2413:
+ ah->ah_radio = AR5K_RF2413;
+ ah->ah_single_chip = true;
+ break;
+ case AR5K_SREV_RAD_5413:
+ ah->ah_radio = AR5K_RF5413;
+ ah->ah_single_chip = true;
+ break;
+ case AR5K_SREV_RAD_2316:
+ ah->ah_radio = AR5K_RF2316;
+ ah->ah_single_chip = true;
+ break;
+ case AR5K_SREV_RAD_2317:
+ ah->ah_radio = AR5K_RF2317;
+ ah->ah_single_chip = true;
+ break;
+ case AR5K_SREV_RAD_5424:
+ if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
+ ah->ah_mac_version == AR5K_SREV_AR2417) {
+ ah->ah_radio = AR5K_RF2425;
+ ah->ah_single_chip = true;
+ } else {
+ ah->ah_radio = AR5K_RF5413;
+ ah->ah_single_chip = true;
+ }
+ break;
+ default:
+ /* Identify radio based on mac/phy srev */
+ if (ah->ah_version == AR5K_AR5210) {
+ ah->ah_radio = AR5K_RF5110;
+ ah->ah_single_chip = false;
+ } else if (ah->ah_version == AR5K_AR5211) {
+ ah->ah_radio = AR5K_RF5111;
+ ah->ah_single_chip = false;
+ ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
+ IEEE80211_BAND_2GHZ);
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
+ ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
+ ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
+ ah->ah_radio = AR5K_RF2425;
+ ah->ah_single_chip = true;
+ ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
+ } else if (srev == AR5K_SREV_AR5213A &&
+ ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
+ ah->ah_radio = AR5K_RF5112;
+ ah->ah_single_chip = false;
+ ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
+ ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
+ ah->ah_radio = AR5K_RF2316;
+ ah->ah_single_chip = true;
+ ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
+ ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
+ ah->ah_radio = AR5K_RF5413;
+ ah->ah_single_chip = true;
+ ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
+ ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
+ ah->ah_radio = AR5K_RF2413;
+ ah->ah_single_chip = true;
+ ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
+ } else {
+ ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
+
+ /* Return on unsupported chips (unsupported eeprom etc) */
+ if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
+ ATH5K_ERR(ah, "Device not yet supported.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * POST
+ */
+ ret = ath5k_hw_post(ah);
+ if (ret)
+ goto err;
+
+ /* Enable pci core retry fix on Hainan (5213A) and later chips */
+ if (srev >= AR5K_SREV_AR5213A)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_RETRY_FIX);
+
+ /*
+ * Get card capabilities, calibration values etc
+ * TODO: EEPROM work
+ */
+ ret = ath5k_eeprom_init(ah);
+ if (ret) {
+ ATH5K_ERR(ah, "unable to init EEPROM\n");
+ goto err;
+ }
+
+ ee = &ah->ah_capabilities.cap_eeprom;
+
+ /*
+ * Write PCI-E power save settings
+ */
+ if ((ah->ah_version == AR5K_AR5212) && pdev && (pci_is_pcie(pdev))) {
+ ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
+ ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
+
+ /* Shut off RX when elecidle is asserted */
+ ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
+ ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
+
+ /* If serdes programming is enabled, increase PCI-E
+ * tx power for systems with long trace from host
+ * to minicard connector. */
+ if (ee->ee_serdes)
+ ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES);
+ else
+ ath5k_hw_reg_write(ah, 0xf6800579, AR5K_PCIE_SERDES);
+
+ /* Shut off PLL and CLKREQ active in L1 */
+ ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES);
+
+ /* Preserve other settings */
+ ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES);
+ ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES);
+ ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES);
+
+ /* Reset SERDES to load new settings */
+ ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
+ usleep_range(1000, 1500);
+ }
+
+ /* Get misc capabilities */
+ ret = ath5k_hw_set_capabilities(ah);
+ if (ret) {
+ ATH5K_ERR(ah, "unable to get device capabilities\n");
+ goto err;
+ }
+
+ /* Crypto settings */
+ common->keymax = (ah->ah_version == AR5K_AR5210 ?
+ AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
+
+ if (srev >= AR5K_SREV_AR5212_V4 &&
+ (ee->ee_version < AR5K_EEPROM_VERSION_5_0 ||
+ !AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
+ common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
+
+ if (srev >= AR5K_SREV_AR2414) {
+ common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
+ AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
+ AR5K_MISC_MODE_COMBINED_MIC);
+ }
+
+ /* MAC address is cleared until add_interface */
+ ath5k_hw_set_lladdr(ah, zero_mac);
+
+ /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
+ memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
+ ath5k_hw_set_bssid(ah);
+ ath5k_hw_set_opmode(ah, ah->opmode);
+
+ ath5k_hw_rfgain_opt_init(ah);
+
+ ath5k_hw_init_nfcal_hist(ah);
+
+ /* turn on HW LEDs */
+ ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
+ * @ah: The &struct ath5k_hw
+ */
+void ath5k_hw_deinit(struct ath5k_hw *ah)
+{
+ __set_bit(ATH_STAT_INVALID, ah->status);
+
+ kfree(ah->ah_rf_banks);
+
+ ath5k_eeprom_detach(ah);
+
+ /* assume interrupts are down */
+}
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
new file mode 100644
index 000000000..a6131825c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -0,0 +1,3205 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2006 Devicescape Software, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hardirq.h>
+#include <linux/if.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/cache.h>
+#include <linux/ethtool.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/nl80211.h>
+
+#include <net/cfg80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <asm/unaligned.h>
+
+#include <net/mac80211.h>
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+#include "ath5k.h"
+#include "../regd.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+bool ath5k_modparam_nohwcrypt;
+module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool modparam_fastchanswitch;
+module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
+MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+
+static bool ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
+
+/* Module info */
+MODULE_AUTHOR("Jiri Slaby");
+MODULE_AUTHOR("Nick Kossifidis");
+MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int ath5k_init(struct ieee80211_hw *hw);
+static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
+ bool skip_pcu);
+
+/* Known SREVs */
+static const struct ath5k_srev_name srev_names[] = {
+#ifdef CONFIG_ATH5K_AHB
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
+ { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
+#else
+ { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
+ { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
+ { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
+ { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
+ { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
+ { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
+ { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
+ { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
+ { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
+ { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
+ { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
+ { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
+ { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
+ { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
+ { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
+ { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
+ { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
+ { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
+#endif
+ { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
+ { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
+ { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
+ { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
+ { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
+ { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
+ { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
+ { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
+ { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
+ { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
+ { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
+ { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
+ { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
+ { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
+ { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
+#ifdef CONFIG_ATH5K_AHB
+ { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
+ { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
+#endif
+ { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
+};
+
+static const struct ieee80211_rate ath5k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH5K_RATE_CODE_1M, },
+ { .bitrate = 20,
+ .hw_value = ATH5K_RATE_CODE_2M,
+ .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH5K_RATE_CODE_5_5M,
+ .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH5K_RATE_CODE_11M,
+ .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 60,
+ .hw_value = ATH5K_RATE_CODE_6M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 90,
+ .hw_value = ATH5K_RATE_CODE_9M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 120,
+ .hw_value = ATH5K_RATE_CODE_12M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 180,
+ .hw_value = ATH5K_RATE_CODE_18M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 240,
+ .hw_value = ATH5K_RATE_CODE_24M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 360,
+ .hw_value = ATH5K_RATE_CODE_36M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 480,
+ .hw_value = ATH5K_RATE_CODE_48M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+ { .bitrate = 540,
+ .hw_value = ATH5K_RATE_CODE_54M,
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
+};
+
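+/*
+ * Extend the 15-bit rx timestamp (rstamp) to a full 64-bit TSF using the
+ * current hardware TSF; if the low TSF bits have already wrapped past the
+ * rx timestamp, step back one 0x8000 window first.
+ */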
+static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
+{
+ u64 tsf = ath5k_hw_get_tsf64(ah);
+
+ if ((tsf & 0x7fff) < rstamp)
+ tsf -= 0x8000;
+
+ return (tsf & ~0x7fff) | rstamp;
+}
+
+const char *
+ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
+{
+ const char *name = "xxxxx";
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
+ if (srev_names[i].sr_type != type)
+ continue;
+
+ if ((val & 0xf0) == srev_names[i].sr_val)
+ name = srev_names[i].sr_name;
+
+ if ((val & 0xff) == srev_names[i].sr_val) {
+ name = srev_names[i].sr_name;
+ break;
+ }
+ }
+
+ return name;
+}
+
+static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
+ return ath5k_hw_reg_read(ah, reg_offset);
+}
+
+static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
+ ath5k_hw_reg_write(ah, val, reg_offset);
+}
+
+static const struct ath_ops ath5k_common_ops = {
+ .read = ath5k_ioread32,
+ .write = ath5k_iowrite32,
+};
+
+/***********************\
+* Driver Initialization *
+\***********************/
+
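+/* Regulatory domain notifier: forward requests from cfg80211 to the
+ * shared ath regulatory code */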
+static void ath5k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
+
+ ath_reg_notifier_apply(wiphy, request, regulatory);
+}
+
+/********************\
+* Channel/mode setup *
+\********************/
+
+/*
+ * Returns true for the channel numbers used.
+ */
+#ifdef CONFIG_ATH5K_TEST_CHANNELS
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+{
+ return true;
+}
+
+#else
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+{
+ if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ return true;
+
+ return /* UNII 1,2 */
+ (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+ /* midband */
+ ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
+ /* UNII-3 */
+ ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+ /* 802.11j 5.030-5.080 GHz (20MHz) */
+ (chan == 8 || chan == 12 || chan == 16) ||
+ /* 802.11j 4.9GHz (20MHz) */
+ (chan == 184 || chan == 188 || chan == 192 || chan == 196));
+}
+#endif
+
+static unsigned int
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+ unsigned int mode, unsigned int max)
+{
+ unsigned int count, size, freq, ch;
+ enum ieee80211_band band;
+
+ switch (mode) {
+ case AR5K_MODE_11A:
+ /* 1..220, but 2GHz frequencies are filtered by check_channel */
+ size = 220;
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ case AR5K_MODE_11B:
+ case AR5K_MODE_11G:
+ size = 26;
+ band = IEEE80211_BAND_2GHZ;
+ break;
+ default:
+ ATH5K_WARN(ah, "bad mode, not copying channels\n");
+ return 0;
+ }
+
+ count = 0;
+ for (ch = 1; ch <= size && count < max; ch++) {
+ freq = ieee80211_channel_to_frequency(ch, band);
+
+ if (freq == 0) /* mapping failed - not a standard channel */
+ continue;
+
+ /* Write channel info, needed for ath5k_channel_ok() */
+ channels[count].center_freq = freq;
+ channels[count].band = band;
+ channels[count].hw_value = mode;
+
+ /* Check if channel is supported by the chipset */
+ if (!ath5k_channel_ok(ah, &channels[count]))
+ continue;
+
+ if (!ath5k_is_standard_channel(ch, band))
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
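+/* Build the reverse mapping from hardware rate codes to bitrate table indices */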
+static void
+ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
+{
+ u8 i;
+
+ for (i = 0; i < AR5K_MAX_RATES; i++)
+ ah->rate_idx[b->band][i] = -1;
+
+ for (i = 0; i < b->n_bitrates; i++) {
+ ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
+ if (b->bitrates[i].hw_value_short)
+ ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
+ }
+}
+
+static int
+ath5k_setup_bands(struct ieee80211_hw *hw)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ieee80211_supported_band *sband;
+ int max_c, count_c = 0;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+ max_c = ARRAY_SIZE(ah->channels);
+
+ /* 2GHz band */
+ sband = &ah->sbands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
+
+ if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
+ /* G mode */
+ memcpy(sband->bitrates, &ath5k_rates[0],
+ sizeof(struct ieee80211_rate) * 12);
+ sband->n_bitrates = 12;
+
+ sband->channels = ah->channels;
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
+ AR5K_MODE_11G, max_c);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ count_c = sband->n_channels;
+ max_c -= count_c;
+ } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
+ /* B mode */
+ memcpy(sband->bitrates, &ath5k_rates[0],
+ sizeof(struct ieee80211_rate) * 4);
+ sband->n_bitrates = 4;
+
+ /* 5211 only supports B rates and uses 4bit rate codes
+		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B);
+ * fix them up here:
+ */
+ if (ah->ah_version == AR5K_AR5211) {
+ for (i = 0; i < 4; i++) {
+ sband->bitrates[i].hw_value =
+ sband->bitrates[i].hw_value & 0xF;
+ sband->bitrates[i].hw_value_short =
+ sband->bitrates[i].hw_value_short & 0xF;
+ }
+ }
+
+ sband->channels = ah->channels;
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
+ AR5K_MODE_11B, max_c);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ count_c = sband->n_channels;
+ max_c -= count_c;
+ }
+ ath5k_setup_rate_idx(ah, sband);
+
+ /* 5GHz band, A mode */
+ if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
+ sband = &ah->sbands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
+
+ memcpy(sband->bitrates, &ath5k_rates[4],
+ sizeof(struct ieee80211_rate) * 8);
+ sband->n_bitrates = 8;
+
+ sband->channels = &ah->channels[count_c];
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
+ AR5K_MODE_11A, max_c);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ }
+ ath5k_setup_rate_idx(ah, sband);
+
+ ath5k_debug_dump_bands(ah);
+
+ return 0;
+}
+
+/*
+ * Set/change channels. We always reset the chip.
+ * To accomplish this we must first clean up any pending DMA,
+ * then restart things the same way ath5k_init does.
+ *
+ * Called with ah->lock.
+ */
+int
+ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
+{
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "channel set, resetting (%u -> %u MHz)\n",
+ ah->curchan->center_freq, chandef->chan->center_freq);
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ ah->ah_bwmode = AR5K_BWMODE_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ ah->ah_bwmode = AR5K_BWMODE_10MHZ;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /*
+ * To switch channels clear any pending DMA operations;
+ * wait long enough for the RX fifo to drain, reset the
+ * hardware at the new frequency, and then re-enable
+ * the relevant bits of the h/w.
+ */
+ return ath5k_reset(ah, chandef->chan, true);
+}
+
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath5k_vif_iter_data *iter_data = data;
+ int i;
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+
+ if (iter_data->hw_macaddr)
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^ mac[i]);
+
+ if (!iter_data->found_active) {
+ iter_data->found_active = true;
+ memcpy(iter_data->active_mac, mac, ETH_ALEN);
+ }
+
+ if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
+ if (ether_addr_equal(iter_data->hw_macaddr, mac))
+ iter_data->need_set_hw_addr = false;
+
+ if (!iter_data->any_assoc) {
+ if (avf->assoc)
+ iter_data->any_assoc = true;
+ }
+
+ /* Calculate combined mode - when APs are active, operate in AP mode.
+ * Otherwise use the mode of the new interface. This can currently
+ * only deal with combinations of APs and STAs. Only one ad-hoc
+	 * interface is allowed.
+ */
+ if (avf->opmode == NL80211_IFTYPE_AP)
+ iter_data->opmode = NL80211_IFTYPE_AP;
+ else {
+ if (avf->opmode == NL80211_IFTYPE_STATION)
+ iter_data->n_stas++;
+ if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
+ iter_data->opmode = avf->opmode;
+ }
+}
+
+void
+ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ath5k_vif_iter_data iter_data;
+ u32 rfilt;
+
+ /*
+ * Use the hardware MAC address as reference, the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ iter_data.hw_macaddr = common->macaddr;
+ eth_broadcast_addr(iter_data.mask);
+ iter_data.found_active = false;
+ iter_data.need_set_hw_addr = true;
+ iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
+ iter_data.n_stas = 0;
+
+ if (vif)
+ ath5k_vif_iter(&iter_data, vif->addr, vif);
+
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
+ memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
+
+ ah->opmode = iter_data.opmode;
+ if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
+ /* Nothing active, default to station mode */
+ ah->opmode = NL80211_IFTYPE_STATION;
+
+ ath5k_hw_set_opmode(ah, ah->opmode);
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
+ ah->opmode, ath_opmode_to_string(ah->opmode));
+
+ if (iter_data.need_set_hw_addr && iter_data.found_active)
+ ath5k_hw_set_lladdr(ah, iter_data.active_mac);
+
+ if (ath5k_hw_hasbssidmask(ah))
+ ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
+
+ /* Set up RX Filter */
+ if (iter_data.n_stas > 1) {
+ /* If you have multiple STA interfaces connected to
+		 * different APs, ARPs are not received (most of the time?).
+ * Enabling PROMISC appears to fix that problem.
+ */
+ ah->filter_flags |= AR5K_RX_FILTER_PROM;
+ }
+
+ rfilt = ah->filter_flags;
+ ath5k_hw_set_rx_filter(ah, rfilt);
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
+}
+
+static inline int
+ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
+{
+ int rix;
+
+ /* return base rate on errors */
+ if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
+ "hw_rix out of bounds: %x\n", hw_rix))
+ return 0;
+
+ rix = ah->rate_idx[ah->curchan->band][hw_rix];
+ if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
+ rix = 0;
+
+ return rix;
+}
+
+/***************\
+* Buffers setup *
+\***************/
+
+static
+struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct sk_buff *skb;
+
+ /*
+ * Allocate buffer with headroom_needed space for the
+ * fake physical layer header at the start.
+ */
+ skb = ath_rxbuf_alloc(common,
+ common->rx_bufsize,
+ GFP_ATOMIC);
+
+ if (!skb) {
+ ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
+ common->rx_bufsize);
+ return NULL;
+ }
+
+ *skb_addr = dma_map_single(ah->dev,
+ skb->data, common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
+ ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static int
+ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
+{
+ struct sk_buff *skb = bf->skb;
+ struct ath5k_desc *ds;
+ int ret;
+
+ if (!skb) {
+ skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
+ if (!skb)
+ return -ENOMEM;
+ bf->skb = skb;
+ }
+
+ /*
+ * Setup descriptors. For receive we always terminate
+ * the descriptor list with a self-linked entry so we'll
+ * not get overrun under high load (as can happen with a
+ * 5212 when ANI processing enables PHY error frames).
+ *
+ * To ensure the last descriptor is self-linked we create
+ * each descriptor as self-linked and add it to the end. As
+ * each additional descriptor is added the previous self-linked
+ * entry is "fixed" naturally. This should be safe even
+ * if DMA is happening. When processing RX interrupts we
+ * never remove/process the last, self-linked, entry on the
+ * descriptor list. This ensures the hardware always has
+ * someplace to write a new frame.
+ */
+ ds = bf->desc;
+ ds->ds_link = bf->daddr; /* link to self */
+ ds->ds_data = bf->skbaddr;
+ ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret) {
+ ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
+ return ret;
+ }
+
+ if (ah->rxlink != NULL)
+ *ah->rxlink = bf->daddr;
+ ah->rxlink = &ds->ds_link;
+ return 0;
+}
+
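+/* Map the 802.11 frame type of an skb to the corresponding hardware packet type */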
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
+static struct ieee80211_rate *
+ath5k_get_rate(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ /*
+ * convert a ieee80211_tx_rate RC-table entry to
+ * the respective ieee80211_rate struct
+ */
+	if (bf->rates[idx].idx < 0)
+		return NULL;
+
+	return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
+}
+
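+/* Return the hardware rate code for MRR entry 'idx', using the short
+ * preamble variant when the rate-control flags request it */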
+static u16
+ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
+ const struct ieee80211_tx_info *info,
+ struct ath5k_buf *bf, int idx)
+{
+ struct ieee80211_rate *rate;
+ u16 hw_rate;
+ u8 rc_flags;
+
+ rate = ath5k_get_rate(hw, info, bf, idx);
+ if (!rate)
+ return 0;
+
+ rc_flags = bf->rates[idx].flags;
+ hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
+ rate->hw_value_short : rate->hw_value;
+
+ return hw_rate;
+}
+
+static int
+ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
+ struct ath5k_txq *txq, int padsize,
+ struct ieee80211_tx_control *control)
+{
+ struct ath5k_desc *ds = bf->desc;
+ struct sk_buff *skb = bf->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
+ struct ieee80211_rate *rate;
+ unsigned int mrr_rate[3], mrr_tries[3];
+ int i, ret;
+ u16 hw_rate;
+ u16 cts_rate = 0;
+ u16 duration = 0;
+ u8 rc_flags;
+
+ flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
+
+ /* XXX endianness */
+ bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ah->dev, bf->skbaddr))
+ return -ENOSPC;
+
+ ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
+ ARRAY_SIZE(bf->rates));
+
+ rate = ath5k_get_rate(ah->hw, info, bf, 0);
+
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ flags |= AR5K_TXDESC_NOACK;
+
+ rc_flags = info->control.rates[0].flags;
+
+ hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
+
+ pktlen = skb->len;
+
+ /* FIXME: If we are in g mode and rate is a CCK rate
+ * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
+ * from tx power (value is in dB units already) */
+ if (info->control.hw_key) {
+ keyidx = info->control.hw_key->hw_key_idx;
+ pktlen += info->control.hw_key->icv_len;
+ }
+ if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ flags |= AR5K_TXDESC_RTSENA;
+ cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
+ info->control.vif, pktlen, info));
+ }
+ if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ flags |= AR5K_TXDESC_CTSENA;
+ cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
+ info->control.vif, pktlen, info));
+ }
+
+ ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
+ get_hw_packet_type(skb),
+ (ah->ah_txpower.txp_requested * 2),
+ hw_rate,
+ bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
+ cts_rate, duration);
+ if (ret)
+ goto err_unmap;
+
+ /* Set up MRR descriptor */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+
+ for (i = 0; i < 3; i++) {
+
+ rate = ath5k_get_rate(ah->hw, info, bf, i);
+ if (!rate)
+ break;
+
+ mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
+ mrr_tries[i] = bf->rates[i].count;
+ }
+
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
+ }
+
+ ds->ds_link = 0;
+ ds->ds_data = bf->skbaddr;
+
+ spin_lock_bh(&txq->lock);
+ list_add_tail(&bf->list, &txq->q);
+ txq->txq_len++;
+ if (txq->link == NULL) /* is this first packet? */
+ ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
+ else /* no, so only link it */
+ *txq->link = bf->daddr;
+
+ txq->link = &ds->ds_link;
+ ath5k_hw_start_tx_dma(ah, txq->qnum);
+ mmiowb();
+ spin_unlock_bh(&txq->lock);
+
+ return 0;
+err_unmap:
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*******************\
+* Descriptors setup *
+\*******************/
+
+static int
+ath5k_desc_alloc(struct ath5k_hw *ah)
+{
+ struct ath5k_desc *ds;
+ struct ath5k_buf *bf;
+ dma_addr_t da;
+ unsigned int i;
+ int ret;
+
+ /* allocate descriptors */
+ ah->desc_len = sizeof(struct ath5k_desc) *
+ (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
+
+ ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
+ &ah->desc_daddr, GFP_KERNEL);
+ if (ah->desc == NULL) {
+ ATH5K_ERR(ah, "can't allocate descriptors\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ ds = ah->desc;
+ da = ah->desc_daddr;
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
+ ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
+
+ bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
+ sizeof(struct ath5k_buf), GFP_KERNEL);
+ if (bf == NULL) {
+ ATH5K_ERR(ah, "can't allocate bufptr\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ah->bufptr = bf;
+
+ INIT_LIST_HEAD(&ah->rxbuf);
+ for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
+ bf->desc = ds;
+ bf->daddr = da;
+ list_add_tail(&bf->list, &ah->rxbuf);
+ }
+
+ INIT_LIST_HEAD(&ah->txbuf);
+ ah->txbuf_len = ATH_TXBUF;
+ for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
+ bf->desc = ds;
+ bf->daddr = da;
+ list_add_tail(&bf->list, &ah->txbuf);
+ }
+
+ /* beacon buffers */
+ INIT_LIST_HEAD(&ah->bcbuf);
+ for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
+ bf->desc = ds;
+ bf->daddr = da;
+ list_add_tail(&bf->list, &ah->bcbuf);
+ }
+
+ return 0;
+err_free:
+ dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
+err:
+ ah->desc = NULL;
+ return ret;
+}
+
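+/* Unmap and free the skb attached to a tx buffer, if any */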
+void
+ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
+{
+ BUG_ON(!bf);
+ if (!bf->skb)
+ return;
+ dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
+ DMA_TO_DEVICE);
+ ieee80211_free_txskb(ah->hw, bf->skb);
+ bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
+}
+
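+/* Unmap and free the skb attached to an rx buffer, if any */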
+void
+ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ BUG_ON(!bf);
+ if (!bf->skb)
+ return;
+ dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(bf->skb);
+ bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
+}
+
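+/* Free all tx/rx/beacon buffer skbs and the shared descriptor DMA area */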
+static void
+ath5k_desc_free(struct ath5k_hw *ah)
+{
+ struct ath5k_buf *bf;
+
+ list_for_each_entry(bf, &ah->txbuf, list)
+ ath5k_txbuf_free_skb(ah, bf);
+ list_for_each_entry(bf, &ah->rxbuf, list)
+ ath5k_rxbuf_free_skb(ah, bf);
+ list_for_each_entry(bf, &ah->bcbuf, list)
+ ath5k_txbuf_free_skb(ah, bf);
+
+ /* Free memory associated with all descriptors */
+ dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
+ ah->desc = NULL;
+ ah->desc_daddr = 0;
+
+ kfree(ah->bufptr);
+ ah->bufptr = NULL;
+}
+
+
+/**************\
+* Queues setup *
+\**************/
+
+static struct ath5k_txq *
+ath5k_txq_setup(struct ath5k_hw *ah,
+ int qtype, int subtype)
+{
+ struct ath5k_txq *txq;
+ struct ath5k_txq_info qi = {
+ .tqi_subtype = subtype,
+ /* XXX: default values not correct for B and XR channels,
+ * but who cares? */
+ .tqi_aifs = AR5K_TUNE_AIFS,
+ .tqi_cw_min = AR5K_TUNE_CWMIN,
+ .tqi_cw_max = AR5K_TUNE_CWMAX
+ };
+ int qnum;
+
+ /*
+ * Enable interrupts only for EOL and DESC conditions.
+ * We mark tx descriptors to receive a DESC interrupt
+ * when a tx queue gets deep; otherwise we wait for the
+ * EOL to reap descriptors. Note that this is done to
+ * reduce interrupt load and this only defers reaping
+ * descriptors, never transmitting frames. Aside from
+ * reducing interrupts this also permits more concurrency.
+ * The only potential downside is if the tx queue backs
+	 * up, in which case the top half of the kernel may back up
+ * due to a lack of tx descriptors.
+ */
+ qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
+ AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
+ qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
+ if (qnum < 0) {
+ /*
+ * NB: don't print a message, this happens
+ * normally on parts with too few tx queues
+ */
+ return ERR_PTR(qnum);
+ }
+ txq = &ah->txqs[qnum];
+ if (!txq->setup) {
+ txq->qnum = qnum;
+ txq->link = NULL;
+ INIT_LIST_HEAD(&txq->q);
+ spin_lock_init(&txq->lock);
+ txq->setup = true;
+ txq->txq_len = 0;
+ txq->txq_max = ATH5K_TXQ_LEN_MAX;
+ txq->txq_poll_mark = false;
+ txq->txq_stuck = 0;
+ }
+ return &ah->txqs[qnum];
+}
+
+static int
+ath5k_beaconq_setup(struct ath5k_hw *ah)
+{
+ struct ath5k_txq_info qi = {
+ /* XXX: default values not correct for B and XR channels,
+ * but who cares? */
+ .tqi_aifs = AR5K_TUNE_AIFS,
+ .tqi_cw_min = AR5K_TUNE_CWMIN,
+ .tqi_cw_max = AR5K_TUNE_CWMAX,
+ /* NB: for dynamic turbo, don't enable any other interrupts */
+ .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
+ };
+
+ return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
+}
+
+static int
+ath5k_beaconq_config(struct ath5k_hw *ah)
+{
+ struct ath5k_txq_info qi;
+ int ret;
+
+ ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
+ if (ret)
+ goto err;
+
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ /*
+ * Always burst out beacon and CAB traffic
+ * (aifs = cwmin = cwmax = 0)
+ */
+ qi.tqi_aifs = 0;
+ qi.tqi_cw_min = 0;
+ qi.tqi_cw_max = 0;
+ } else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ /*
+ * Adhoc mode; backoff between 0 and (2 * cw_min).
+ */
+ qi.tqi_aifs = 0;
+ qi.tqi_cw_min = 0;
+ qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
+ }
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
+ qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
+
+ ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
+ if (ret) {
+ ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
+ "hardware queue!\n", __func__);
+ goto err;
+ }
+ ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
+
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ qi.tqi_ready_time = (ah->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
+}
+
+/**
+ * ath5k_drain_tx_buffs - Empty tx buffers
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Empty tx buffers from all queues in preparation
+ * for a reset or during shutdown.
+ *
+ * NB: this assumes output has been stopped and
+ * we do not need to block ath5k_tx_tasklet
+ */
+static void
+ath5k_drain_tx_buffs(struct ath5k_hw *ah)
+{
+ struct ath5k_txq *txq;
+ struct ath5k_buf *bf, *bf0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ if (ah->txqs[i].setup) {
+ txq = &ah->txqs[i];
+ spin_lock_bh(&txq->lock);
+ list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+ ath5k_debug_printtxbuf(ah, bf);
+
+ ath5k_txbuf_free_skb(ah, bf);
+
+ spin_lock(&ah->txbuflock);
+ list_move_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
+ txq->txq_len--;
+ spin_unlock(&ah->txbuflock);
+ }
+ txq->link = NULL;
+ txq->txq_poll_mark = false;
+ spin_unlock_bh(&txq->lock);
+ }
+ }
+}
+
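+/* Release any hardware tx queues that have been set up */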
+static void
+ath5k_txq_release(struct ath5k_hw *ah)
+{
+ struct ath5k_txq *txq = ah->txqs;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
+ if (txq->setup) {
+ ath5k_hw_release_tx_queue(ah, txq->qnum);
+ txq->setup = false;
+ }
+}
+
+
+/*************\
+* RX Handling *
+\*************/
+
+/*
+ * Enable the receive h/w following a reset.
+ */
+static int
+ath5k_rx_start(struct ath5k_hw *ah)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ath5k_buf *bf;
+ int ret;
+
+ common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ spin_lock_bh(&ah->rxbuflock);
+ ah->rxlink = NULL;
+ list_for_each_entry(bf, &ah->rxbuf, list) {
+ ret = ath5k_rxbuf_setup(ah, bf);
+ if (ret != 0) {
+ spin_unlock_bh(&ah->rxbuflock);
+ goto err;
+ }
+ }
+ bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
+ ath5k_hw_set_rxdp(ah, bf->daddr);
+ spin_unlock_bh(&ah->rxbuflock);
+
+ ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
+ ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
+ ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
+
+ return 0;
+err:
+ return ret;
+}
+
+/*
+ * Disable the receive logic on the PCU (DRU)
+ * in preparation for a shutdown.
+ *
+ * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
+ * does.
+ */
+static void
+ath5k_rx_stop(struct ath5k_hw *ah)
+{
+
+ ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
+ ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
+
+ ath5k_debug_printrxbuffs(ah);
+}
+
+static unsigned int
+ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int keyix, hlen;
+
+ if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
+ rs->rs_keyix != AR5K_RXKEYIX_INVALID)
+ return RX_FLAG_DECRYPTED;
+
+ /* Apparently when a default key is used to decrypt the packet
+ the hw does not set the index used to decrypt. In such cases
+ get the index from the packet. */
+ hlen = ieee80211_hdrlen(hdr->frame_control);
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
+ skb->len >= hlen + 4) {
+ keyix = skb->data[hlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ return RX_FLAG_DECRYPTED;
+ }
+
+ return 0;
+}
+
+
+static void
+ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
+ struct ieee80211_rx_status *rxs)
+{
+ u64 tsf, bc_tstamp;
+ u32 hw_tu;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+
+ if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
+ /*
+ * Received an IBSS beacon with the same BSSID. Hardware *must*
+ * have updated the local TSF. We have to work around various
+ * hardware bugs, though...
+ */
+ tsf = ath5k_hw_get_tsf64(ah);
+ bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
+ hw_tu = TSF_TO_TU(tsf);
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
+ (unsigned long long)bc_tstamp,
+ (unsigned long long)rxs->mactime,
+ (unsigned long long)(rxs->mactime - bc_tstamp),
+ (unsigned long long)tsf);
+
+ /*
+ * Sometimes the HW will give us a wrong tstamp in the rx
+ * status, causing the timestamp extension to go wrong.
+ * (This seems to happen especially with beacon frames bigger
+ * than 78 byte (incl. FCS))
+ * But we know that the receive timestamp must be later than the
+ * timestamp of the beacon since HW must have synced to that.
+ *
+ * NOTE: here we assume mactime to be after the frame was
+ * received, not like mac80211 which defines it at the start.
+ */
+ if (bc_tstamp > rxs->mactime) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "fixing mactime from %llx to %llx\n",
+ (unsigned long long)rxs->mactime,
+ (unsigned long long)tsf);
+ rxs->mactime = tsf;
+ }
+
+ /*
+ * Local TSF might have moved higher than our beacon timers,
+ * in that case we have to update them to continue sending
+ * beacons. This also takes care of synchronizing beacon sending
+ * times with other stations.
+ */
+ if (hw_tu >= ah->nexttbtt)
+ ath5k_beacon_update_timers(ah, bc_tstamp);
+
+ /* Check if the beacon timers are still correct, because a TSF
+ * update might have created a window between them - for a
+		 * longer description see the comment of ath5k_hw_check_beacon_timers() */
+ if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
+ ath5k_beacon_update_timers(ah, bc_tstamp);
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "fixed beacon timers after beacon receive\n");
+ }
+ }
+}
+
+/*
+ * Compute padding position. skb must contain an IEEE 802.11 frame
+ */
+static int ath5k_common_padpos(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
+
+ if (ieee80211_has_a4(frame_control))
+ padpos += ETH_ALEN;
+
+ if (ieee80211_is_data_qos(frame_control))
+ padpos += IEEE80211_QOS_CTL_LEN;
+
+ return padpos;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough header room.
+ */
+static int ath5k_add_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+
+ if (skb_headroom(skb) < padsize)
+ return -1;
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ return padsize;
+ }
+
+ return 0;
+}
+
+/*
+ * The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = 4 - (hdrlen & 3); however, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. We must not try to
+ * remove padding from short control frames that do not have a
+ * payload.
+ *
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+static int ath5k_remove_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len >= padpos + padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
+
+ return 0;
+}
+
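+/*
+ * Fill in the mac80211 rx status (timestamp, frequency, signal,
+ * antenna, rate, decryption and bandwidth flags) for a received frame
+ * and pass it up to mac80211. MAC header padding is removed first.
+ */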
+static void
+ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
+{
+ struct ieee80211_rx_status *rxs;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ ath5k_remove_padding(skb);
+
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ rxs->flag = 0;
+ if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+ if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+
+ /*
+ * Always extend the mac timestamp, since this information is
+ * also needed for proper IBSS merging.
+ *
+ * XXX: it might be too late to do it here, since rs_tstamp is
+ * only 15 bits wide. That means the TSF extension has to be done
+ * within 32768 usec (about 32 ms). It might be necessary to move
+ * this to the interrupt handler, like it is done in madwifi.
+ */
+ rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
+ rxs->flag |= RX_FLAG_MACTIME_END;
+
+ rxs->freq = ah->curchan->center_freq;
+ rxs->band = ah->curchan->band;
+
+ rxs->signal = ah->ah_noise_floor + rs->rs_rssi;
+
+ rxs->antenna = rs->rs_antenna;
+
+ if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+ ah->stats.antenna_rx[rs->rs_antenna]++;
+ else
+ ah->stats.antenna_rx[0]++; /* invalid */
+
+ rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
+ rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rxs->flag |= RX_FLAG_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rxs->flag |= RX_FLAG_10MHZ;
+ break;
+ default:
+ break;
+ }
+
+ if (rs->rs_rate ==
+ ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
+ rxs->flag |= RX_FLAG_SHORTPRE;
+
+ trace_ath5k_rx(ah, skb);
+
+ if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
+ ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+
+ /* check beacons in IBSS mode */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(ah, skb, rxs);
+ }
+
+ ieee80211_rx(ah->hw, skb);
+}
+
+/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
+{
+ ah->stats.rx_all_count++;
+ ah->stats.rx_bytes_count += rs->rs_datalen;
+
+ if (unlikely(rs->rs_status)) {
+ unsigned int filters;
+
+ if (rs->rs_status & AR5K_RXERR_CRC)
+ ah->stats.rxerr_crc++;
+ if (rs->rs_status & AR5K_RXERR_FIFO)
+ ah->stats.rxerr_fifo++;
+ if (rs->rs_status & AR5K_RXERR_PHY) {
+ ah->stats.rxerr_phy++;
+ if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+ ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
+
+ /*
+ * Treat packets that underwent a CCK or OFDM reset as having a bad CRC.
+ * These restarts happen when the radio resynchronizes to a stronger frame
+ * while receiving a weaker frame. Here we receive the prefix of the weak
+ * frame. Since these are incomplete packets, mark their CRC as invalid.
+ */
+ if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
+ rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
+ rs->rs_status |= AR5K_RXERR_CRC;
+ rs->rs_status &= ~AR5K_RXERR_PHY;
+ } else {
+ return false;
+ }
+ }
+ if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+ /*
+ * Decrypt error. If the error occurred
+ * because there was no hardware key, then
+ * let the frame through so the upper layers
+ * can process it. This is necessary for 5210
+ * parts which have no way to setup a ``clear''
+ * key cache entry.
+ *
+ * XXX do key cache faulting
+ */
+ ah->stats.rxerr_decrypt++;
+ if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+ !(rs->rs_status & AR5K_RXERR_CRC))
+ return true;
+ }
+ if (rs->rs_status & AR5K_RXERR_MIC) {
+ ah->stats.rxerr_mic++;
+ return true;
+ }
+
+ /*
+ * Reject any frames with non-crypto errors, and take into account the
+ * current FIF_* filters.
+ */
+ filters = AR5K_RXERR_DECRYPT;
+ if (ah->fif_filter_flags & FIF_FCSFAIL)
+ filters |= AR5K_RXERR_CRC;
+
+ if (rs->rs_status & ~filters)
+ return false;
+ }
+
+ if (unlikely(rs->rs_more)) {
+ ah->stats.rxerr_jumbo++;
+ return false;
+ }
+ return true;
+}
+
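+/*
+ * Update the interrupt mask: while the rx or tx tasklet is pending we
+ * keep the corresponding interrupts masked and re-enable them after the
+ * tasklet has run. Skipped entirely while a reset is in progress.
+ */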
+static void
+ath5k_set_current_imask(struct ath5k_hw *ah)
+{
+ enum ath5k_int imask;
+ unsigned long flags;
+
+ if (test_bit(ATH_STAT_RESET, ah->status))
+ return;
+
+ spin_lock_irqsave(&ah->irqlock, flags);
+ imask = ah->imask;
+ if (ah->rx_pending)
+ imask &= ~AR5K_INT_RX_ALL;
+ if (ah->tx_pending)
+ imask &= ~AR5K_INT_TX_ALL;
+ ath5k_hw_set_imr(ah, imask);
+ spin_unlock_irqrestore(&ah->irqlock, flags);
+}
+
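+/*
+ * RX tasklet: walk the rx buffer list, process completed descriptors,
+ * hand good frames to ath5k_receive_frame() and re-arm each buffer with
+ * a fresh skb before moving it to the end of the list.
+ */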
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
+ struct ath5k_rx_status rs = {};
+ struct sk_buff *skb, *next_skb;
+ dma_addr_t next_skb_addr;
+ struct ath5k_hw *ah = (void *)data;
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ath5k_buf *bf;
+ struct ath5k_desc *ds;
+ int ret;
+
+ spin_lock(&ah->rxbuflock);
+ if (list_empty(&ah->rxbuf)) {
+ ATH5K_WARN(ah, "empty rx buf pool\n");
+ goto unlock;
+ }
+ do {
+ bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
+ BUG_ON(bf->skb == NULL);
+ skb = bf->skb;
+ ds = bf->desc;
+
+ /* bail if HW is still using self-linked descriptor */
+ if (ath5k_hw_get_rxdp(ah) == bf->daddr)
+ break;
+
+ ret = ah->ah_proc_rx_desc(ah, ds, &rs);
+ if (unlikely(ret == -EINPROGRESS))
+ break;
+ else if (unlikely(ret)) {
+ ATH5K_ERR(ah, "error in processing rx descriptor\n");
+ ah->stats.rxerr_proc++;
+ break;
+ }
+
+ if (ath5k_receive_frame_ok(ah, &rs)) {
+ next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);
+
+ /*
+ * If we can't replace bf->skb with a new skb under
+ * memory pressure, just skip this packet
+ */
+ if (!next_skb)
+ goto next;
+
+ dma_unmap_single(ah->dev, bf->skbaddr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, rs.rs_datalen);
+
+ ath5k_receive_frame(ah, skb, &rs);
+
+ bf->skb = next_skb;
+ bf->skbaddr = next_skb_addr;
+ }
+next:
+ list_move_tail(&bf->list, &ah->rxbuf);
+ } while (ath5k_rxbuf_setup(ah, bf) == 0);
+unlock:
+ spin_unlock(&ah->rxbuflock);
+ ah->rx_pending = false;
+ ath5k_set_current_imask(ah);
+}
+
+
+/*************\
+* TX Handling *
+\*************/
+
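+/*
+ * Queue a frame for transmission: add MAC header padding if needed,
+ * grab a free tx buffer and set up its descriptor on the given hardware
+ * queue. The frame is dropped if no buffer is available or if the
+ * descriptor setup fails.
+ */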
+void
+ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq, struct ieee80211_tx_control *control)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ath5k_buf *bf;
+ unsigned long flags;
+ int padsize;
+
+ trace_ath5k_tx(ah, skb, txq);
+
+ /*
+ * The hardware expects the header padded to 4 byte boundaries.
+ * If this is not the case, we add the padding after the header.
+ */
+ padsize = ath5k_add_padding(skb);
+ if (padsize < 0) {
+ ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
+ " headroom to pad");
+ goto drop_packet;
+ }
+
+ if (txq->txq_len >= txq->txq_max &&
+ txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
+ ieee80211_stop_queue(hw, txq->qnum);
+
+ spin_lock_irqsave(&ah->txbuflock, flags);
+ if (list_empty(&ah->txbuf)) {
+ ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
+ ieee80211_stop_queues(hw);
+ goto drop_packet;
+ }
+ bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
+ list_del(&bf->list);
+ ah->txbuf_len--;
+ if (list_empty(&ah->txbuf))
+ ieee80211_stop_queues(hw);
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
+
+ bf->skb = skb;
+
+ if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
+ bf->skb = NULL;
+ spin_lock_irqsave(&ah->txbuflock, flags);
+ list_add_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
+ goto drop_packet;
+ }
+ return;
+
+drop_packet:
+ ieee80211_free_txskb(hw, skb);
+}
+
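+/*
+ * Report a completed frame back to mac80211: fill in the rate table and
+ * retry counts, translate the hardware tx status, update statistics and
+ * strip the MAC header padding before calling ieee80211_tx_status().
+ */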
+static void
+ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
+ struct ath5k_txq *txq, struct ath5k_tx_status *ts,
+ struct ath5k_buf *bf)
+{
+ struct ieee80211_tx_info *info;
+ u8 tries[3];
+ int i;
+ int size = 0;
+
+ ah->stats.tx_all_count++;
+ ah->stats.tx_bytes_count += skb->len;
+ info = IEEE80211_SKB_CB(skb);
+
+ size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+ memcpy(info->status.rates, bf->rates, size);
+
+ tries[0] = info->status.rates[0].count;
+ tries[1] = info->status.rates[1].count;
+ tries[2] = info->status.rates[2].count;
+
+ ieee80211_tx_info_clear_status(info);
+
+ for (i = 0; i < ts->ts_final_idx; i++) {
+ struct ieee80211_tx_rate *r =
+ &info->status.rates[i];
+
+ r->count = tries[i];
+ }
+
+ info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
+ info->status.rates[ts->ts_final_idx + 1].idx = -1;
+
+ if (unlikely(ts->ts_status)) {
+ ah->stats.ack_fail++;
+ if (ts->ts_status & AR5K_TXERR_FILT) {
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ ah->stats.txerr_filt++;
+ }
+ if (ts->ts_status & AR5K_TXERR_XRETRY)
+ ah->stats.txerr_retry++;
+ if (ts->ts_status & AR5K_TXERR_FIFO)
+ ah->stats.txerr_fifo++;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ts_rssi;
+
+ /* count the successful attempt as well */
+ info->status.rates[ts->ts_final_idx].count++;
+ }
+
+ /*
+ * Remove MAC header padding before giving the frame
+ * back to mac80211.
+ */
+ ath5k_remove_padding(skb);
+
+ if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
+ ah->stats.antenna_tx[ts->ts_antenna]++;
+ else
+ ah->stats.antenna_tx[0]++; /* invalid */
+
+ trace_ath5k_tx_complete(ah, skb, txq, ts);
+ ieee80211_tx_status(ah->hw, skb);
+}
+
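+/*
+ * Process completed descriptors on a single tx queue and return their
+ * buffers to the free list. The last descriptor is kept on the queue
+ * until the hardware has moved past it, to avoid races with DMA.
+ */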
+static void
+ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
+{
+ struct ath5k_tx_status ts = {};
+ struct ath5k_buf *bf, *bf0;
+ struct ath5k_desc *ds;
+ struct sk_buff *skb;
+ int ret;
+
+ spin_lock(&txq->lock);
+ list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+
+ txq->txq_poll_mark = false;
+
+ /* skb might already have been processed last time. */
+ if (bf->skb != NULL) {
+ ds = bf->desc;
+
+ ret = ah->ah_proc_tx_desc(ah, ds, &ts);
+ if (unlikely(ret == -EINPROGRESS))
+ break;
+ else if (unlikely(ret)) {
+ ATH5K_ERR(ah,
+ "error %d while processing "
+ "queue %u\n", ret, txq->qnum);
+ break;
+ }
+
+ skb = bf->skb;
+ bf->skb = NULL;
+
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
+ DMA_TO_DEVICE);
+ ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
+ }
+
+ /*
+ * It's possible that the hardware can say the buffer is
+ * completed when it hasn't yet loaded the ds_link from
+ * host memory and moved on.
+ * Always keep the last descriptor to avoid HW races...
+ */
+ if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
+ spin_lock(&ah->txbuflock);
+ list_move_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
+ txq->txq_len--;
+ spin_unlock(&ah->txbuflock);
+ }
+ }
+ spin_unlock(&txq->lock);
+ if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
+ ieee80211_wake_queue(ah->hw, txq->qnum);
+}
+
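+/*
+ * TX tasklet: process every tx queue that the interrupt handler flagged
+ * as having completed frames, then re-enable the tx interrupts.
+ */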
+static void
+ath5k_tasklet_tx(unsigned long data)
+{
+ int i;
+ struct ath5k_hw *ah = (void *)data;
+
+ for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
+ if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
+ ath5k_tx_processq(ah, &ah->txqs[i]);
+
+ ah->tx_pending = false;
+ ath5k_set_current_imask(ah);
+}
+
+
+/*****************\
+* Beacon handling *
+\*****************/
+
+/*
+ * Setup the beacon frame for transmit.
+ */
+static int
+ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
+{
+ struct sk_buff *skb = bf->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath5k_desc *ds;
+ int ret = 0;
+ u8 antenna;
+ u32 flags;
+ const int padsize = 0;
+
+ bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
+ "skbaddr %llx\n", skb, skb->data, skb->len,
+ (unsigned long long)bf->skbaddr);
+
+ if (dma_mapping_error(ah->dev, bf->skbaddr)) {
+ ATH5K_ERR(ah, "beacon DMA mapping failed\n");
+ dev_kfree_skb_any(skb);
+ bf->skb = NULL;
+ return -EIO;
+ }
+
+ ds = bf->desc;
+ antenna = ah->ah_tx_ant;
+
+ flags = AR5K_TXDESC_NOACK;
+ if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
+ ds->ds_link = bf->daddr; /* self-linked */
+ flags |= AR5K_TXDESC_VEOL;
+ } else
+ ds->ds_link = 0;
+
+ /*
+ * If we use multiple antennas on AP and use
+ * the Sectored AP scenario, switch antenna every
+ * 4 beacons to make sure everybody hears our AP.
+ * When a client tries to associate, hw will keep
+ * track of the tx antenna to be used for this client
+ * automatically, based on ACKed packets.
+ *
+ * Note: AP still listens and transmits RTS on the
+ * default antenna which is supposed to be an omni.
+ *
+ * Note2: On sectored scenarios it's possible to have
+ * multiple antennas (1 omni -- the default -- and 14
+ * sectors), so if we choose to actually support this
+ * mode, we need to allow the user to set how many antennas
+ * we have and tweak the code below to send beacons
+ * on all of them.
+ */
+ if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
+ antenna = ah->bsent & 4 ? 2 : 1;
+
+
+ /* FIXME: If we are in g mode and rate is a CCK rate
+ * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
+ * from tx power (value is in dB units already) */
+ ds->ds_data = bf->skbaddr;
+ ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
+ AR5K_PKT_TYPE_BEACON,
+ (ah->ah_txpower.txp_requested * 2),
+ ieee80211_get_tx_rate(ah->hw, info)->hw_value,
+ 1, AR5K_TXKEYIX_INVALID,
+ antenna, flags, 0, 0);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+err_unmap:
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*
+ * Updates the beacon that is sent by ath5k_beacon_send. For adhoc
+ * this is called only once at config_bss time; for AP we do it on
+ * every SWBA interrupt so that the TIM will reflect buffered frames.
+ *
+ * Called with the beacon lock.
+ */
+int
+ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int ret;
+ struct ath5k_hw *ah = hw->priv;
+ struct ath5k_vif *avf;
+ struct sk_buff *skb;
+
+ if (WARN_ON(!vif)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ avf = (void *)vif->drv_priv;
+ ath5k_txbuf_free_skb(ah, avf->bbuf);
+ avf->bbuf->skb = skb;
+ ret = ath5k_beacon_setup(ah, avf->bbuf);
+out:
+ return ret;
+}
+
+/*
+ * Transmit a beacon frame at SWBA. Dynamic updates to the
+ * frame contents are done as needed and the slot time is
+ * also adjusted based on current state.
+ *
+ * This is called from software irq context (beacontq tasklets)
+ * or user context from ath5k_beacon_config.
+ */
+static void
+ath5k_beacon_send(struct ath5k_hw *ah)
+{
+ struct ieee80211_vif *vif;
+ struct ath5k_vif *avf;
+ struct ath5k_buf *bf;
+ struct sk_buff *skb;
+ int err;
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
+
+ /*
+ * Check if the previous beacon has gone out. If
+ * not, don't try to post another: skip this
+ * period and wait for the next. Missed beacons
+ * indicate a problem and should not occur. If we
+ * miss too many consecutive beacons reset the device.
+ */
+ if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
+ ah->bmisscount++;
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "missed %u consecutive beacons\n", ah->bmisscount);
+ if (ah->bmisscount > 10) { /* NB: 10 is a guess */
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "stuck beacon time (%u missed)\n",
+ ah->bmisscount);
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "stuck beacon, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+ return;
+ }
+ if (unlikely(ah->bmisscount != 0)) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "resume beacon xmit after %u misses\n",
+ ah->bmisscount);
+ ah->bmisscount = 0;
+ }
+
+ if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
+ ah->num_mesh_vifs > 1) ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ u64 tsf = ath5k_hw_get_tsf64(ah);
+ u32 tsftu = TSF_TO_TU(tsf);
+ int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
+ vif = ah->bslot[(slot + 1) % ATH_BCBUF];
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "tsf %llx tsftu %x intval %u slot %u vif %p\n",
+ (unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
+ } else /* only one interface */
+ vif = ah->bslot[0];
+
+ if (!vif)
+ return;
+
+ avf = (void *)vif->drv_priv;
+ bf = avf->bbuf;
+
+ /*
+ * Stop any current dma and put the new frame on the queue.
+ * This should never fail since we check above that no frames
+ * are still pending on the queue.
+ */
+ if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
+ ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
+ /* NB: hw still stops DMA, so proceed */
+ }
+
+ /* refresh the beacon for AP or MESH mode */
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ err = ath5k_beacon_update(ah->hw, vif);
+ if (err)
+ return;
+ }
+
+ if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
+ ah->opmode == NL80211_IFTYPE_MONITOR)) {
+ ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
+ return;
+ }
+
+ trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
+
+ ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
+ ath5k_hw_start_tx_dma(ah, ah->bhalq);
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
+ ah->bhalq, (unsigned long long)bf->daddr, bf->desc);
+
+ skb = ieee80211_get_buffered_bc(ah->hw, vif);
+ while (skb) {
+ ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
+
+ if (ah->cabq->txq_len >= ah->cabq->txq_max)
+ break;
+
+ skb = ieee80211_get_buffered_bc(ah->hw, vif);
+ }
+
+ ah->bsent++;
+}
+
+/**
+ * ath5k_beacon_update_timers - update beacon timers
+ *
+ * @ah: struct ath5k_hw pointer we are operating on
+ * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
+ * beacon timer update based on the current HW TSF.
+ *
+ * Calculate the next target beacon transmit time (TBTT) based on the timestamp
+ * of a received beacon or the current local hardware TSF and write it to the
+ * beacon timer registers.
+ *
+ * This is called in a variety of situations, e.g. when a beacon is received,
+ * when a TSF update has been detected, but also when a new IBSS is created or
+ * when we otherwise know we have to update the timers, but we keep it in this
+ * function to have it all together in one place.
+ */
+void
+ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
+{
+ u32 nexttbtt, intval, hw_tu, bc_tu;
+ u64 hw_tsf;
+
+ intval = ah->bintval & AR5K_BEACON_PERIOD;
+ if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
+ + ah->num_mesh_vifs > 1) {
+ intval /= ATH_BCBUF; /* staggered multi-bss beacons */
+ if (intval < 15)
+ ATH5K_WARN(ah, "intval %u is too low, min 15\n",
+ intval);
+ }
+ if (WARN_ON(!intval))
+ return;
+
+ /* beacon TSF converted to TU */
+ bc_tu = TSF_TO_TU(bc_tsf);
+
+ /* current TSF converted to TU */
+ hw_tsf = ath5k_hw_get_tsf64(ah);
+ hw_tu = TSF_TO_TU(hw_tsf);
+
+#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
+ /* We use FUDGE to make sure the next TBTT is ahead of the current TU.
+ * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
+ * configuration we need to make sure it is bigger than that. */
+
+ if (bc_tsf == -1) {
+ /*
+ * no beacons received, called internally.
+ * just need to refresh timers based on HW TSF.
+ */
+ nexttbtt = roundup(hw_tu + FUDGE, intval);
+ } else if (bc_tsf == 0) {
+ /*
+ * no beacon received, probably called by ath5k_reset_tsf().
+ * reset TSF to start with 0.
+ */
+ nexttbtt = intval;
+ intval |= AR5K_BEACON_RESET_TSF;
+ } else if (bc_tsf > hw_tsf) {
+ /*
+ * beacon received, SW merge happened but HW TSF not yet updated.
+ * not possible to reconfigure timers yet, but next time we
+ * receive a beacon with the same BSSID, the hardware will
+ * automatically update the TSF and then we need to reconfigure
+ * the timers.
+ */
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "need to wait for HW TSF sync\n");
+ return;
+ } else {
+ /*
+ * most important case for beacon synchronization between STAs.
+ *
+ * beacon received and HW TSF has already been updated by the HW.
+ * update next TBTT based on the TSF of the beacon, but make
+ * sure it is ahead of our local TSF timer.
+ */
+ nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
+ }
+#undef FUDGE
+
+ ah->nexttbtt = nexttbtt;
+
+ intval |= AR5K_BEACON_ENA;
+ ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
+
+ /*
+ * debugging output last in order to preserve the time critical aspect
+ * of this function
+ */
+ if (bc_tsf == -1)
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "reconfigured timers based on HW TSF\n");
+ else if (bc_tsf == 0)
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "reset HW TSF and timers\n");
+ else
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "updated timers based on beacon TSF\n");
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
+ "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
+ (unsigned long long) bc_tsf,
+ (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
+ intval & AR5K_BEACON_PERIOD,
+ intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
+ intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
+}
+
+/**
+ * ath5k_beacon_config - Configure the beacon queues and interrupts
+ *
+ * @ah: struct ath5k_hw pointer we are operating on
+ *
+ * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
+ * interrupts to detect TSF updates only.
+ */
+void
+ath5k_beacon_config(struct ath5k_hw *ah)
+{
+ spin_lock_bh(&ah->block);
+ ah->bmisscount = 0;
+ ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
+
+ if (ah->enable_beacon) {
+ /*
+ * In IBSS mode we use a self-linked tx descriptor and let the
+ * hardware send the beacons automatically. We have to load it
+ * only once here.
+ * We use the SWBA interrupt only to keep track of the beacon
+ * timers in order to detect automatic TSF updates.
+ */
+ ath5k_beaconq_config(ah);
+
+ ah->imask |= AR5K_INT_SWBA;
+
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ if (ath5k_hw_hasveol(ah))
+ ath5k_beacon_send(ah);
+ } else
+ ath5k_beacon_update_timers(ah, -1);
+ } else {
+ ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
+ }
+
+ ath5k_hw_set_imr(ah, ah->imask);
+ mmiowb();
+ spin_unlock_bh(&ah->block);
+}
+
+static void ath5k_tasklet_beacon(unsigned long data)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) data;
+
+ /*
+ * Software beacon alert--time to send a beacon.
+ *
+ * In IBSS mode we use this interrupt just to
+ * keep track of the next TBTT (target beacon
+ * transmission time) in order to detect whether
+ * automatic TSF updates happened.
+ */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ /* XXX: only if VEOL supported */
+ u64 tsf = ath5k_hw_get_tsf64(ah);
+ ah->nexttbtt += ah->bintval;
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "SWBA nexttbtt: %x hw_tu: %x "
+ "TSF: %llx\n",
+ ah->nexttbtt,
+ TSF_TO_TU(tsf),
+ (unsigned long long) tsf);
+ } else {
+ spin_lock(&ah->block);
+ ath5k_beacon_send(ah);
+ spin_unlock(&ah->block);
+ }
+}
+
+
+/********************\
+* Interrupt handling *
+\********************/
+
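+/*
+ * Called from the interrupt handler to schedule ANI or periodic
+ * calibration work once the respective interval has expired, but only
+ * while no other calibration is running.
+ */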
+static void
+ath5k_intr_calibration_poll(struct ath5k_hw *ah)
+{
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run ANI only when calibration is not active */
+
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ tasklet_schedule(&ah->ani_tasklet);
+
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run calibration only when another calibration
+ * is not running.
+ *
+ * Note: This is for both full/short calibration,
+ * if it's time for a full one, ath5k_calibrate_work will deal
+ * with it. */
+
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+ ieee80211_queue_work(ah->hw, &ah->calib_work);
+ }
+ /* we could use SWI to generate enough interrupts to meet our
+ * calibration interval requirements, if necessary:
+ * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
+}
+
+static void
+ath5k_schedule_rx(struct ath5k_hw *ah)
+{
+ ah->rx_pending = true;
+ tasklet_schedule(&ah->rxtq);
+}
+
+static void
+ath5k_schedule_tx(struct ath5k_hw *ah)
+{
+ ah->tx_pending = true;
+ tasklet_schedule(&ah->txtq);
+}
+
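+/*
+ * Main interrupt handler: read and clear the interrupt status, schedule
+ * a reset on fatal errors or rx overruns (on older MACs) and dispatch
+ * the remaining events to the rx/tx and beacon tasklets. Finally check
+ * whether ANI or calibration work needs to be scheduled.
+ */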
+static irqreturn_t
+ath5k_intr(int irq, void *dev_id)
+{
+ struct ath5k_hw *ah = dev_id;
+ enum ath5k_int status;
+ unsigned int counter = 1000;
+
+
+ /*
+ * If hw is not ready (or detached) and we get an
+ * interrupt, or if we have no interrupts pending
+ * (that means it's not for us) skip it.
+ *
+ * NOTE: Group 0/1 PCI interface registers are not
+ * supported on WiSOCs, so we can't check for pending
+ * interrupts (ISR belongs to another register group
+ * so we are ok).
+ */
+ if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
+ return IRQ_NONE;
+
+ /** Main loop **/
+ do {
+ ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
+ status, ah->imask);
+
+ /*
+ * Fatal hw error -> Log and reset
+ *
+ * Fatal errors are unrecoverable so we have to
+ * reset the card. These errors include bus and
+ * dma errors.
+ */
+ if (unlikely(status & AR5K_INT_FATAL)) {
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "fatal int, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+ /*
+ * RX Overrun -> Count and reset if needed
+ *
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ */
+ } else if (unlikely(status & AR5K_INT_RXORN)) {
+
+ /*
+ * Older chipsets need a reset to come out of this
+ * condition, but we treat it as RX for newer chips.
+ * We don't know exactly which versions need a reset;
+ * this guess is copied from the HAL.
+ */
+ ah->stats.rxorn_intr++;
+
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "rx overrun, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ } else
+ ath5k_schedule_rx(ah);
+
+ } else {
+
+ /* Software Beacon Alert -> Schedule beacon tasklet */
+ if (status & AR5K_INT_SWBA)
+ tasklet_hi_schedule(&ah->beacontq);
+
+ /*
+ * No more RX descriptors -> Just count
+ *
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work at
+ * least on older hardware revs.
+ */
+ if (status & AR5K_INT_RXEOL)
+ ah->stats.rxeol_intr++;
+
+
+ /* TX Underrun -> Bump tx trigger level */
+ if (status & AR5K_INT_TXURN)
+ ath5k_hw_update_tx_triglevel(ah, true);
+
+ /* RX -> Schedule rx tasklet */
+ if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
+ ath5k_schedule_rx(ah);
+
+ /* TX -> Schedule tx tasklet */
+ if (status & (AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXEOL))
+ ath5k_schedule_tx(ah);
+
+ /* Missed beacon -> TODO
+ if (status & AR5K_INT_BMISS)
+ */
+
+ /* MIB event -> Update counters and notify ANI */
+ if (status & AR5K_INT_MIB) {
+ ah->stats.mib_intr++;
+ ath5k_hw_update_mib_counters(ah);
+ ath5k_ani_mib_intr(ah);
+ }
+
+ /* GPIO -> Notify RFKill layer */
+ if (status & AR5K_INT_GPIO)
+ tasklet_schedule(&ah->rf_kill.toggleq);
+
+ }
+
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ break;
+
+ } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
+
+ /*
+ * Until we handle the rx/tx interrupts, mask them on IMR.
+ *
+ * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+ * and unset after we've handled the interrupts.
+ */
+ if (ah->rx_pending || ah->tx_pending)
+ ath5k_set_current_imask(ah);
+
+ if (unlikely(!counter))
+ ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
+
+ /* Fire up calibration poll */
+ ath5k_intr_calibration_poll(ah);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Periodically recalibrate the PHY to account
+ * for temperature/environment changes.
+ */
+static void
+ath5k_calibrate_work(struct work_struct *work)
+{
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ calib_work);
+
+ /* Should we run a full calibration ? */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "running full calibration\n");
+
+ if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+ /*
+ * Rfgain is out of bounds, reset the chip
+ * to load new gain values.
+ */
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "got new rfgain, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+ } else
+ ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
+
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
+ ieee80211_frequency_to_channel(ah->curchan->center_freq),
+ ah->curchan->hw_value);
+
+ if (ath5k_hw_phy_calibrate(ah, ah->curchan))
+ ATH5K_ERR(ah, "calibration of channel %u failed\n",
+ ieee80211_frequency_to_channel(
+ ah->curchan->center_freq));
+
+ /* Clear calibration flags */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
+}
+
+
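+/*
+ * ANI tasklet: run the Adaptive Noise Immunity calibration with the ANI
+ * flag set in the calibration mask.
+ */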
+static void
+ath5k_tasklet_ani(unsigned long data)
+{
+ struct ath5k_hw *ah = (void *)data;
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
+ ath5k_ani_calibration(ah);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
+}
+
+
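+/*
+ * Periodic tx watchdog: check whether the active tx queues have made
+ * progress since the last poll and reset the hardware if a queue with
+ * pending frames appears to be stuck. Re-arms itself afterwards.
+ */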
+static void
+ath5k_tx_complete_poll_work(struct work_struct *work)
+{
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ tx_complete_work.work);
+ struct ath5k_txq *txq;
+ int i;
+ bool needreset = false;
+
+ if (!test_bit(ATH_STAT_STARTED, ah->status))
+ return;
+
+ mutex_lock(&ah->lock);
+
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ if (ah->txqs[i].setup) {
+ txq = &ah->txqs[i];
+ spin_lock_bh(&txq->lock);
+ if (txq->txq_len > 1) {
+ if (txq->txq_poll_mark) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
+ "TX queue stuck %d\n",
+ txq->qnum);
+ needreset = true;
+ txq->txq_stuck++;
+ spin_unlock_bh(&txq->lock);
+ break;
+ } else {
+ txq->txq_poll_mark = true;
+ }
+ }
+ spin_unlock_bh(&txq->lock);
+ }
+ }
+
+ if (needreset) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "TX queues stuck, resetting\n");
+ ath5k_reset(ah, NULL, true);
+ }
+
+ mutex_unlock(&ah->lock);
+
+ ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
+ msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
+}
+
+
+/*************************\
+* Initialization routines *
+\*************************/
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 4, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
+int
+ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = ah->hw;
+ struct ath_common *common;
+ int ret;
+ int csz;
+
+ /* Initialize driver private data */
+ SET_IEEE80211_DEV(hw, ah->dev);
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_RC_TABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
+ /* SW support for IBSS_RSN is provided by mac80211 */
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+
+ /* both antennas can be configured as RX or TX */
+ hw->wiphy->available_antennas_tx = 0x3;
+ hw->wiphy->available_antennas_rx = 0x3;
+
+ hw->extra_tx_headroom = 2;
+
+ /*
+ * Mark the device as detached to avoid processing
+ * interrupts until setup is complete.
+ */
+ __set_bit(ATH_STAT_INVALID, ah->status);
+
+ ah->opmode = NL80211_IFTYPE_STATION;
+ ah->bintval = 1000;
+ mutex_init(&ah->lock);
+ spin_lock_init(&ah->rxbuflock);
+ spin_lock_init(&ah->txbuflock);
+ spin_lock_init(&ah->block);
+ spin_lock_init(&ah->irqlock);
+
+ /* Setup interrupt handler */
+ ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
+ if (ret) {
+ ATH5K_ERR(ah, "request_irq failed\n");
+ goto err;
+ }
+
+ common = ath5k_hw_common(ah);
+ common->ops = &ath5k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = hw;
+ common->priv = ah;
+ common->clockrate = 40;
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath5k_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ spin_lock_init(&common->cc_lock);
+
+ /* Initialize device */
+ ret = ath5k_hw_init(ah);
+ if (ret)
+ goto err_irq;
+
+ /* Set up multi-rate retry capabilities */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ hw->max_rates = 4;
+ hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+ AR5K_INIT_RETRY_LONG);
+ }
+
+ hw->vif_data_size = sizeof(struct ath5k_vif);
+
+ /* Finish private driver data initialization */
+ ret = ath5k_init(hw);
+ if (ret)
+ goto err_ah;
+
+ ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
+ ah->ah_mac_srev,
+ ah->ah_phy_revision);
+
+ if (!ah->ah_single_chip) {
+ /* Single chip radio (!RF5111) */
+ if (ah->ah_radio_5ghz_revision &&
+ !ah->ah_radio_2ghz_revision) {
+ /* No 5GHz support -> report 2GHz radio */
+ if (!test_bit(AR5K_MODE_11A,
+ ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
+ /* No 2GHz support (5110 and some
+ * 5GHz only cards) -> report 5GHz radio */
+ } else if (!test_bit(AR5K_MODE_11B,
+ ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
+ /* Multiband radio */
+ } else {
+ ATH5K_INFO(ah, "RF%s multiband radio found"
+ " (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
+ }
+ }
+ /* Multi chip radio (RF5111 - RF2111) ->
+ * report both 2GHz/5GHz radios */
+ else if (ah->ah_radio_5ghz_revision &&
+ ah->ah_radio_2ghz_revision) {
+ ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
+ ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ ah->ah_radio_2ghz_revision),
+ ah->ah_radio_2ghz_revision);
+ }
+ }
+
+ ath5k_debug_init_device(ah);
+
+ /* ready to process interrupts */
+ __clear_bit(ATH_STAT_INVALID, ah->status);
+
+ return 0;
+err_ah:
+ ath5k_hw_deinit(ah);
+err_irq:
+ free_irq(ah->irq, ah);
+err:
+ return ret;
+}
+
+static int
+ath5k_stop_locked(struct ath5k_hw *ah)
+{
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
+ test_bit(ATH_STAT_INVALID, ah->status));
+
+ /*
+ * Shutdown the hardware and driver:
+ * stop output from above
+ * disable interrupts
+ * turn off timers
+ * turn off the radio
+ * clear transmit machinery
+ * clear receive machinery
+ * drain and release tx queues
+ * reclaim beacon resources
+ * power down hardware
+ *
+ * Note that some of this work is not possible if the
+ * hardware is gone (invalid).
+ */
+ ieee80211_stop_queues(ah->hw);
+
+ if (!test_bit(ATH_STAT_INVALID, ah->status)) {
+ ath5k_led_off(ah);
+ ath5k_hw_set_imr(ah, 0);
+ synchronize_irq(ah->irq);
+ ath5k_rx_stop(ah);
+ ath5k_hw_dma_stop(ah);
+ ath5k_drain_tx_buffs(ah);
+ ath5k_hw_phy_disable(ah);
+ }
+
+ return 0;
+}
+
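+/*
+ * mac80211 start callback: stop anything that is still running, perform
+ * an initial reset with the default interrupt mask, clear the key cache
+ * and arm the tx completion watchdog.
+ */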
+int ath5k_start(struct ieee80211_hw *hw)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_common *common = ath5k_hw_common(ah);
+ int ret, i;
+
+ mutex_lock(&ah->lock);
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);
+
+ /*
+ * Stop anything previously set up. This is safe
+ * whether this is the first time through or not.
+ */
+ ath5k_stop_locked(ah);
+
+ /*
+ * The basic interface to setting the hardware in a good
+ * state is ``reset''. On return the hardware is known to
+ * be powered up and with interrupts disabled. This must
+ * be followed by initialization of the appropriate bits
+ * and then setup of the interrupt mask.
+ */
+ ah->curchan = ah->hw->conf.chandef.chan;
+ ah->imask = AR5K_INT_RXOK
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXEOL
+ | AR5K_INT_FATAL
+ | AR5K_INT_GLOBAL
+ | AR5K_INT_MIB;
+
+ ret = ath5k_reset(ah, NULL, false);
+ if (ret)
+ goto done;
+
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_start(ah);
+
+ /*
+ * Reset the key cache since some parts do not reset the
+ * contents on initial power up or resume from suspend.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath_hw_keyreset(common, (u16) i);
+
+ /* Use higher rates for acks instead of base
+ * rate */
+ ah->ah_ack_bitrate_high = true;
+
+ for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
+ ah->bslot[i] = NULL;
+
+ ret = 0;
+done:
+ mmiowb();
+ mutex_unlock(&ah->lock);
+
+ set_bit(ATH_STAT_STARTED, ah->status);
+ ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
+ msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
+
+ return ret;
+}
+
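+/*
+ * Clear the pending flags and kill all tasklets; used when stopping or
+ * resetting the hardware.
+ */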
+static void ath5k_stop_tasklets(struct ath5k_hw *ah)
+{
+ ah->rx_pending = false;
+ ah->tx_pending = false;
+ tasklet_kill(&ah->rxtq);
+ tasklet_kill(&ah->txtq);
+ tasklet_kill(&ah->beacontq);
+ tasklet_kill(&ah->ani_tasklet);
+}
+
+/*
+ * Stop the device, grabbing the top-level lock to protect
+ * against concurrent entry through ath5k_init (which can happen
+ * if another thread does a system call and the thread doing the
+ * stop is preempted).
+ */
+void ath5k_stop(struct ieee80211_hw *hw)
+{
+ struct ath5k_hw *ah = hw->priv;
+ int ret;
+
+ mutex_lock(&ah->lock);
+ ret = ath5k_stop_locked(ah);
+ if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
+ /*
+ * Don't set the card in full sleep mode!
+ *
+ * a) When the device is in this state it must be carefully
+ * woken up or references to registers in the PCI clock
+ * domain may freeze the bus (and system). This varies
+ * by chip and is mostly an issue with newer parts
+ * (madwifi sources mentioned srev >= 0x78) that go to
+ * sleep more quickly.
+ *
+ * b) On older chips full sleep results in weird behaviour
+ * during wakeup. I tested various cards with srev < 0x78
+ * and they don't wake up after module reload; a second
+ * module reload is needed to bring the card up again.
+ *
+ * Until we figure out what's going on, don't enable
+ * full chip reset on any chip (this is what Legacy HAL
+ * and Sam's HAL do anyway). Instead perform a full reset
+ * on the device (same as initial state after attach) and
+ * leave it idle (keep MAC/BB on warm reset) */
+ ret = ath5k_hw_on_hold(ah);
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "putting device to sleep\n");
+ }
+
+ mmiowb();
+ mutex_unlock(&ah->lock);
+
+ ath5k_stop_tasklets(ah);
+
+ clear_bit(ATH_STAT_STARTED, ah->status);
+ cancel_delayed_work_sync(&ah->tx_complete_work);
+
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_stop(ah);
+}
+
+/*
+ * Reset the hardware. If chan is not NULL, then also pause rx/tx
+ * and change to the given channel.
+ *
+ * This should be called with ah->lock.
+ */
+static int
+ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
+ bool skip_pcu)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ int ret, ani_mode;
+ bool fast = chan && modparam_fastchanswitch ? 1 : 0;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
+
+ __set_bit(ATH_STAT_RESET, ah->status);
+
+ ath5k_hw_set_imr(ah, 0);
+ synchronize_irq(ah->irq);
+ ath5k_stop_tasklets(ah);
+
+ /* Save ani mode and disable ANI during
+ * reset. If we don't we might get false
+ * PHY error interrupts. */
+ ani_mode = ah->ani_state.ani_mode;
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
+
+ /* We are going to empty hw queues
+ * so we should also free any remaining
+ * tx buffers */
+ ath5k_drain_tx_buffs(ah);
+
+ /* Stop PCU */
+ ath5k_hw_stop_rx_pcu(ah);
+
+ /* Stop DMA
+ *
+ * Note: If DMA didn't stop continue
+ * since only a reset will fix it.
+ */
+ ret = ath5k_hw_dma_stop(ah);
+
+ /* RF Bus grant won't work if we have pending
+ * frames
+ */
+ if (ret && fast) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "DMA didn't stop, falling back to normal reset\n");
+ fast = false;
+ }
+
+ if (chan)
+ ah->curchan = chan;
+
+ ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
+ if (ret) {
+ ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
+ goto err;
+ }
+
+ ret = ath5k_rx_start(ah);
+ if (ret) {
+ ATH5K_ERR(ah, "can't start recv logic\n");
+ goto err;
+ }
+
+ ath5k_ani_init(ah, ani_mode);
+
+ /*
+ * Set calibration intervals
+ *
+ * Note: We don't need to run calibration immediately
+ * since some initial calibration is done on reset
+ * even for fast channel switching. Also on scanning
+ * this will get set again and again and it won't get
+ * executed unless we connect somewhere and spend some
+ * time on the channel (that's what calibration needs
+ * anyway to be accurate).
+ */
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
+ ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+
+ /* clear survey data and cycle counters */
+ memset(&ah->survey, 0, sizeof(ah->survey));
+ spin_lock_bh(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ spin_unlock_bh(&common->cc_lock);
+
+ /*
+ * Change channels and update the h/w rate map if we're switching;
+ * e.g. 11a to 11b/g.
+ *
+ * We may be doing a reset in response to an ioctl that changes the
+ * channel so update any state that might change as a result.
+ *
+ * XXX needed?
+ */
+/* ath5k_chan_change(ah, c); */
+
+ __clear_bit(ATH_STAT_RESET, ah->status);
+
+ ath5k_beacon_config(ah);
+ /* intrs are enabled by ath5k_beacon_config */
+
+ ieee80211_wake_queues(ah->hw);
+
+ return 0;
+err:
+ return ret;
+}
+
+static void ath5k_reset_work(struct work_struct *work)
+{
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ reset_work);
+
+ mutex_lock(&ah->lock);
+ ath5k_reset(ah, NULL, true);
+ mutex_unlock(&ah->lock);
+}
+
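+/*
+ * Finish driver initialization: set up the bands, descriptors, tx
+ * queues, tasklets and work items, read the MAC address from the
+ * EEPROM, initialize the regulatory system and finally register the
+ * device with mac80211.
+ */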
+static int
+ath5k_init(struct ieee80211_hw *hw)
+{
+
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
+ struct ath5k_txq *txq;
+ u8 mac[ETH_ALEN] = {};
+ int ret;
+
+
+ /*
+ * Collect the channel list. The 802.11 layer
+ * is responsible for filtering this list based
+ * on settings like the phy mode and regulatory
+ * domain restrictions.
+ */
+ ret = ath5k_setup_bands(hw);
+ if (ret) {
+ ATH5K_ERR(ah, "can't get channels\n");
+ goto err;
+ }
+
+ /*
+ * Allocate tx+rx descriptors and populate the lists.
+ */
+ ret = ath5k_desc_alloc(ah);
+ if (ret) {
+ ATH5K_ERR(ah, "can't allocate descriptors\n");
+ goto err;
+ }
+
+ /*
+ * Allocate hardware transmit queues: one queue for
+ * beacon frames and one data queue for each QoS
+ * priority. Note that hw functions handle resetting
+ * these queues at the needed time.
+ */
+ ret = ath5k_beaconq_setup(ah);
+ if (ret < 0) {
+ ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
+ goto err_desc;
+ }
+ ah->bhalq = ret;
+ ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
+ if (IS_ERR(ah->cabq)) {
+ ATH5K_ERR(ah, "can't setup cab queue\n");
+ ret = PTR_ERR(ah->cabq);
+ goto err_bhal;
+ }
+
+ /* 5211 and 5212 usually support 10 queues, but we'd better rely on the
+ * capability information */
+ if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
+ /* This order matches mac80211's queue priority, so we can
+ * directly use the mac80211 queue number without any mapping */
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ hw->queues = 4;
+ } else {
+ /* older hardware (5210) can only support one data queue */
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ hw->queues = 1;
+ }
+
+ tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
+ tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
+ tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
+ tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
+
+ INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
+ INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
+
+ ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
+ if (ret) {
+ ATH5K_ERR(ah, "unable to read address from EEPROM\n");
+ goto err_queues;
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, mac);
+ /* All MAC address bits matter for ACKs */
+ ath5k_update_bssid_mask_and_opmode(ah, NULL);
+
+ regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
+ ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
+ if (ret) {
+ ATH5K_ERR(ah, "can't initialize regulatory system\n");
+ goto err_queues;
+ }
+
+ ret = ieee80211_register_hw(hw);
+ if (ret) {
+ ATH5K_ERR(ah, "can't register ieee80211 hw\n");
+ goto err_queues;
+ }
+
+ if (!ath_is_world_regd(regulatory))
+ regulatory_hint(hw->wiphy, regulatory->alpha2);
+
+ ath5k_init_leds(ah);
+
+ ath5k_sysfs_register(ah);
+
+ return 0;
+err_queues:
+ ath5k_txq_release(ah);
+err_bhal:
+ ath5k_hw_release_tx_queue(ah, ah->bhalq);
+err_desc:
+ ath5k_desc_free(ah);
+err:
+ return ret;
+}
+
+void
+ath5k_deinit_ah(struct ath5k_hw *ah)
+{
+ struct ieee80211_hw *hw = ah->hw;
+
+ /*
+ * NB: the order of these is important:
+ * o call the 802.11 layer before detaching ath5k_hw to
+ * ensure callbacks into the driver to delete global
+ * key cache entries can be handled
+ * o reclaim the tx queue data structures after calling
+ * the 802.11 layer as we'll get called back to reclaim
+ * node state and potentially want to use them
+ * o to cleanup the tx queues the hal is called, so detach
+ * it last
+ * XXX: ??? detach ath5k_hw ???
+ * Other than that, it's straightforward...
+ */
+ ieee80211_unregister_hw(hw);
+ ath5k_desc_free(ah);
+ ath5k_txq_release(ah);
+ ath5k_hw_release_tx_queue(ah, ah->bhalq);
+ ath5k_unregister_leds(ah);
+
+ ath5k_sysfs_unregister(ah);
+ /*
+ * NB: can't reclaim these until after ieee80211_ifdetach
+ * returns because we'll get called back to reclaim node
+ * state and potentially want to use them.
+ */
+ ath5k_hw_deinit(ah);
+ free_irq(ah->irq, ah);
+}
+
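+/*
+ * Iterate over all active interfaces and report whether any of them is
+ * currently associated.
+ */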
+bool
+ath5k_any_vif_assoc(struct ath5k_hw *ah)
+{
+ struct ath5k_vif_iter_data iter_data;
+ iter_data.hw_macaddr = NULL;
+ iter_data.any_assoc = false;
+ iter_data.need_set_hw_addr = false;
+ iter_data.found_active = true;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
+ return iter_data.any_assoc;
+}
+
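+/*
+ * Enable or disable the hardware beacon filter by updating the rx
+ * filter register and the cached filter flags.
+ */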
+void
+ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
+{
+ struct ath5k_hw *ah = hw->priv;
+ u32 rfilt;
+ rfilt = ath5k_hw_get_rx_filter(ah);
+ if (enable)
+ rfilt |= AR5K_RX_FILTER_BEACON;
+ else
+ rfilt &= ~AR5K_RX_FILTER_BEACON;
+ ath5k_hw_set_rx_filter(ah, rfilt);
+ ah->filter_flags = rfilt;
+}
+
+void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ah && ah->hw)
+ printk("%s" pr_fmt("%s: %pV"),
+ level, wiphy_name(ah->hw->wiphy), &vaf);
+ else
+ printk("%s" pr_fmt("%pV"), level, &vaf);
+
+ va_end(args);
+}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
new file mode 100644
index 000000000..97469d0fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+/*
+ * Definitions for the Atheros Wireless LAN controller driver.
+ */
+#ifndef _DEV_ATH5K_BASE_H
+#define _DEV_ATH5K_BASE_H
+
+struct ieee80211_vif;
+struct ieee80211_hw;
+struct ath5k_hw;
+struct ath5k_txq;
+struct ieee80211_channel;
+struct ath_bus_ops;
+struct ieee80211_tx_control;
+enum nl80211_iftype;
+
+enum ath5k_srev_type {
+ AR5K_VERSION_MAC,
+ AR5K_VERSION_RAD,
+};
+
+struct ath5k_srev_name {
+ const char *sr_name;
+ enum ath5k_srev_type sr_type;
+ u_int sr_val;
+};
+
+struct ath5k_buf {
+ struct list_head list;
+ struct ath5k_desc *desc; /* virtual addr of desc */
+ dma_addr_t daddr; /* physical addr of desc */
+ struct sk_buff *skb; /* skbuff for buf */
+ dma_addr_t skbaddr; /* physical addr of skb data */
+ struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */
+};
+
+struct ath5k_vif {
+ bool assoc; /* are we associated or not */
+ enum nl80211_iftype opmode;
+ int bslot;
+ struct ath5k_buf *bbuf; /* beacon buffer */
+};
+
+struct ath5k_vif_iter_data {
+ const u8 *hw_macaddr;
+ u8 mask[ETH_ALEN];
+ u8 active_mac[ETH_ALEN]; /* first active MAC */
+ bool need_set_hw_addr;
+ bool found_active;
+ bool any_assoc;
+ enum nl80211_iftype opmode;
+ int n_stas;
+};
+
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
+bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
+
+int ath5k_start(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw);
+
+void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_hw *ah);
+void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
+ struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
+void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq, struct ieee80211_tx_control *control);
+
+const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
+
+int ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
+void ath5k_deinit_ah(struct ath5k_hw *ah);
+
+/* Check whether BSSID mask is supported */
+#define ath5k_hw_hasbssidmask(_ah) ((_ah)->ah_version == AR5K_AR5212)
+
+/* Check whether virtual EOL is supported */
+#define ath5k_hw_hasveol(_ah) ((_ah)->ah_version != AR5K_AR5210)
+
+#endif /* _DEV_ATH5K_BASE_H */
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
new file mode 100644
index 000000000..994169ad3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/**************\
+* Capabilities *
+\**************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include "../regd.h"
+
+/*
+ * Fill the capabilities struct
+ * TODO: Merge this with EEPROM code when we are done with it
+ */
+int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
+{
+ struct ath5k_capabilities *caps = &ah->ah_capabilities;
+ u16 ee_header;
+
+ /* Capabilities stored in the EEPROM */
+ ee_header = caps->cap_eeprom.ee_header;
+
+ if (ah->ah_version == AR5K_AR5210) {
+ /*
+ * Set radio capabilities
+ * (The AR5110 only supports the middle 5GHz band)
+ */
+ caps->cap_range.range_5ghz_min = 5120;
+ caps->cap_range.range_5ghz_max = 5430;
+ caps->cap_range.range_2ghz_min = 0;
+ caps->cap_range.range_2ghz_max = 0;
+
+ /* Set supported modes */
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
+ } else {
+ /*
+ * XXX The transceiver supports frequencies from 4920 to 6100MHz
+ * XXX and from 2312 to 2732MHz. There are problems with the
+ * XXX current ieee80211 implementation because the IEEE
+ * XXX channel mapping does not support negative channel
+ * XXX numbers (2312MHz is channel -19). Of course, this
+ * XXX doesn't matter because these channels are out of the
+ * XXX legal range.
+ */
+
+ /*
+ * Set radio capabilities
+ */
+
+ if (AR5K_EEPROM_HDR_11A(ee_header)) {
+ if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+ caps->cap_range.range_5ghz_min = 4920;
+ else
+ caps->cap_range.range_5ghz_min = 5005;
+ caps->cap_range.range_5ghz_max = 6100;
+
+ /* Set supported modes */
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
+ }
+
+ /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
+ * connected */
+ if (AR5K_EEPROM_HDR_11B(ee_header) ||
+ (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)) {
+ /* 2312MHz is possible in hw (see note above) but we start at 2412 */
+ caps->cap_range.range_2ghz_min = 2412;
+ caps->cap_range.range_2ghz_max = 2732;
+
+ /* Override 2GHz modes on SoCs that need it
+ * NOTE: cap_needs_2GHz_ovr gets set from
+ * ath_ahb_probe */
+ if (!caps->cap_needs_2GHz_ovr) {
+ if (AR5K_EEPROM_HDR_11B(ee_header))
+ __set_bit(AR5K_MODE_11B,
+ caps->cap_mode);
+
+ if (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)
+ __set_bit(AR5K_MODE_11G,
+ caps->cap_mode);
+ }
+ }
+ }
+
+ if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
+ __clear_bit(AR5K_MODE_11A, caps->cap_mode);
+
+ /* Set number of supported TX queues */
+ if (ah->ah_version == AR5K_AR5210)
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
+ else
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+
+ /* Newer hardware has PHY error counters */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
+ caps->cap_has_phyerr_counters = true;
+ else
+ caps->cap_has_phyerr_counters = false;
+
+ /* MACs since AR5212 have MRR support */
+ if (ah->ah_version == AR5K_AR5212)
+ caps->cap_has_mrr_support = true;
+ else
+ caps->cap_has_mrr_support = false;
+
+ return 0;
+}
+
+/*
+ * TODO: Following functions should be part of a new function
+ * set_capability
+ */
+
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
+ u16 assoc_id)
+{
+ if (ah->ah_version == AR5K_AR5210) {
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
+ AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
+{
+ if (ah->ah_version == AR5K_AR5210) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
+ AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
+ return 0;
+ }
+
+ return -EIO;
+}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
new file mode 100644
index 000000000..c70782e8f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -0,0 +1,1139 @@
+/*
+ * Copyright (c) 2007-2008 Bruno Randolf <bruno@thinktube.com>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2006 Devicescape Software, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include "debug.h"
+#include "ath5k.h"
+#include "reg.h"
+#include "base.h"
+
+static unsigned int ath5k_debug;
+module_param_named(debug, ath5k_debug, uint, 0);
+
+
+/* debugfs: registers */
+
+struct reg {
+ const char *name;
+ int addr;
+};
+
+#define REG_STRUCT_INIT(r) { #r, r }
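+/* e.g. REG_STRUCT_INIT(AR5K_CR) expands to { "AR5K_CR", AR5K_CR } */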
+
+/* just a few random registers, might want to add more */
+static const struct reg regs[] = {
+ REG_STRUCT_INIT(AR5K_CR),
+ REG_STRUCT_INIT(AR5K_RXDP),
+ REG_STRUCT_INIT(AR5K_CFG),
+ REG_STRUCT_INIT(AR5K_IER),
+ REG_STRUCT_INIT(AR5K_BCR),
+ REG_STRUCT_INIT(AR5K_RTSD0),
+ REG_STRUCT_INIT(AR5K_RTSD1),
+ REG_STRUCT_INIT(AR5K_TXCFG),
+ REG_STRUCT_INIT(AR5K_RXCFG),
+ REG_STRUCT_INIT(AR5K_RXJLA),
+ REG_STRUCT_INIT(AR5K_MIBC),
+ REG_STRUCT_INIT(AR5K_TOPS),
+ REG_STRUCT_INIT(AR5K_RXNOFRM),
+ REG_STRUCT_INIT(AR5K_TXNOFRM),
+ REG_STRUCT_INIT(AR5K_RPGTO),
+ REG_STRUCT_INIT(AR5K_RFCNT),
+ REG_STRUCT_INIT(AR5K_MISC),
+ REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT),
+ REG_STRUCT_INIT(AR5K_ISR),
+ REG_STRUCT_INIT(AR5K_PISR),
+ REG_STRUCT_INIT(AR5K_SISR0),
+ REG_STRUCT_INIT(AR5K_SISR1),
+ REG_STRUCT_INIT(AR5K_SISR2),
+ REG_STRUCT_INIT(AR5K_SISR3),
+ REG_STRUCT_INIT(AR5K_SISR4),
+ REG_STRUCT_INIT(AR5K_IMR),
+ REG_STRUCT_INIT(AR5K_PIMR),
+ REG_STRUCT_INIT(AR5K_SIMR0),
+ REG_STRUCT_INIT(AR5K_SIMR1),
+ REG_STRUCT_INIT(AR5K_SIMR2),
+ REG_STRUCT_INIT(AR5K_SIMR3),
+ REG_STRUCT_INIT(AR5K_SIMR4),
+ REG_STRUCT_INIT(AR5K_DCM_ADDR),
+ REG_STRUCT_INIT(AR5K_DCCFG),
+ REG_STRUCT_INIT(AR5K_CCFG),
+ REG_STRUCT_INIT(AR5K_CPC0),
+ REG_STRUCT_INIT(AR5K_CPC1),
+ REG_STRUCT_INIT(AR5K_CPC2),
+ REG_STRUCT_INIT(AR5K_CPC3),
+ REG_STRUCT_INIT(AR5K_CPCOVF),
+ REG_STRUCT_INIT(AR5K_RESET_CTL),
+ REG_STRUCT_INIT(AR5K_SLEEP_CTL),
+ REG_STRUCT_INIT(AR5K_INTPEND),
+ REG_STRUCT_INIT(AR5K_SFR),
+ REG_STRUCT_INIT(AR5K_PCICFG),
+ REG_STRUCT_INIT(AR5K_GPIOCR),
+ REG_STRUCT_INIT(AR5K_GPIODO),
+ REG_STRUCT_INIT(AR5K_SREV),
+};
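+
+/*
+ * The seq_file iterator below walks the regs[] table above and prints one
+ * name/value pair per register, reading the live value with
+ * ath5k_hw_reg_read() on every read of the debugfs file.
+ */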
+
+static void *reg_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
+}
+
+static void reg_stop(struct seq_file *seq, void *p)
+{
+ /* nothing to do */
+}
+
+static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
+{
+ ++*pos;
+ return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
+}
+
+static int reg_show(struct seq_file *seq, void *p)
+{
+ struct ath5k_hw *ah = seq->private;
+ struct reg *r = p;
+ seq_printf(seq, "%-25s0x%08x\n", r->name,
+ ath5k_hw_reg_read(ah, r->addr));
+ return 0;
+}
+
+static const struct seq_operations register_seq_ops = {
+ .start = reg_start,
+ .next = reg_next,
+ .stop = reg_stop,
+ .show = reg_show
+};
+
+static int open_file_registers(struct inode *inode, struct file *file)
+{
+ struct seq_file *s;
+ int res;
+ res = seq_open(file, &register_seq_ops);
+ if (res == 0) {
+ s = file->private_data;
+ s->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations fops_registers = {
+ .open = open_file_registers,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: beacons */
+
+static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[500];
+ unsigned int len = 0;
+ unsigned int v;
+ u64 tsf;
+
+ v = ath5k_hw_reg_read(ah, AR5K_BEACON);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
+ "AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
+ (v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
+
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
+ "AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
+
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
+ "AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
+
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ "AR5K_TIMER0 (TBTT)", v, v);
+
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ "AR5K_TIMER1 (DMA)", v, v >> 3);
+
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ "AR5K_TIMER2 (SWBA)", v, v >> 3);
+
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ "AR5K_TIMER3 (ATIM)", v, v);
+
+ tsf = ath5k_hw_get_tsf64(ah);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "TSF\t\t0x%016llx\tTU: %08x\n",
+ (unsigned long long)tsf, TSF_TO_TU(tsf));
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_beacon(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (strncmp(buf, "disable", 7) == 0) {
+ AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
+ pr_info("debugfs disable beacons\n");
+ } else if (strncmp(buf, "enable", 6) == 0) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
+ pr_info("debugfs enable beacons\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_beacon = {
+ .read = read_file_beacon,
+ .write = write_file_beacon,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
+/* debugfs: reset */
+
+static ssize_t write_file_reset(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ return count;
+}
+
+static const struct file_operations fops_reset = {
+ .write = write_file_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+
+/* debugfs: debug level */
+
+static const struct {
+ enum ath5k_debug_level level;
+ const char *name;
+ const char *desc;
+} dbg_info[] = {
+ { ATH5K_DEBUG_RESET, "reset", "reset and initialization" },
+ { ATH5K_DEBUG_INTR, "intr", "interrupt handling" },
+ { ATH5K_DEBUG_MODE, "mode", "mode init/setup" },
+ { ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" },
+ { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
+ { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
+ { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
+ { ATH5K_DEBUG_LED, "led", "LED management" },
+ { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
+ { ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
+ { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
+ { ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
+ { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
+};
+
+static ssize_t read_file_debug(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
+
+ for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%10s %c 0x%08x - %s\n", dbg_info[i].name,
+ ah->debug.level & dbg_info[i].level ? '+' : ' ',
+ dbg_info[i].level, dbg_info[i].desc);
+ }
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%10s %c 0x%08x - %s\n", dbg_info[i].name,
+ ah->debug.level == dbg_info[i].level ? '+' : ' ',
+ dbg_info[i].level, dbg_info[i].desc);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_debug(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
+ if (strncmp(buf, dbg_info[i].name,
+ strlen(dbg_info[i].name)) == 0) {
+ ah->debug.level ^= dbg_info[i].level; /* toggle bit */
+ break;
+ }
+ }
+ return count;
+}
+
+static const struct file_operations fops_debug = {
+ .read = read_file_debug,
+ .write = write_file_debug,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
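+
+/*
+ * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug
+ * and the wiphy is phy0): writing a level name toggles that bit, e.g.
+ *
+ *   echo beacon > /sys/kernel/debug/ieee80211/phy0/ath5k/debug
+ *
+ * flips ATH5K_DEBUG_BEACON in ah->debug.level, while reading the file lists
+ * all levels and marks the active ones with '+'.
+ */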
+
+
+/* debugfs: antenna */
+
+static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+ unsigned int v;
+
+ len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
+ ah->ah_ant_mode);
+ len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
+ ah->ah_def_ant);
+ len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
+ ah->ah_tx_ant);
+
+ len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "[antenna %d]\t%d\t%d\n",
+ i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
+ }
+ len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
+ ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);
+
+ v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
+
+ v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
+ (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
+
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
+ (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
+
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
+ (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
+
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
+ (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_antenna(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (strncmp(buf, "diversity", 9) == 0) {
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ pr_info("debug: enable diversity\n");
+ } else if (strncmp(buf, "fixed-a", 7) == 0) {
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
+ pr_info("debug: fixed antenna A\n");
+ } else if (strncmp(buf, "fixed-b", 7) == 0) {
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
+ pr_info("debug: fixed antenna B\n");
+ } else if (strncmp(buf, "clear", 5) == 0) {
+ for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
+ ah->stats.antenna_rx[i] = 0;
+ ah->stats.antenna_tx[i] = 0;
+ }
+ pr_info("debug: cleared antenna stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_antenna = {
+ .read = read_file_antenna,
+ .write = write_file_antenna,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* debugfs: misc */
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ u32 filt = ath5k_hw_get_rx_filter(ah);
+
+ len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
+ ah->bssidmask);
+ len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
+ filt);
+ if (filt & AR5K_RX_FILTER_UCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ if (filt & AR5K_RX_FILTER_MCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ if (filt & AR5K_RX_FILTER_BCAST)
+ len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ if (filt & AR5K_RX_FILTER_CONTROL)
+ len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ if (filt & AR5K_RX_FILTER_BEACON)
+ len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ if (filt & AR5K_RX_FILTER_PROM)
+ len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ if (filt & AR5K_RX_FILTER_XRPOLL)
+ len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL");
+ if (filt & AR5K_RX_FILTER_PROBEREQ)
+ len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ if (filt & AR5K_RX_FILTER_PHYERR_5212)
+ len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
+ if (filt & AR5K_RX_FILTER_RADARERR_5212)
+ len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
+ if (filt & AR5K_RX_FILTER_PHYERR_5211)
+ len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
+ if (filt & AR5K_RX_FILTER_RADARERR_5211)
+ len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
+
+ len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
+ ath_opmode_to_string(ah->opmode), ah->opmode);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_misc = {
+ .read = read_file_misc,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: frameerrors */
+
+static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
+ char buf[700];
+ unsigned int len = 0;
+ int i;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "RX\n---------------------\n");
+ len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
+ st->rxerr_crc,
+ st->rx_all_count > 0 ?
+ st->rxerr_crc * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
+ st->rxerr_phy,
+ st->rx_all_count > 0 ?
+ st->rxerr_phy * 100 / st->rx_all_count : 0);
+ for (i = 0; i < 32; i++) {
+ if (st->rxerr_phy_code[i])
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " phy_err[%u]\t%u\n",
+ i, st->rxerr_phy_code[i]);
+ }
+
+ len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ st->rxerr_fifo,
+ st->rx_all_count > 0 ?
+ st->rxerr_fifo * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
+ st->rxerr_decrypt,
+ st->rx_all_count > 0 ?
+ st->rxerr_decrypt * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
+ st->rxerr_mic,
+ st->rx_all_count > 0 ?
+ st->rxerr_mic * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
+ st->rxerr_proc,
+ st->rx_all_count > 0 ?
+ st->rxerr_proc * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
+ st->rxerr_jumbo,
+ st->rx_all_count > 0 ?
+ st->rxerr_jumbo * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
+ st->rx_all_count);
+ len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
+ st->rx_bytes_count);
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nTX\n---------------------\n");
+ len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
+ st->txerr_retry,
+ st->tx_all_count > 0 ?
+ st->txerr_retry * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ st->txerr_fifo,
+ st->tx_all_count > 0 ?
+ st->txerr_fifo * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
+ st->txerr_filt,
+ st->tx_all_count > 0 ?
+ st->txerr_filt * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
+ st->tx_all_count);
+ len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
+ st->tx_bytes_count);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_frameerrors(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (strncmp(buf, "clear", 5) == 0) {
+ st->rxerr_crc = 0;
+ st->rxerr_phy = 0;
+ st->rxerr_fifo = 0;
+ st->rxerr_decrypt = 0;
+ st->rxerr_mic = 0;
+ st->rxerr_proc = 0;
+ st->rxerr_jumbo = 0;
+ st->rx_all_count = 0;
+ st->txerr_retry = 0;
+ st->txerr_fifo = 0;
+ st->txerr_filt = 0;
+ st->tx_all_count = 0;
+ pr_info("debug: cleared frameerrors stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_frameerrors = {
+ .read = read_file_frameerrors,
+ .write = write_file_frameerrors,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
+/* debugfs: ani */
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
+ struct ath5k_ani_state *as = &ah->ani_state;
+
+ char buf[700];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "HW has PHY error counters:\t%s\n",
+ ah->ah_capabilities.cap_has_phyerr_counters ?
+ "yes" : "no");
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "HW max spur immunity level:\t%d\n",
+ as->max_spur_level);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nANI state\n--------------------------------------------\n");
+ len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
+ switch (as->ani_mode) {
+ case ATH5K_ANI_MODE_OFF:
+ len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_LOW:
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "MANUAL LOW\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_HIGH:
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "MANUAL HIGH\n");
+ break;
+ case ATH5K_ANI_MODE_AUTO:
+ len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
+ break;
+ default:
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "??? (not good)\n");
+ break;
+ }
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "noise immunity level:\t\t%d\n",
+ as->noise_imm_level);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "spur immunity level:\t\t%d\n",
+ as->spur_level);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "firstep level:\t\t\t%d\n",
+ as->firstep_level);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "OFDM weak signal detection:\t%s\n",
+ as->ofdm_weak_sig ? "on" : "off");
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "CCK weak signal detection:\t%s\n",
+ as->cck_weak_sig ? "on" : "off");
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "\nMIB INTERRUPTS:\t\t%u\n",
+ st->mib_intr);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "beacon RSSI average:\t%d\n",
+ (int)ewma_read(&ah->ah_beacon_rssi_avg));
+
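+/*
+ * CC_PRINT() expands to two printf arguments: the raw profile counter and
+ * the percentage of the total cycle count it represents (0 if no cycles
+ * have been counted yet).
+ */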
+#define CC_PRINT(_struct, _field) \
+ _struct._field, \
+ _struct.cycles > 0 ? \
+ _struct._field * 100 / _struct.cycles : 0
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt tx\t\t%u\t(%d%%)\n",
+ CC_PRINT(as->last_cc, tx_frame));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt rx\t\t%u\t(%d%%)\n",
+ CC_PRINT(as->last_cc, rx_frame));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt busy\t\t%u\t(%d%%)\n",
+ CC_PRINT(as->last_cc, rx_busy));
+#undef CC_PRINT
+ len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
+ as->last_cc.cycles);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "listen time\t\t%d\tlast: %d\n",
+ as->listen_time, as->last_listen);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->ofdm_errors, as->last_ofdm_errors,
+ as->sum_ofdm_errors);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->cck_errors, as->last_cck_errors,
+ as->sum_cck_errors);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
+ ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
+ ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (strncmp(buf, "sens-low", 8) == 0) {
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ } else if (strncmp(buf, "sens-high", 9) == 0) {
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ } else if (strncmp(buf, "ani-off", 7) == 0) {
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
+ } else if (strncmp(buf, "ani-on", 6) == 0) {
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
+ } else if (strncmp(buf, "noise-low", 9) == 0) {
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ } else if (strncmp(buf, "noise-high", 10) == 0) {
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ } else if (strncmp(buf, "spur-low", 8) == 0) {
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ } else if (strncmp(buf, "spur-high", 9) == 0) {
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ani_state.max_spur_level);
+ } else if (strncmp(buf, "fir-low", 7) == 0) {
+ ath5k_ani_set_firstep_level(ah, 0);
+ } else if (strncmp(buf, "fir-high", 8) == 0) {
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ } else if (strncmp(buf, "ofdm-off", 8) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ } else if (strncmp(buf, "ofdm-on", 7) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ } else if (strncmp(buf, "cck-off", 7) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (strncmp(buf, "cck-on", 6) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ }
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
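+
+/*
+ * Example (illustrative): "echo ani-off > .../ath5k/ani" disables ANI and
+ * "echo ani-on > .../ath5k/ani" restores automatic mode. Note that
+ * "sens-low"/"sens-high" refer to receiver sensitivity, which is why they
+ * map to MANUAL_HIGH/MANUAL_LOW immunity above.
+ */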
+
+
+/* debugfs: queues etc */
+
+static ssize_t read_file_queue(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+
+ struct ath5k_txq *txq;
+ struct ath5k_buf *bf, *bf0;
+ int i, n;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "available txbuffers: %d\n", ah->txbuf_len);
+
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ txq = &ah->txqs[i];
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%02d: %ssetup\n", i, txq->setup ? "" : "not ");
+
+ if (!txq->setup)
+ continue;
+
+ n = 0;
+ spin_lock_bh(&txq->lock);
+ list_for_each_entry_safe(bf, bf0, &txq->q, list)
+ n++;
+ spin_unlock_bh(&txq->lock);
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " len: %d bufs: %d\n", txq->txq_len, n);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ " stuck: %d\n", txq->txq_stuck);
+ }
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_queue(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_hw *ah = file->private_data;
+ char buf[20];
+
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (strncmp(buf, "start", 5) == 0)
+ ieee80211_wake_queues(ah->hw);
+ else if (strncmp(buf, "stop", 4) == 0)
+ ieee80211_stop_queues(ah->hw);
+
+ return count;
+}
+
+
+static const struct file_operations fops_queue = {
+ .read = read_file_queue,
+ .write = write_file_queue,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* debugfs: eeprom */
+
+struct eeprom_private {
+ u16 *buf;
+ int len;
+};
+
+static int open_file_eeprom(struct inode *inode, struct file *file)
+{
+ struct eeprom_private *ep;
+ struct ath5k_hw *ah = inode->i_private;
+ bool res;
+ int i, ret;
+ u32 eesize;
+ u16 val, *buf;
+
+ /* Get eeprom size */
+
+ res = ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_UPPER, &val);
+ if (!res)
+ return -EACCES;
+
+ if (val == 0) {
+ eesize = AR5K_EEPROM_INFO_MAX + AR5K_EEPROM_INFO_BASE;
+ } else {
+ eesize = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_LOWER, &val);
+ eesize = eesize | val;
+ }
+
+ if (eesize > 4096)
+ return -EINVAL;
+
+ /* Create buffer and read in eeprom */
+
+ /* eesize is counted in 16-bit words */
+ buf = vmalloc(eesize * 2);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < eesize; ++i) {
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
+ buf[i] = val;
+ }
+
+ /* Create private struct and assign to file */
+
+ ep = kmalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ ret = -ENOMEM;
+ goto freebuf;
+ }
+
+ ep->buf = buf;
+ ep->len = eesize * 2; /* length in bytes */
+
+ file->private_data = (void *)ep;
+
+ return 0;
+
+freebuf:
+ vfree(buf);
+err:
+ return ret;
+
+}
+
+static ssize_t read_file_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct eeprom_private *ep = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos, ep->buf, ep->len);
+}
+
+static int release_file_eeprom(struct inode *inode, struct file *file)
+{
+ struct eeprom_private *ep = file->private_data;
+
+ vfree(ep->buf);
+ kfree(ep);
+
+ return 0;
+}
+
+static const struct file_operations fops_eeprom = {
+ .open = open_file_eeprom,
+ .read = read_file_eeprom,
+ .release = release_file_eeprom,
+ .owner = THIS_MODULE,
+};
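+
+/*
+ * Example (illustrative): the eeprom file exposes a raw binary dump of the
+ * EEPROM contents read at open time, so something like
+ * "cat .../ath5k/eeprom > ath5k-eeprom.bin" saves a snapshot for offline
+ * inspection.
+ */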
+
+
+void
+ath5k_debug_init_device(struct ath5k_hw *ah)
+{
+ struct dentry *phydir;
+
+ ah->debug.level = ath5k_debug;
+
+ phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
+ if (!phydir)
+ return;
+
+ debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
+ &fops_debug);
+
+ debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers);
+
+ debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
+ &fops_beacon);
+
+ debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);
+
+ debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
+ &fops_antenna);
+
+ debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);
+
+ debugfs_create_file("eeprom", S_IRUSR, phydir, ah, &fops_eeprom);
+
+ debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
+ &fops_frameerrors);
+
+ debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);
+
+ debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
+ &fops_queue);
+
+ debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
+ &ah->ah_use_32khz_clock);
+}
+
+/* functions used in other places */
+
+void
+ath5k_debug_dump_bands(struct ath5k_hw *ah)
+{
+ unsigned int b, i;
+
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
+ return;
+
+ BUG_ON(!ah->sbands);
+
+ for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ struct ieee80211_supported_band *band = &ah->sbands[b];
+ char bname[6];
+ switch (band->band) {
+ case IEEE80211_BAND_2GHZ:
+ strcpy(bname, "2 GHz");
+ break;
+ case IEEE80211_BAND_5GHZ:
+ strcpy(bname, "5 GHz");
+ break;
+ default:
+ printk(KERN_DEBUG "Band not supported: %d\n",
+ band->band);
+ return;
+ }
+ printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname,
+ band->n_channels, band->n_bitrates);
+ printk(KERN_DEBUG " channels:\n");
+ for (i = 0; i < band->n_channels; i++)
+ printk(KERN_DEBUG " %3d %d %.4x %.4x\n",
+ ieee80211_frequency_to_channel(
+ band->channels[i].center_freq),
+ band->channels[i].center_freq,
+ band->channels[i].hw_value,
+ band->channels[i].flags);
+ printk(KERN_DEBUG " rates:\n");
+ for (i = 0; i < band->n_bitrates; i++)
+ printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n",
+ band->bitrates[i].bitrate,
+ band->bitrates[i].hw_value,
+ band->bitrates[i].flags,
+ band->bitrates[i].hw_value_short);
+ }
+}
+
+static inline void
+ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
+ struct ath5k_rx_status *rs)
+{
+ struct ath5k_desc *ds = bf->desc;
+ struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx;
+
+ printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
+ ds, (unsigned long long)bf->daddr,
+ ds->ds_link, ds->ds_data,
+ rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
+ rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
+ !done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
+}
+
+void
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
+{
+ struct ath5k_desc *ds;
+ struct ath5k_buf *bf;
+ struct ath5k_rx_status rs = {};
+ int status;
+
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
+ return;
+
+ printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
+ ath5k_hw_get_rxdp(ah), ah->rxlink);
+
+ spin_lock_bh(&ah->rxbuflock);
+ list_for_each_entry(bf, &ah->rxbuf, list) {
+ ds = bf->desc;
+ status = ah->ah_proc_rx_desc(ah, ds, &rs);
+ if (!status)
+ ath5k_debug_printrxbuf(bf, status == 0, &rs);
+ }
+ spin_unlock_bh(&ah->rxbuflock);
+}
+
+void
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
+{
+ struct ath5k_desc *ds = bf->desc;
+ struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
+ struct ath5k_tx_status ts = {};
+ int done;
+
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
+ return;
+
+ done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
+
+ printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
+ "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
+ ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1,
+ td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3,
+ td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
+ done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
+}
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
new file mode 100644
index 000000000..0a3f916a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2007 Bruno Randolf <bruno@thinktube.com>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2006 Devicescape Software, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ATH5K_DEBUG_H
+#define _ATH5K_DEBUG_H
+
+struct ath5k_hw;
+struct sk_buff;
+struct ath5k_buf;
+
+struct ath5k_dbg_info {
+ unsigned int level; /* debug level */
+};
+
+/**
+ * enum ath5k_debug_level - ath5k debug level
+ *
+ * @ATH5K_DEBUG_RESET: reset processing
+ * @ATH5K_DEBUG_INTR: interrupt handling
+ * @ATH5K_DEBUG_MODE: mode init/setup
+ * @ATH5K_DEBUG_XMIT: basic xmit operation
+ * @ATH5K_DEBUG_BEACON: beacon handling
+ * @ATH5K_DEBUG_CALIBRATE: periodic calibration
+ * @ATH5K_DEBUG_TXPOWER: transmit power setting
+ * @ATH5K_DEBUG_LED: led management
+ * @ATH5K_DEBUG_DUMPBANDS: dump bands
+ * @ATH5K_DEBUG_DMA: debug dma start/stop
+ * @ATH5K_DEBUG_ANI: adaptive noise immunity
+ * @ATH5K_DEBUG_DESC: descriptor setup
+ * @ATH5K_DEBUG_ANY: show at any debug level
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. The debug level is given in calls to ATH5K_DBG to specify
+ * where the message should appear, and the user can control which debugging
+ * messages they want to see, either via the module parameter 'debug' on
+ * module load, or dynamically via the debugfs file 'ath5k/phyX/debug'.
+ * These levels can be combined together by bitwise OR.
+ */
+enum ath5k_debug_level {
+ ATH5K_DEBUG_RESET = 0x00000001,
+ ATH5K_DEBUG_INTR = 0x00000002,
+ ATH5K_DEBUG_MODE = 0x00000004,
+ ATH5K_DEBUG_XMIT = 0x00000008,
+ ATH5K_DEBUG_BEACON = 0x00000010,
+ ATH5K_DEBUG_CALIBRATE = 0x00000020,
+ ATH5K_DEBUG_TXPOWER = 0x00000040,
+ ATH5K_DEBUG_LED = 0x00000080,
+ ATH5K_DEBUG_DUMPBANDS = 0x00000400,
+ ATH5K_DEBUG_DMA = 0x00000800,
+ ATH5K_DEBUG_ANI = 0x00002000,
+ ATH5K_DEBUG_DESC = 0x00004000,
+ ATH5K_DEBUG_ANY = 0xffffffff
+};
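+
+/*
+ * For example (illustrative), loading the module with
+ *
+ *   modprobe ath5k debug=0x12
+ *
+ * enables ATH5K_DEBUG_INTR | ATH5K_DEBUG_BEACON output; individual bits can
+ * later be toggled through the debugfs 'debug' file.
+ */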
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \
+ if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \
+ ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
+#define ATH5K_DBG_UNLIMIT(_sc, _m, _fmt, ...) do { \
+ if (unlikely((_sc)->debug.level & (_m))) \
+ ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
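+
+/*
+ * Typical call site (taken from debug.c):
+ *
+ *   ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
+ *
+ * The message is only emitted when the ATH5K_DEBUG_RESET bit is set in
+ * ah->debug.level; ATH5K_DBG is additionally rate-limited through
+ * net_ratelimit(), while ATH5K_DBG_UNLIMIT is not.
+ */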
+
+void
+ath5k_debug_init_device(struct ath5k_hw *ah);
+
+void
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah);
+
+void
+ath5k_debug_dump_bands(struct ath5k_hw *ah);
+
+void
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
+
+#else /* no debugging */
+
+#include <linux/compiler.h>
+
+static inline __printf(3, 4) void
+ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
+
+static inline __printf(3, 4) void
+ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
+{}
+
+static inline void
+ath5k_debug_init_device(struct ath5k_hw *ah) {}
+
+static inline void
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah) {}
+
+static inline void
+ath5k_debug_dump_bands(struct ath5k_hw *ah) {}
+
+static inline void
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) {}
+
+#endif /* ifdef CONFIG_ATH5K_DEBUG */
+
+#endif /* ifndef _ATH5K_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
new file mode 100644
index 000000000..bd8d4392d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/******************************\
+ Hardware Descriptor Functions
+\******************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors, which tell the hw how to
+ * send or receive a packet and where to read/write it from/to, and status
+ * descriptors, which contain information about how the packet was sent or
+ * received (errors included).
+ *
+ * Descriptor format is not exactly the same for each MAC chip version so we
+ * have function pointers on &struct ath5k_hw we initialize at runtime based on
+ * the chip used.
+ */
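+
+/*
+ * Note: the per-chip variants below (2-word vs 4-word TX, 5210 vs 5212 RX)
+ * are reached through function pointers on &struct ath5k_hw, e.g.
+ * ah->ah_proc_rx_desc and ah->ah_proc_tx_desc as used by the debug code,
+ * which a setup helper further down in this file selects at runtime based
+ * on ah->ah_version.
+ */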
+
+
+/************************\
+* TX Control descriptors *
+\************************/
+
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
+ */
+static int
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
+{
+ u32 frame_type;
+ struct ath5k_hw_2w_tx_ctl *tx_ctl;
+ unsigned int frame_len;
+
+ tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
+
+ /*
+ * Validate input
+ * - Zero retries don't make sense.
+ * - A zero rate will put the HW into a mode where it continuously sends
+ * noise on the channel, so it is important to avoid this.
+ */
+ if (unlikely(tx_tries0 == 0)) {
+ ATH5K_ERR(ah, "zero retries\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ if (unlikely(tx_rate0 == 0)) {
+ ATH5K_ERR(ah, "zero rate\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Clear descriptor */
+ memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
+
+ /* Setup control descriptor */
+
+ /* Verify and set frame length */
+
+ /* remove padding we might have added before */
+ frame_len = pkt_len - padsize + FCS_LEN;
+
+ if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
+ return -EINVAL;
+
+ tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
+
+ /* Verify and set buffer length */
+
+ /* NB: beacon's BufLen must be a multiple of 4 bytes */
+ if (type == AR5K_PKT_TYPE_BEACON)
+ pkt_len = roundup(pkt_len, 4);
+
+ if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
+ return -EINVAL;
+
+ tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
+
+ /*
+ * Verify and set header length (only 5210)
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
+ return -EINVAL;
+ tx_ctl->tx_control_0 |=
+ AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
+ }
+
+ /* Differences between 5210 and 5211 */
+ if (ah->ah_version == AR5K_AR5210) {
+ switch (type) {
+ case AR5K_PKT_TYPE_BEACON:
+ case AR5K_PKT_TYPE_PROBE_RESP:
+ frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+ break;
+ case AR5K_PKT_TYPE_PIFS:
+ frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+ break;
+ default:
+ frame_type = type;
+ break;
+ }
+
+ tx_ctl->tx_control_0 |=
+ AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
+ AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
+
+ } else {
+ tx_ctl->tx_control_0 |=
+ AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
+ AR5K_REG_SM(antenna_mode,
+ AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
+ tx_ctl->tx_control_1 |=
+ AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
+ }
+
+#define _TX_FLAGS(_c, _flag) \
+ if (flags & AR5K_TXDESC_##_flag) { \
+ tx_ctl->tx_control_##_c |= \
+ AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
+ }
+#define _TX_FLAGS_5211(_c, _flag) \
+ if (flags & AR5K_TXDESC_##_flag) { \
+ tx_ctl->tx_control_##_c |= \
+ AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
+ }
+ _TX_FLAGS(0, CLRDMASK);
+ _TX_FLAGS(0, INTREQ);
+ _TX_FLAGS(0, RTSENA);
+
+ if (ah->ah_version == AR5K_AR5211) {
+ _TX_FLAGS_5211(0, VEOL);
+ _TX_FLAGS_5211(1, NOACK);
+ }
+
+#undef _TX_FLAGS
+#undef _TX_FLAGS_5211
+
+ /*
+ * WEP crap
+ */
+ if (key_index != AR5K_TXKEYIX_INVALID) {
+ tx_ctl->tx_control_0 |=
+ AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
+ tx_ctl->tx_control_1 |=
+ AR5K_REG_SM(key_index,
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
+ }
+
+ /*
+ * RTS/CTS Duration [5210 ?]
+ */
+ if ((ah->ah_version == AR5K_AR5210) &&
+ (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
+ tx_ctl->tx_control_1 |= rtscts_duration &
+ AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
+ */
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
+{
+ struct ath5k_hw_4w_tx_ctl *tx_ctl;
+ unsigned int frame_len;
+
+ /*
+ * Use local variables for these to reduce load/store access on
+ * uncached memory
+ */
+ u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
+
+ tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
+
+ /*
+ * Validate input
+ * - Zero retries don't make sense.
+ * - A zero rate will put the HW into a mode where it continuously sends
+ * noise on the channel, so it is important to avoid this.
+ */
+ if (unlikely(tx_tries0 == 0)) {
+ ATH5K_ERR(ah, "zero retries\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ if (unlikely(tx_rate0 == 0)) {
+ ATH5K_ERR(ah, "zero rate\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ tx_power += ah->ah_txpower.txp_offset;
+ if (tx_power > AR5K_TUNE_MAX_TXPOWER)
+ tx_power = AR5K_TUNE_MAX_TXPOWER;
+
+ /* Clear descriptor status area */
+ memset(&desc->ud.ds_tx5212.tx_stat, 0,
+ sizeof(desc->ud.ds_tx5212.tx_stat));
+
+ /* Setup control descriptor */
+
+ /* Verify and set frame length */
+
+ /* remove padding we might have added before */
+ frame_len = pkt_len - padsize + FCS_LEN;
+
+ if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
+ return -EINVAL;
+
+ txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
+
+ /* Verify and set buffer length */
+
+ /* NB: beacon's BufLen must be a multiple of 4 bytes */
+ if (type == AR5K_PKT_TYPE_BEACON)
+ pkt_len = roundup(pkt_len, 4);
+
+ if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
+ return -EINVAL;
+
+ txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
+
+ txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
+ AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
+ txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
+ txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
+ txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
+
+#define _TX_FLAGS(_c, _flag) \
+ if (flags & AR5K_TXDESC_##_flag) { \
+ txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
+ }
+
+ _TX_FLAGS(0, CLRDMASK);
+ _TX_FLAGS(0, VEOL);
+ _TX_FLAGS(0, INTREQ);
+ _TX_FLAGS(0, RTSENA);
+ _TX_FLAGS(0, CTSENA);
+ _TX_FLAGS(1, NOACK);
+
+#undef _TX_FLAGS
+
+ /*
+ * WEP crap
+ */
+ if (key_index != AR5K_TXKEYIX_INVALID) {
+ txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
+ txctl1 |= AR5K_REG_SM(key_index,
+ AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
+ }
+
+ /*
+ * RTS/CTS
+ */
+ if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
+ if ((flags & AR5K_TXDESC_RTSENA) &&
+ (flags & AR5K_TXDESC_CTSENA))
+ return -EINVAL;
+ txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
+ txctl3 |= AR5K_REG_SM(rtscts_rate,
+ AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
+ }
+
+ tx_ctl->tx_control_0 = txctl0;
+ tx_ctl->tx_control_1 = txctl1;
+ tx_ctl->tx_control_2 = txctl2;
+ tx_ctl->tx_control_3 = txctl3;
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs, they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
+ */
+int
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u_int tx_rate1, u_int tx_tries1,
+ u_int tx_rate2, u_int tx_tries2,
+ u_int tx_rate3, u_int tx_tries3)
+{
+ struct ath5k_hw_4w_tx_ctl *tx_ctl;
+
+ /* no mrr support for cards older than 5212 */
+ if (ah->ah_version < AR5K_AR5212)
+ return 0;
+
+ /*
+ * Rates can be 0 as long as the retry count is 0 too.
+ * A zero rate and nonzero retry count will put the HW into a mode where
+ * it continuously sends noise on the channel, so it is important to
+ * avoid this.
+ */
+ if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
+ (tx_rate2 == 0 && tx_tries2 != 0) ||
+ (tx_rate3 == 0 && tx_tries3 != 0))) {
+ ATH5K_ERR(ah, "zero rate\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (ah->ah_version == AR5K_AR5212) {
+ tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
+
+#define _XTX_TRIES(_n) \
+ if (tx_tries##_n) { \
+ tx_ctl->tx_control_2 |= \
+ AR5K_REG_SM(tx_tries##_n, \
+ AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
+ tx_ctl->tx_control_3 |= \
+ AR5K_REG_SM(tx_rate##_n, \
+ AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
+ }
+
+ _XTX_TRIES(1);
+ _XTX_TRIES(2);
+ _XTX_TRIES(3);
+
+#undef _XTX_TRIES
+
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/***********************\
+* TX Status descriptors *
+\***********************/
+
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
+ */
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
+{
+ struct ath5k_hw_tx_status *tx_status;
+
+ tx_status = &desc->ud.ds_tx5210.tx_stat;
+
+ /* No frame has been sent, or an error occurred */
+ if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
+ return -EINPROGRESS;
+
+ /*
+ * Get descriptor status
+ */
+ ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
+ AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
+ ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
+ AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
+ ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
+ AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
+ /* TODO: ts->ts_virtcol + test */
+ ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
+ AR5K_DESC_TX_STATUS1_SEQ_NUM);
+ ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
+ AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
+ ts->ts_antenna = 1;
+ ts->ts_status = 0;
+ ts->ts_final_idx = 0;
+
+ if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
+ if (tx_status->tx_status_0 &
+ AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
+ ts->ts_status |= AR5K_TXERR_XRETRY;
+
+ if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
+ ts->ts_status |= AR5K_TXERR_FIFO;
+
+ if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
+ ts->ts_status |= AR5K_TXERR_FILT;
+ }
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
+ */
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
+{
+ struct ath5k_hw_tx_status *tx_status;
+ u32 txstat0, txstat1;
+
+ tx_status = &desc->ud.ds_tx5212.tx_stat;
+
+ txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+
+ /* No frame has been sent yet, or there was an error */
+ if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
+ return -EINPROGRESS;
+
+ txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+
+ /*
+ * Get descriptor status
+ */
+ ts->ts_tstamp = AR5K_REG_MS(txstat0,
+ AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
+ ts->ts_shortretry = AR5K_REG_MS(txstat0,
+ AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
+ ts->ts_final_retry = AR5K_REG_MS(txstat0,
+ AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
+ ts->ts_seqnum = AR5K_REG_MS(txstat1,
+ AR5K_DESC_TX_STATUS1_SEQ_NUM);
+ ts->ts_rssi = AR5K_REG_MS(txstat1,
+ AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
+ ts->ts_antenna = (txstat1 &
+ AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
+ ts->ts_status = 0;
+
+ ts->ts_final_idx = AR5K_REG_MS(txstat1,
+ AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
+
+ /* TX error */
+ if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
+ if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
+ ts->ts_status |= AR5K_TXERR_XRETRY;
+
+ if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
+ ts->ts_status |= AR5K_TXERR_FIFO;
+
+ if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
+ ts->ts_status |= AR5K_TXERR_FILT;
+ }
+
+ return 0;
+}
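
For context, a minimal sketch (not part of the patch) of how a tx completion path might consume the abstract status through the ah_proc_tx_desc pointer wired up in ath5k_hw_init_desc_functions() below; reacting to a FIFO underrun by raising the trigger level via ath5k_hw_update_tx_triglevel() (dma.c in this series) is one plausible policy.

static void example_handle_tx_completion(struct ath5k_hw *ah,
                                         struct ath5k_desc *desc)
{
        struct ath5k_tx_status ts = {};

        if (ah->ah_proc_tx_desc(ah, desc, &ts) == -EINPROGRESS)
                return;                         /* HW is not done with it yet */

        if (ts.ts_status == 0)
                return;                         /* transmitted OK */

        /* FIFO underrun: one common reaction is to grow the tx
         * trigger level (FIFO threshold) and retry later */
        if (ts.ts_status & AR5K_TXERR_FIFO)
                ath5k_hw_update_tx_triglevel(ah, true);
}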
+
+
+/****************\
+* RX Descriptors *
+\****************/
+
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
+ */
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
+{
+ struct ath5k_hw_rx_ctl *rx_ctl;
+
+ rx_ctl = &desc->ud.ds_rx.rx_ctl;
+
+ /*
+ * Clear the descriptor
+ * If we don't clean the status descriptor,
+ * while scanning we get too many results,
+ * most of them virtual; after some secs
+ * of scanning the system hangs. M.F.
+ */
+ memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
+
+ if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
+ return -EINVAL;
+
+ /* Setup descriptor */
+ rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
+
+ if (flags & AR5K_RXDESC_INTREQ)
+ rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
+
+ return 0;
+}
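
On the caller side (assumed skb-based rx path, not from this patch) the buffer length has to fit in the 12-bit AR5K_DESC_RX_CTL1_BUF_LEN field defined in desc.h, otherwise the check above rejects it with -EINVAL:

static int example_init_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
                                u32 skb_len)
{
        /* 0xfff is the largest length the 12-bit BUF_LEN field can hold */
        if (skb_len > AR5K_DESC_RX_CTL1_BUF_LEN)
                return -EINVAL;

        /* Request an RX interrupt when this descriptor completes */
        return ath5k_hw_setup_rx_desc(ah, desc, skb_len, AR5K_RXDESC_INTREQ);
}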
+
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
+ */
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
+{
+ struct ath5k_hw_rx_status *rx_status;
+
+ rx_status = &desc->ud.ds_rx.rx_stat;
+
+ /* No frame received / not ready */
+ if (unlikely(!(rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_DONE)))
+ return -EINPROGRESS;
+
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+
+ /*
+ * Frame receive status
+ */
+ rs->rs_datalen = rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
+ rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
+ rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
+ rs->rs_more = !!(rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_MORE);
+ /* TODO: this timestamp is 13 bit, later on we assume 15 bit!
+ * also the HAL code for 5210 says the timestamp is bits [10..22] of the
+ * TSF, and extends the timestamp here to 15 bit.
+ * we need to check on 5210...
+ */
+ rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
+ AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
+
+ if (ah->ah_version == AR5K_AR5211)
+ rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
+ else
+ rs->rs_antenna = (rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
+ ? 2 : 1;
+
+ /*
+ * Key table status
+ */
+ if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
+ rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
+ AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
+ else
+ rs->rs_keyix = AR5K_RXKEYIX_INVALID;
+
+ /*
+ * Receive/descriptor errors
+ */
+ if (!(rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ if (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
+ rs->rs_status |= AR5K_RXERR_CRC;
+
+ /* only on 5210 */
+ if ((ah->ah_version == AR5K_AR5210) &&
+ (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
+ rs->rs_status |= AR5K_RXERR_FIFO;
+
+ if (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
+ rs->rs_status |= AR5K_RXERR_PHY;
+ rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
+ AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
+ }
+
+ if (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
+ rs->rs_status |= AR5K_RXERR_DECRYPT;
+ }
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
+ */
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
+{
+ struct ath5k_hw_rx_status *rx_status;
+ u32 rxstat0, rxstat1;
+
+ rx_status = &desc->ud.ds_rx.rx_stat;
+ rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+
+ /* No frame received / not ready */
+ if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
+ return -EINPROGRESS;
+
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+ rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+
+ /*
+ * Frame receive status
+ */
+ rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
+ rs->rs_rssi = AR5K_REG_MS(rxstat0,
+ AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
+ rs->rs_rate = AR5K_REG_MS(rxstat0,
+ AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
+ rs->rs_antenna = AR5K_REG_MS(rxstat0,
+ AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
+ rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
+ rs->rs_tstamp = AR5K_REG_MS(rxstat1,
+ AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
+
+ /*
+ * Key table status
+ */
+ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
+ rs->rs_keyix = AR5K_REG_MS(rxstat1,
+ AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
+ else
+ rs->rs_keyix = AR5K_RXKEYIX_INVALID;
+
+ /*
+ * Receive/descriptor errors
+ */
+ if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
+ rs->rs_status |= AR5K_RXERR_CRC;
+
+ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
+ rs->rs_status |= AR5K_RXERR_PHY;
+ rs->rs_phyerr = AR5K_REG_MS(rxstat1,
+ AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
+ }
+
+ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
+ rs->rs_status |= AR5K_RXERR_DECRYPT;
+
+ if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
+ rs->rs_status |= AR5K_RXERR_MIC;
+ }
+ return 0;
+}
+
+
+/********\
+* Attach *
+\********/
+
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, used
+ * from above. This is used as an abstraction layer to handle the various chips
+ * the same way.
+ */
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+{
+ if (ah->ah_version == AR5K_AR5212) {
+ ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
+ ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
+ ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
+ } else if (ah->ah_version <= AR5K_AR5211) {
+ ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
+ ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
+ ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
+ } else
+ return -ENOTSUPP;
+ return 0;
+}
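
As an illustration of the abstraction (sketch only, error handling simplified), the driver core can stay chip-agnostic by always going through these pointers:

static int example_poll_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc)
{
        struct ath5k_rx_status rs = {};
        int ret;

        /* Resolves to the 5210/11 or 5212 variant picked above */
        ret = ah->ah_proc_rx_desc(ah, desc, &rs);
        if (ret == -EINPROGRESS)
                return ret;             /* nothing received yet */

        if (rs.rs_status & AR5K_RXERR_CRC)
                return -EIO;            /* drop frames with a bad FCS */

        return rs.rs_datalen;           /* payload bytes in the buffer */
}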
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
new file mode 100644
index 000000000..8d6c01a49
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*
+ * RX/TX descriptor structures
+ */
+
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
+ */
+struct ath5k_hw_rx_ctl {
+ u32 rx_control_0;
+ u32 rx_control_1;
+} __packed __aligned(4);
+
+/* RX control word 1 fields/flags */
+#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
+
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
+ * 5210, 5211 and 5212 differ only in the fields and flags defined below
+ */
+struct ath5k_hw_rx_status {
+ u32 rx_status_0;
+ u32 rx_status_1;
+} __packed __aligned(4);
+
+/* 5210/5211 */
+/* RX status word 0 fields/flags */
+#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27
+
+/* RX status word 1 fields/flags */
+#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
+#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decryption CRC failure */
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decryption key index */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
+#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */
+
+/* 5212 */
+/* RX status word 0 fields/flags */
+#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
+
+/* RX status word 1 fields/flags */
+#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */
+#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */
+#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15bit of the TSF */
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
+#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8
+
+/**
+ * enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
+ */
+enum ath5k_phy_error_code {
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0,
+ AR5K_RX_PHY_ERROR_TIMING = 1,
+ AR5K_RX_PHY_ERROR_PARITY = 2,
+ AR5K_RX_PHY_ERROR_RATE = 3,
+ AR5K_RX_PHY_ERROR_LENGTH = 4,
+ AR5K_RX_PHY_ERROR_RADAR = 5,
+ AR5K_RX_PHY_ERROR_SERVICE = 6,
+ AR5K_RX_PHY_ERROR_TOR = 7,
+ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
+ AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
+ AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
+ AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
+ AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
+ AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
+ AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
+ AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
+ AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
+ AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
+ AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
+ AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
+};
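
Since the 5212+ codes are grouped numerically (17..23 for OFDM, 25..31 for CCK), a simple range check is enough to classify them when gathering statistics; a small sketch, not part of the patch:

static bool example_phyerr_is_ofdm(enum ath5k_phy_error_code code)
{
        return code >= AR5K_RX_PHY_ERROR_OFDM_TIMING &&
               code <= AR5K_RX_PHY_ERROR_OFDM_RESTART;
}

static bool example_phyerr_is_cck(enum ath5k_phy_error_code code)
{
        return code >= AR5K_RX_PHY_ERROR_CCK_TIMING &&
               code <= AR5K_RX_PHY_ERROR_CCK_RESTART;
}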
+
+/**
+ * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ */
+struct ath5k_hw_2w_tx_ctl {
+ u32 tx_control_0;
+ u32 tx_control_1;
+} __packed __aligned(4);
+
+/* TX control word 0 fields/flags */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18
+#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */
+#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */
+#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
+ (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
+ AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26
+#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */
+
+/* TX control word 1 fields/flags */
+#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \
+ (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20
+#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */
+#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */
+
+/* Frame types */
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
+
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
+ */
+struct ath5k_hw_4w_tx_ctl {
+ u32 tx_control_0;
+ u32 tx_control_1;
+ u32 tx_control_2;
+ u32 tx_control_3;
+} __packed __aligned(4);
+
+/* TX control word 0 fields/flags */
+#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16
+#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */
+#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
+#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */
+#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */
+
+/* TX control word 1 fields/flags */
+#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20
+#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29
+
+/* TX control word 2 fields/flags */
+#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */
+#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
+
+/* TX control word 3 fields/flags */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
+
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
+ */
+struct ath5k_hw_tx_status {
+ u32 tx_status_0;
+ u32 tx_status_1;
+} __packed __aligned(4);
+
+/* TX status word 0 fields/flags */
+#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
+#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */
+#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */
+#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */
+/* according to the HAL sources the spec has short/long retry counts reversed.
+ * we have it reversed to the HAL sources as well, for 5210 and 5211.
+ * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
+ * but used respectively as SHORT and LONG retry count in the code later. This
+ * is consistent with the definitions here... TODO: check */
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5212_S 12
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
+
+/* TX status word 1 fields/flags */
+#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21
+#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
+#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
+
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
+ */
+struct ath5k_hw_5210_tx_desc {
+ struct ath5k_hw_2w_tx_ctl tx_ctl;
+ struct ath5k_hw_tx_status tx_stat;
+} __packed __aligned(4);
+
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
+ */
+struct ath5k_hw_5212_tx_desc {
+ struct ath5k_hw_4w_tx_ctl tx_ctl;
+ struct ath5k_hw_tx_status tx_stat;
+} __packed __aligned(4);
+
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
+ */
+struct ath5k_hw_all_rx_desc {
+ struct ath5k_hw_rx_ctl rx_ctl;
+ struct ath5k_hw_rx_status rx_stat;
+} __packed __aligned(4);
+
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
+ * This is read and written to by the hardware
+ */
+struct ath5k_desc {
+ u32 ds_link;
+ u32 ds_data;
+
+ union {
+ struct ath5k_hw_5210_tx_desc ds_tx5210;
+ struct ath5k_hw_5212_tx_desc ds_tx5212;
+ struct ath5k_hw_all_rx_desc ds_rx;
+ } ud;
+} __packed __aligned(4);
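
Because ds_link must hold the physical address of the next descriptor, rings are typically carved out of a single coherent DMA allocation. A hedged sketch, assuming a plain dma_alloc_coherent() ring and 32-bit DMA addressing (neither detail comes from this patch):

static struct ath5k_desc *example_alloc_desc_ring(struct device *dev,
                                                  unsigned int ndesc,
                                                  dma_addr_t *dma_base)
{
        struct ath5k_desc *ring;
        unsigned int i;

        ring = dma_alloc_coherent(dev, ndesc * sizeof(*ring),
                                  dma_base, GFP_KERNEL);
        if (!ring)
                return NULL;

        /* Chain the descriptors into a circular list via ds_link
         * (ds_link is u32, so a 32-bit DMA mask is assumed here) */
        for (i = 0; i < ndesc; i++)
                ring[i].ds_link = *dma_base +
                                  ((i + 1) % ndesc) * sizeof(struct ath5k_desc);

        return ring;
}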
+
+#define AR5K_RXDESC_INTREQ 0x0020
+
+#define AR5K_TXDESC_CLRDMASK 0x0001
+#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
+#define AR5K_TXDESC_RTSENA 0x0004
+#define AR5K_TXDESC_CTSENA 0x0008
+#define AR5K_TXDESC_INTREQ 0x0010
+#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
new file mode 100644
index 000000000..e6c52f7c2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* DMA and interrupt masking functions *
+\*************************************/
+
+/**
+ * DOC: DMA and interrupt masking functions
+ *
+ * Here we set up descriptor pointers (rxdp/txdp), start/stop the DMA engine
+ * and handle queue setup for the 5210 chipset (the rest is handled in qcu.c).
+ * We also set up the interrupt mask register (IMR) and read the various
+ * interrupt status registers (ISR).
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+
+/*********\
+* Receive *
+\*********/
+
+/**
+ * ath5k_hw_start_rx_dma() - Start DMA receive
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
+ ath5k_hw_reg_read(ah, AR5K_CR);
+}
+
+/**
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
+ * @ah: The &struct ath5k_hw
+ */
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+{
+ unsigned int i;
+
+ ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
+
+ /*
+ * It may take some time to disable the DMA receive unit
+ */
+ for (i = 1000; i > 0 &&
+ (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
+ i--)
+ udelay(100);
+
+ if (!i)
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "failed to stop RX DMA !\n");
+
+ return i ? 0 : -EBUSY;
+}
+
+/**
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
+ * @ah: The &struct ath5k_hw
+ */
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+{
+ return ath5k_hw_reg_read(ah, AR5K_RXDP);
+}
+
+/**
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
+ * @ah: The &struct ath5k_hw
+ * @phys_addr: RX descriptor address
+ *
+ * Returns -EIO if rx is active
+ */
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+{
+ if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "tried to set RXDP while rx was active !\n");
+ return -EIO;
+ }
+
+ ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
+ return 0;
+}
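
Taken together, the RX helpers imply an ordering: RXDP can only be programmed while the receive engine is idle. A sketch of a restart path (32-bit descriptor address assumed, not from the patch):

static int example_restart_rx(struct ath5k_hw *ah, u32 first_desc_addr)
{
        int ret;

        /* Fails with -EIO if AR5K_CR_RXE is still set, i.e. RX DMA was
         * not stopped first (ath5k_hw_stop_rx_dma() is static here, so
         * a driver reaches it through its own rx-stop path). */
        ret = ath5k_hw_set_rxdp(ah, first_desc_addr);
        if (ret)
                return ret;

        ath5k_hw_start_rx_dma(ah);      /* sets AR5K_CR_RXE again */
        return 0;
}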
+
+
+/**********\
+* Transmit *
+\**********/
+
+/**
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Start DMA transmit for a specific queue and since 5210 doesn't have
+ * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
+ * queue for normal data and one queue for beacons). For queue setup
+ * on newer chips check out qcu.c. Returns -EINVAL if the queue number is out
+ * of range or the queue is inactive, and -EIO if the queue is already disabled.
+ *
+ * NOTE: Must be called after setting up tx control descriptor for that
+ * queue (see below).
+ */
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+{
+ u32 tx_queue;
+
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /* Return if queue is declared inactive */
+ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return -EINVAL;
+
+ if (ah->ah_version == AR5K_AR5210) {
+ tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
+
+ /*
+ * Set the queue by type on 5210
+ */
+ switch (ah->ah_txq[queue].tqi_type) {
+ case AR5K_TX_QUEUE_DATA:
+ tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
+ ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
+ AR5K_BSR);
+ break;
+ case AR5K_TX_QUEUE_CAB:
+ tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
+ ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
+ AR5K_BCR_BDMAE, AR5K_BSR);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Start queue */
+ ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
+ ath5k_hw_reg_read(ah, AR5K_CR);
+ } else {
+ /* Return if queue is disabled */
+ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
+ return -EIO;
+
+ /* Start queue */
+ AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
+ }
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Stop DMA transmit on a specific hw queue and drain queue so we don't
+ * have any pending frames. Returns -EBUSY if we still have pending frames,
+ * -EINVAL if queue number is out of range or inactive.
+ */
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+{
+ unsigned int i = 40;
+ u32 tx_queue, pending;
+
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /* Return if queue is declared inactive */
+ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return -EINVAL;
+
+ if (ah->ah_version == AR5K_AR5210) {
+ tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
+
+ /*
+ * Set by queue type
+ */
+ switch (ah->ah_txq[queue].tqi_type) {
+ case AR5K_TX_QUEUE_DATA:
+ tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ case AR5K_TX_QUEUE_CAB:
+ /* XXX Fix me... */
+ tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
+ ath5k_hw_reg_write(ah, 0, AR5K_BSR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop queue */
+ ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
+ ath5k_hw_reg_read(ah, AR5K_CR);
+ } else {
+
+ /*
+ * Enable DCU early termination to quickly
+ * flush any pending frames from QCU
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_DCU_EARLY);
+
+ /*
+ * Schedule TX disable and wait until queue is empty
+ */
+ AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
+
+ /* Wait for queue to stop */
+ for (i = 1000; i > 0 &&
+ (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
+ i--)
+ udelay(100);
+
+ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "queue %i didn't stop !\n", queue);
+
+ /* Check for pending frames */
+ i = 1000;
+ do {
+ pending = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_STATUS(queue)) &
+ AR5K_QCU_STS_FRMPENDCNT;
+ udelay(100);
+ } while (--i && pending);
+
+ /* For 2413+ order PCU to drop packets using
+ * QUIET mechanism */
+ if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
+ pending) {
+ /* Set periodicity and duration */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
+ AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
+ AR5K_QUIET_CTL2);
+
+ /* Enable quiet period for current TSF */
+ ath5k_hw_reg_write(ah,
+ AR5K_QUIET_CTL1_QT_EN |
+ AR5K_REG_SM(ath5k_hw_reg_read(ah,
+ AR5K_TSF_L32_5211) >> 10,
+ AR5K_QUIET_CTL1_NEXT_QT_TSF),
+ AR5K_QUIET_CTL1);
+
+ /* Force channel idle high */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
+ AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
+
+ /* Wait a while and disable mechanism */
+ udelay(400);
+ AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
+ AR5K_QUIET_CTL1_QT_EN);
+
+ /* Re-check for pending frames */
+ i = 100;
+ do {
+ pending = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_STATUS(queue)) &
+ AR5K_QCU_STS_FRMPENDCNT;
+ udelay(100);
+ } while (--i && pending);
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
+ AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
+
+ if (pending)
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "quiet mechanism didn't work q:%i !\n",
+ queue);
+ }
+
+ /*
+ * Disable DCU early termination
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_DCU_EARLY);
+
+ /* Clear register */
+ ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
+ if (pending) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "tx dma didn't stop (q:%i, frm:%i) !\n",
+ queue, pending);
+ return -EBUSY;
+ }
+ }
+
+ /* TODO: Check for success on 5210 else return error */
+ return 0;
+}
+
+/**
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
+ *
+ * Returns -EIO if queue didn't stop
+ */
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+ int ret;
+ ret = ath5k_hw_stop_tx_dma(ah, queue);
+ if (ret) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
+ "beacon queue didn't stop !\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Get TX descriptor's address for a specific queue. For 5210 we ignore
+ * the queue number and use tx queue type since we only have 2 queues.
+ * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
+ * For newer chips with QCU/DCU we just read the corresponding TXDP register.
+ *
+ * XXX: Is TXDP read and clear ?
+ */
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+{
+ u16 tx_reg;
+
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /*
+ * Get the transmit queue descriptor pointer from the selected queue
+ */
+ /*5210 doesn't have QCU*/
+ if (ah->ah_version == AR5K_AR5210) {
+ switch (ah->ah_txq[queue].tqi_type) {
+ case AR5K_TX_QUEUE_DATA:
+ tx_reg = AR5K_NOQCU_TXDP0;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ case AR5K_TX_QUEUE_CAB:
+ tx_reg = AR5K_NOQCU_TXDP1;
+ break;
+ default:
+ return 0xffffffff;
+ }
+ } else {
+ tx_reg = AR5K_QUEUE_TXDP(queue);
+ }
+
+ return ath5k_hw_reg_read(ah, tx_reg);
+}
+
+/**
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ * @phys_addr: The physical address
+ *
+ * Set TX descriptor's address for a specific queue. For 5210 we ignore
+ * the queue number and we use tx queue type since we only have 2 queues
+ * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
+ * For newer chips with QCU/DCU we just set the corresponding TXDP register.
+ * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
+ * active.
+ */
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+{
+ u16 tx_reg;
+
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /*
+ * Set the transmit queue descriptor pointer register by type
+ * on 5210
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ switch (ah->ah_txq[queue].tqi_type) {
+ case AR5K_TX_QUEUE_DATA:
+ tx_reg = AR5K_NOQCU_TXDP0;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ case AR5K_TX_QUEUE_CAB:
+ tx_reg = AR5K_NOQCU_TXDP1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * Set the transmit queue descriptor pointer for
+ * the selected queue on QCU for 5211+
+ * (this won't work if the queue is still active)
+ */
+ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ return -EIO;
+
+ tx_reg = AR5K_QUEUE_TXDP(queue);
+ }
+
+ /* Set descriptor pointer */
+ ath5k_hw_reg_write(ah, phys_addr, tx_reg);
+
+ return 0;
+}
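
Mirroring the note above ath5k_hw_start_tx_dma(), the descriptor pointer has to be in place before the queue is started; a minimal sketch (queue number and address are placeholders):

static int example_kick_tx_queue(struct ath5k_hw *ah, unsigned int queue,
                                 u32 first_desc_addr)
{
        int ret;

        ret = ath5k_hw_set_txdp(ah, queue, first_desc_addr);
        if (ret)        /* -EINVAL: bad 5210 queue type, -EIO: queue active */
                return ret;

        return ath5k_hw_start_tx_dma(ah, queue);
}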
+
+/**
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
+ * @ah: The &struct ath5k_hw
+ * @increase: Flag to force increase of trigger level
+ *
+ * This function increases/decreases the tx trigger level for the tx fifo
+ * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
+ * the buffer and transmits its data. Lowering this results in sending small
+ * frames more quickly but can lead to tx underruns; raising it a lot can
+ * cause other problems. Right now we start with the lowest possible value
+ * (64 bytes) and if we get a tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached the maximum/minimum.
+ *
+ * XXX: Link this with tx DMA size ?
+ * XXX2: Use it to save interrupts ?
+ */
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+{
+ u32 trigger_level, imr;
+ int ret = -EIO;
+
+ /*
+ * Disable interrupts by setting the mask
+ */
+ imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
+
+ trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
+ AR5K_TXCFG_TXFULL);
+
+ if (!increase) {
+ if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
+ goto done;
+ } else
+ trigger_level +=
+ ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
+
+ /*
+ * Update trigger level on success
+ */
+ if (ah->ah_version == AR5K_AR5210)
+ ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
+ else
+ AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_TXFULL, trigger_level);
+
+ ret = 0;
+
+done:
+ /*
+ * Restore interrupt mask
+ */
+ ath5k_hw_set_imr(ah, imr);
+
+ return ret;
+}
+
+
+/*******************\
+* Interrupt masking *
+\*******************/
+
+/**
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if we have pending interrupts to process. Returns 1 if we
+ * have pending interrupts and 0 if we don't.
+ */
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+{
+ return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
+}
+
+/**
+ * ath5k_hw_get_isr() - Get interrupt status
+ * @ah: The &struct ath5k_hw
+ * @interrupt_mask: Driver's interrupt mask used to filter out
+ * interrupts in sw.
+ *
+ * This function is used inside our interrupt handler to determine the reason
+ * for the interrupt by reading Primary Interrupt Status Register. Returns an
+ * abstract interrupt status mask which is mostly ISR with some uncommon bits
+ * being mapped on some standard non hw-specific positions
+ * (check out &ath5k_int).
+ *
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
+ */
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+{
+ u32 data = 0;
+
+ /*
+ * Read interrupt status from Primary Interrupt
+ * Register.
+ *
+ * Note: PISR/SISR Not available on 5210
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ u32 isr = 0;
+ isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+ if (unlikely(isr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = isr;
+ return -ENODEV;
+ }
+
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+ /* Handle INT_FATAL */
+ if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+ | AR5K_ISR_DPERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*
+ * XXX: BMISS interrupts may occur after association.
+ * I found this on 5210 code but it needs testing. If this is
+ * true we should disable them before assoc and re-enable them
+ * after a successful assoc + some jiffies.
+ interrupt_mask &= ~AR5K_INT_BMISS;
+ */
+
+ data = isr;
+ } else {
+ u32 pisr = 0;
+ u32 pisr_clear = 0;
+ u32 sisr0 = 0;
+ u32 sisr1 = 0;
+ u32 sisr2 = 0;
+ u32 sisr3 = 0;
+ u32 sisr4 = 0;
+
+ /* Read PISR and SISRs... */
+ pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+ if (unlikely(pisr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = pisr;
+ return -ENODEV;
+ }
+
+ sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+ sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+ sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+ sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+ sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
+
+ /*
+ * PISR holds the logical OR of interrupt bits
+ * from SISR registers:
+ *
+ * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
+ * per-queue bits on SISR0
+ *
+ * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+ * per-queue bits on SISR1
+ *
+ * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+ *
+ * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
+ *
+ * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+ * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+ * (and TSFOOR ?) bits on SISR2
+ *
+ * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+ * QCBRURN per-queue bits on SISR3
+ * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+ *
+ * If we clear these bits on PISR we'll also clear all
+ * related bits from the SISRs, e.g. if we write the TXOK bit on
+ * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+ * interrupt gets fired for another queue while we are reading
+ * the interrupt registers and we write back the TXOK bit on
+ * PISR we'll lose it. So make sure that we don't write back
+ * on PISR any bits that come from the SISRs. Clearing them from
+ * the SISRs will also clear PISR, so there is no need to worry here.
+ */
+
+ /* XXX: There seems to be an issue on some cards
+ * with tx interrupt flags not being updated
+ * on PISR despite that all Tx interrupt bits
+ * are cleared on SISRs. Since we handle all
+ * Tx queues all together it shouldn't be an
+ * issue if we clear Tx interrupt flags also
+ * on PISR to avoid that.
+ */
+ pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
+ (pisr & AR5K_INT_TX_ALL);
+
+ /*
+ * Write to clear them...
+ * Note: This means that each bit we write back
+ * to the registers will get cleared, leaving the
+ * rest unaffected. So this won't affect new interrupts
+ * we didn't catch while reading/processing; we'll get
+ * them the next time get_isr gets called.
+ */
+ ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+ ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+ ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+ ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+ ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+ ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+ /* Flush previous write */
+ ath5k_hw_reg_read(ah, AR5K_PISR);
+
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+ /* We treat TXOK,TXDESC, TXERR and TXEOL
+ * the same way (schedule the tx tasklet)
+ * so we track them all together per queue */
+ if (pisr & AR5K_ISR_TXOK)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXOK);
+
+ if (pisr & AR5K_ISR_TXDESC)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXDESC);
+
+ if (pisr & AR5K_ISR_TXERR)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXERR);
+
+ if (pisr & AR5K_ISR_TXEOL)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXEOL);
+
+ /* Currently this is not very useful since we treat
+ * all queues the same way if we get a TXURN (update
+ * tx trigger level), but we might need it later on */
+ if (pisr & AR5K_ISR_TXURN)
+ ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+ AR5K_SISR2_QCU_TXURN);
+
+ /* Misc Beacon related interrupts */
+
+ /* For AR5211 */
+ if (pisr & AR5K_ISR_TIM)
+ *interrupt_mask |= AR5K_INT_TIM;
+
+ /* For AR5212+ */
+ if (pisr & AR5K_ISR_BCNMISC) {
+ if (sisr2 & AR5K_SISR2_TIM)
+ *interrupt_mask |= AR5K_INT_TIM;
+ if (sisr2 & AR5K_SISR2_DTIM)
+ *interrupt_mask |= AR5K_INT_DTIM;
+ if (sisr2 & AR5K_SISR2_DTIM_SYNC)
+ *interrupt_mask |= AR5K_INT_DTIM_SYNC;
+ if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
+ *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
+ if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
+ *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
+ }
+
+ /* Below interrupts are unlikely to happen */
+
+ /* HIU = Host Interface Unit (PCI etc)
+ * Can be one of MCABT, SSERR, DPERR from SISR2 */
+ if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*Beacon Not Ready*/
+ if (unlikely(pisr & (AR5K_ISR_BNR)))
+ *interrupt_mask |= AR5K_INT_BNR;
+
+ /* A queue got CBR overrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+ *interrupt_mask |= AR5K_INT_QCBRORN;
+ ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRORN);
+ }
+
+ /* A queue got CBR underrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+ *interrupt_mask |= AR5K_INT_QCBRURN;
+ ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRURN);
+ }
+
+ /* A queue got triggered */
+ if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+ *interrupt_mask |= AR5K_INT_QTRIG;
+ ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+ AR5K_SISR4_QTRIG);
+ }
+
+ data = pisr;
+ }
+
+ /*
+ * In case we didn't handle anything,
+ * print the register value.
+ */
+ if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
+ ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);
+
+ return 0;
+}
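
For orientation, a hedged skeleton of an interrupt handler built on the two helpers above; the actual rx/tx dispatching and the reset path are elided, and the handler shape is an assumption rather than code from this series:

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct ath5k_hw *ah = dev_id;
        enum ath5k_int status;
        unsigned int budget = 1000;

        if (!ath5k_hw_is_intr_pending(ah))
                return IRQ_NONE;                /* shared line, not for us */

        do {
                if (ath5k_hw_get_isr(ah, &status))
                        break;                  /* -ENODEV: card is gone */

                if (status & AR5K_INT_FATAL)
                        break;                  /* a real handler schedules a reset */

                /* ... dispatch rx/tx work based on the remaining bits ... */
        } while (ath5k_hw_is_intr_pending(ah) && --budget);

        return IRQ_HANDLED;
}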
+
+/**
+ * ath5k_hw_set_imr() - Set interrupt mask
+ * @ah: The &struct ath5k_hw
+ * @new_mask: The new interrupt mask to be set
+ *
+ * Set the interrupt mask in hw to filter out unwanted interrupts. We do that
+ * by mapping ath5k_int bits to hw-specific bits to remove the abstraction and
+ * writing the Interrupt Mask Register. Returns the previous interrupt mask.
+ */
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+{
+ enum ath5k_int old_mask, int_mask;
+
+ old_mask = ah->ah_imr;
+
+ /*
+ * Disable card interrupts to prevent any race conditions
+ * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
+ * is set again on the new mask).
+ */
+ if (old_mask & AR5K_INT_GLOBAL) {
+ ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
+ ath5k_hw_reg_read(ah, AR5K_IER);
+ }
+
+ /*
+ * Add additional, chipset-dependent interrupt mask flags
+ * and write them to the IMR (interrupt mask register).
+ */
+ int_mask = new_mask & AR5K_INT_COMMON;
+
+ if (ah->ah_version != AR5K_AR5210) {
+ /* Preserve per queue TXURN interrupt mask */
+ u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
+ & AR5K_SIMR2_QCU_TXURN;
+
+ /* Fatal interrupt abstraction for 5211+ */
+ if (new_mask & AR5K_INT_FATAL) {
+ int_mask |= AR5K_IMR_HIUERR;
+ simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
+ | AR5K_SIMR2_DPERR);
+ }
+
+ /* Misc beacon related interrupts */
+ if (new_mask & AR5K_INT_TIM)
+ int_mask |= AR5K_IMR_TIM;
+
+ if (new_mask & AR5K_INT_TIM)
+ simr2 |= AR5K_SISR2_TIM;
+ if (new_mask & AR5K_INT_DTIM)
+ simr2 |= AR5K_SISR2_DTIM;
+ if (new_mask & AR5K_INT_DTIM_SYNC)
+ simr2 |= AR5K_SISR2_DTIM_SYNC;
+ if (new_mask & AR5K_INT_BCN_TIMEOUT)
+ simr2 |= AR5K_SISR2_BCN_TIMEOUT;
+ if (new_mask & AR5K_INT_CAB_TIMEOUT)
+ simr2 |= AR5K_SISR2_CAB_TIMEOUT;
+
+ /*Beacon Not Ready*/
+ if (new_mask & AR5K_INT_BNR)
+ int_mask |= AR5K_INT_BNR;
+
+ /* Note: Per queue interrupt masks
+ * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
+ ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
+ ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
+
+ } else {
+ /* Fatal interrupt abstraction for 5210 */
+ if (new_mask & AR5K_INT_FATAL)
+ int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
+ | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
+
+ /* Only common interrupts left for 5210 (no SIMRs) */
+ ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
+ }
+
+ /* If RXNOFRM interrupt is masked disable it
+ * by setting AR5K_RXNOFRM to zero */
+ if (!(new_mask & AR5K_INT_RXNOFRM))
+ ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);
+
+ /* Store new interrupt mask */
+ ah->ah_imr = new_mask;
+
+ /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
+ if (new_mask & AR5K_INT_GLOBAL) {
+ ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
+ ath5k_hw_reg_read(ah, AR5K_IER);
+ }
+
+ return old_mask;
+}
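
Since the previous mask is returned, callers can bracket register work that must not race with interrupts, exactly as ath5k_hw_update_tx_triglevel() above does; a condensed sketch:

static void example_with_irqs_masked(struct ath5k_hw *ah)
{
        enum ath5k_int old_mask;

        /* Drop AR5K_INT_GLOBAL to silence the card, keep the rest */
        old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        /* ... touch registers that must not race with the ISR ... */

        ath5k_hw_set_imr(ah, old_mask); /* restore the saved mask */
}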
+
+
+/********************\
+ Init/Stop functions
+\********************/
+
+/**
+ * ath5k_hw_dma_init() - Initialize DMA unit
+ * @ah: The &struct ath5k_hw
+ *
+ * Set DMA size and pre-enable interrupts
+ * (driver handles tx/rx buffer setup and
+ * dma start/stop)
+ *
+ * XXX: Save/restore RXDP/TXDP registers ?
+ */
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
+{
+ /*
+ * Set Rx/Tx DMA Configuration
+ *
+ * Set standard DMA size (128). Note that
+ * a DMA size of 512 causes rx overruns and tx errors
+ * on pci-e cards (tested on 5424 but since rx overruns
+ * also occur on 5416/5418 with madwifi we set 128
+ * for all PCI-E cards to be safe).
+ *
+ * XXX: need to check 5210 for this
+ * TODO: Check out tx trigger level, it's always 64 on dumps but I
+ * guess we can tweak it and see how it goes ;-)
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
+ AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
+ AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
+ }
+
+ /* Pre-enable interrupts on 5211/5212*/
+ if (ah->ah_version != AR5K_AR5210)
+ ath5k_hw_set_imr(ah, ah->ah_imr);
+
+}
+
+/**
+ * ath5k_hw_dma_stop() - stop DMA unit
+ * @ah: The &struct ath5k_hw
+ *
+ * Stop tx/rx DMA and interrupts. Returns
+ * -EBUSY if tx or rx dma failed to stop.
+ *
+ * XXX: Sometimes DMA unit hangs and we have
+ * stuck frames on tx queues, only a reset
+ * can fix that.
+ */
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
+{
+ int i, qmax, err;
+ err = 0;
+
+ /* Disable interrupts */
+ ath5k_hw_set_imr(ah, 0);
+
+ /* Stop rx dma */
+ err = ath5k_hw_stop_rx_dma(ah);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts
+ * and disable tx dma */
+ if (ah->ah_version != AR5K_AR5210) {
+ ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
+ qmax = AR5K_NUM_TX_QUEUES;
+ } else {
+ /* PISR/SISR Not available on 5210 */
+ ath5k_hw_reg_read(ah, AR5K_ISR);
+ qmax = AR5K_NUM_TX_QUEUES_NOQCU;
+ }
+
+ for (i = 0; i < qmax; i++) {
+ err = ath5k_hw_stop_tx_dma(ah, i);
+ /* -EINVAL -> queue inactive */
+ if (err && err != -EINVAL)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
new file mode 100644
index 000000000..94d34ee02
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -0,0 +1,1796 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*************************************\
+* EEPROM access functions and helpers *
+\*************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+
+/******************\
+* Helper functions *
+\******************/
+
+/*
+ * Translate binary channel representation in EEPROM to frequency
+ */
+static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
+ unsigned int mode)
+{
+ u16 val;
+
+ if (bin == AR5K_EEPROM_CHANNEL_DIS)
+ return bin;
+
+ if (mode == AR5K_EEPROM_MODE_11A) {
+ if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
+ val = (5 * bin) + 4800;
+ else
+ val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
+ (bin * 10) + 5100;
+ } else {
+ if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
+ val = bin + 2300;
+ else
+ val = bin + 2400;
+ }
+
+ return val;
+}
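
To make the two branches concrete (values picked for illustration): with ee_version above 3.2, an 11A bin of 40 maps to 5 * 40 + 4800 = 5000 MHz, while a 2 GHz bin of 112 maps to 112 + 2300 = 2412 MHz; on the 3.2-and-earlier layout the same 2 GHz channel is stored as bin 12, since the offset there is 2400.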
+
+
+/*********\
+* Parsers *
+\*********/
+
+/*
+ * Initialize eeprom & capabilities structs
+ */
+static int
+ath5k_eeprom_init_header(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u16 val;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
+
+ /*
+ * Read values from EEPROM and store them in the capability structure
+ */
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
+
+ /* Return if we have an old EEPROM */
+ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
+ return 0;
+
+ /*
+ * Validate the checksum of the EEPROM data. There are some
+ * devices with invalid EEPROMs.
+ */
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
+ cksum ^= val;
+ }
+ if (cksum != AR5K_EEPROM_INFO_CKSUM) {
+ ATH5K_ERR(ah, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
+ return -EIO;
+ }
+
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
+ ee_ant_gain);
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
+
+ /* XXX: Don't know which versions include these two */
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2);
+
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3)
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3);
+
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) {
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5);
+ AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6);
+ }
+ }
+
+ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
+ ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
+ ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
+
+ AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
+ ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
+ ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
+ }
+
+ AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val);
+
+ if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val)
+ ee->ee_is_hb63 = true;
+ else
+ ee->ee_is_hb63 = false;
+
+ AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val);
+ ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL);
+ ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false;
+
+ /* Check if PCIE_OFFSET points to PCIE_SERDES_SECTION
+ * and enable serdes programming if needed.
+ *
+ * XXX: Serdes values seem to be fixed so
+ * no need to read them here, we write them
+ * during ath5k_hw_init */
+ AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val);
+ ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ?
+ true : false;
+
+ return 0;
+}
+
+
+/*
+ * Read antenna infos from eeprom
+ */
+static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
+ unsigned int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 o = *offset;
+ u16 val;
+ int i = 0;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
+ ee->ee_atn_tx_rx[mode] = (val >> 2) & 0x3f;
+ ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
+ ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
+ ee->ee_ant_control[mode][i++] = val & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f;
+ ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f;
+ ee->ee_ant_control[mode][i] = (val << 2) & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3;
+ ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f;
+ ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f;
+ ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
+ ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
+ ee->ee_ant_control[mode][i++] = val & 0x3f;
+
+ /* Get antenna switch tables */
+ ah->ah_ant_ctl[mode][AR5K_ANT_CTL] =
+ (ee->ee_ant_control[mode][0] << 4);
+ ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] =
+ ee->ee_ant_control[mode][1] |
+ (ee->ee_ant_control[mode][2] << 6) |
+ (ee->ee_ant_control[mode][3] << 12) |
+ (ee->ee_ant_control[mode][4] << 18) |
+ (ee->ee_ant_control[mode][5] << 24);
+ ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] =
+ ee->ee_ant_control[mode][6] |
+ (ee->ee_ant_control[mode][7] << 6) |
+ (ee->ee_ant_control[mode][8] << 12) |
+ (ee->ee_ant_control[mode][9] << 18) |
+ (ee->ee_ant_control[mode][10] << 24);
+
+ /* return new offset */
+ *offset = o;
+
+ return 0;
+}
+
+/*
+ * Read supported modes and some mode-specific calibration data
+ * from eeprom
+ */
+static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
+ unsigned int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 o = *offset;
+ u16 val;
+
+ ee->ee_n_piers[mode] = 0;
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ ee->ee_ob[mode][3] = (val >> 5) & 0x7;
+ ee->ee_db[mode][3] = (val >> 2) & 0x7;
+ ee->ee_ob[mode][2] = (val << 1) & 0x7;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
+ ee->ee_db[mode][2] = (val >> 12) & 0x7;
+ ee->ee_ob[mode][1] = (val >> 9) & 0x7;
+ ee->ee_db[mode][1] = (val >> 6) & 0x7;
+ ee->ee_ob[mode][0] = (val >> 3) & 0x7;
+ ee->ee_db[mode][0] = val & 0x7;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ case AR5K_EEPROM_MODE_11B:
+ ee->ee_ob[mode][1] = (val >> 4) & 0x7;
+ ee->ee_db[mode][1] = val & 0x7;
+ break;
+ }
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
+ ee->ee_thr_62[mode] = val & 0xff;
+
+ if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
+ ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff;
+ ee->ee_tx_frm2xpa_enable[mode] = val & 0xff;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff;
+
+ if ((val & 0xff) & 0x80)
+ ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
+ else
+ ee->ee_noise_floor_thr[mode] = val & 0xff;
+
+ if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
+ ee->ee_noise_floor_thr[mode] =
+ mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_xlna_gain[mode] = (val >> 5) & 0xff;
+ ee->ee_x_gain[mode] = (val >> 1) & 0xf;
+ ee->ee_xpd[mode] = val & 0x1;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode != AR5K_EEPROM_MODE_11B)
+ ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
+
+ if (mode == AR5K_EEPROM_MODE_11A)
+ ee->ee_xr_power[mode] = val & 0x3f;
+ else {
+ /* b_DB_11[bg] and b_OB_11[bg] */
+ ee->ee_ob[mode][0] = val & 0x7;
+ ee->ee_db[mode][0] = (val >> 3) & 0x7;
+ }
+ }
+
+ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
+ ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
+ ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
+ } else {
+ ee->ee_i_gain[mode] = (val >> 13) & 0x7;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_i_gain[mode] |= (val << 3) & 0x38;
+
+ if (mode == AR5K_EEPROM_MODE_11G) {
+ ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6)
+ ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
+ }
+ }
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode == AR5K_EEPROM_MODE_11A) {
+ ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
+ ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
+ }
+
+ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0)
+ goto done;
+
+ /* Note: >= v5 EEPROMs have the b/g freq piers at another location,
+ * so these freq piers are ignored for >= v5 (they should be 0xff
+ * anyway) */
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
+ break;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_margin_tx_rx[mode] = val & 0x3f;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ AR5K_EEPROM_READ(o++, val);
+
+ ee->ee_pwr_cal_b[0].freq =
+ ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
+ if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ ee->ee_pwr_cal_b[1].freq =
+ ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
+ if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_pwr_cal_b[2].freq =
+ ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
+ if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+ ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ AR5K_EEPROM_READ(o++, val);
+
+ ee->ee_pwr_cal_g[0].freq =
+ ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
+ if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ ee->ee_pwr_cal_g[1].freq =
+ ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
+ if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_turbo_max_power[mode] = val & 0x7f;
+ ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_pwr_cal_g[2].freq =
+ ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
+ if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS)
+ ee->ee_n_piers[mode]++;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+ ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
+
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_i_cal[mode] = (val >> 5) & 0x3f;
+ ee->ee_q_cal[mode] = val & 0x1f;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_cck_ofdm_gain_delta = val & 0xff;
+ }
+ break;
+ }
+
+ /*
+ * Read turbo mode information on newer EEPROM versions
+ */
+ if (ee->ee_version < AR5K_EEPROM_VERSION_5_0)
+ goto done;
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f;
+
+ ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7;
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3;
+ ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f;
+
+ ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f;
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7;
+ ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff;
+
+ if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >= 2)
+ ee->ee_pd_gain_overlap = (val >> 9) & 0xf;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f;
+
+ ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7;
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1;
+ ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f;
+
+ ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f;
+ AR5K_EEPROM_READ(o++, val);
+ ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5;
+ ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff;
+ break;
+ }
+
+done:
+ /* return new offset */
+ *offset = o;
+
+ return 0;
+}
+
+/* Read mode-specific data (except power calibration data) */
+static int
+ath5k_eeprom_init_modes(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 mode_offset[3];
+ unsigned int mode;
+ u32 offset;
+ int ret;
+
+ /*
+ * Get values for all modes
+ */
+ mode_offset[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
+ mode_offset[AR5K_EEPROM_MODE_11B] = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
+ mode_offset[AR5K_EEPROM_MODE_11G] = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
+
+ ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] =
+ AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
+
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) {
+ offset = mode_offset[mode];
+
+ ret = ath5k_eeprom_read_ants(ah, &offset, mode);
+ if (ret)
+ return ret;
+
+ ret = ath5k_eeprom_read_modes(ah, &offset, mode);
+ if (ret)
+ return ret;
+ }
+
+ /* override for older eeprom versions for better performance */
+ if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) {
+ ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15;
+ ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28;
+ ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28;
+ }
+
+ return 0;
+}
+
+/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
+ * frequency mask) */
+static inline int
+ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
+ struct ath5k_chan_pcal_info *pc, unsigned int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ int o = *offset;
+ int i = 0;
+ u8 freq1, freq2;
+ u16 val;
+
+ ee->ee_n_piers[mode] = 0;
+ while (i < max) {
+ AR5K_EEPROM_READ(o++, val);
+
+ freq1 = val & 0xff;
+ if (!freq1)
+ break;
+
+ pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+ freq1, mode);
+ ee->ee_n_piers[mode]++;
+
+ freq2 = (val >> 8) & 0xff;
+ if (!freq2)
+ break;
+
+ pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+ freq2, mode);
+ ee->ee_n_piers[mode]++;
+ }
+
+ /* return new offset */
+ *offset = o;
+
+ return 0;
+}
+
+/* Read frequency piers for 802.11a */
+static int
+ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
+ int i;
+ u16 val;
+ u8 mask;
+
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
+ ath5k_eeprom_read_freq_list(ah, &offset,
+ AR5K_EEPROM_N_5GHZ_CHAN, pcal,
+ AR5K_EEPROM_MODE_11A);
+ } else {
+ mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version);
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcal[0].freq = (val >> 9) & mask;
+ pcal[1].freq = (val >> 2) & mask;
+ pcal[2].freq = (val << 5) & mask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcal[2].freq |= (val >> 11) & 0x1f;
+ pcal[3].freq = (val >> 4) & mask;
+ pcal[4].freq = (val << 3) & mask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcal[4].freq |= (val >> 13) & 0x7;
+ pcal[5].freq = (val >> 6) & mask;
+ pcal[6].freq = (val << 1) & mask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcal[6].freq |= (val >> 15) & 0x1;
+ pcal[7].freq = (val >> 8) & mask;
+ pcal[8].freq = (val >> 1) & mask;
+ pcal[9].freq = (val << 6) & mask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcal[9].freq |= (val >> 10) & 0x3f;
+
+ /* Fixed number of piers */
+ ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10;
+
+ for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) {
+ pcal[i].freq = ath5k_eeprom_bin2freq(ee,
+ pcal[i].freq, AR5K_EEPROM_MODE_11A);
+ }
+ }
+
+ return 0;
+}
+
+/* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */
+static inline int
+ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *pcal;
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11B:
+ pcal = ee->ee_pwr_cal_b;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ pcal = ee->ee_pwr_cal_g;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ath5k_eeprom_read_freq_list(ah, &offset,
+ AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal,
+ mode);
+
+ return 0;
+}
+
+
+/*
+ * Read power calibration for RF5111 chips
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * here and interpolate later.
+ */
+
+/* Used to match PCDAC steps with power values on RF5111 chips
+ * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
+ * steps that match with the power values we read from eeprom. On
+ * newer eeprom versions (>= 3.2) these steps are equally spaced at
+ * 10% of the pcdac curve -until the curve reaches its maximum-
+ * (11 steps from 0 to 100%) while on older eeprom versions (< 3.2)
+ * the 11 steps are spaced unevenly. This function returns
+ * the pcdac steps based on eeprom version and curve min/max so that we
+ * can have pcdac/pwr points.
+ */
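+/* Worked example (hypothetical values): with pcdac_min = 1,
+ * pcdac_max = 63 and an EEPROM >= 3.2 the evenly spaced intercepts
+ * give vp[] = { 1, 7, 13, 19, 25, 32, 38, 44, 50, 56, 63 }, i.e.
+ * each entry lies ip[i]% of the way from min to max (integer math).
+ * These steps end up in cdata->pcdac[] and later become pd_step[]
+ * in ath5k_eeprom_convert_pcal_info_5111(). */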
+static inline void
+ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
+{
+ static const u16 intercepts3[] = {
+ 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100
+ };
+ static const u16 intercepts3_2[] = {
+ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
+ };
+ const u16 *ip;
+ int i;
+
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
+ ip = intercepts3_2;
+ else
+ ip = intercepts3;
+
+ for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
+ vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
+}
+
+static int
+ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *chinfo;
+ u8 pier, pdg;
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_a;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_b;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
+ return 0;
+ chinfo = ee->ee_pwr_cal_g;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+ if (!chinfo[pier].pd_curves)
+ continue;
+
+ for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[pdg];
+
+ kfree(pd->pd_step);
+ kfree(pd->pd_pwr);
+ }
+
+ kfree(chinfo[pier].pd_curves);
+ }
+
+ return 0;
+}
+
+/* Convert RF5111 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf5111 *pcinfo;
+ struct ath5k_pdgain_info *pd;
+ u8 pier, point, idx;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf5111_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ goto err_out;
+
+ /* There is only one curve for RF5111;
+ * find out which one it is and place
+ * it in pd_curves.
+ * Note: ee_x_gain is reversed here */
+ for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
+
+ if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
+ pdgain_idx[0] = idx;
+ break;
+ }
+ }
+
+ ee->ee_pd_gains[mode] = 1;
+
+ pd = &chinfo[pier].pd_curves[idx];
+
+ pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
+ sizeof(u8), GFP_KERNEL);
+ if (!pd->pd_step)
+ goto err_out;
+
+ pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
+ sizeof(s16), GFP_KERNEL);
+ if (!pd->pd_pwr)
+ goto err_out;
+
+ /* Fill raw dataset
+ * (convert power to 0.25dB units
+ * for RF5112 compatibility) */
+ for (point = 0; point < pd->pd_points; point++) {
+
+ /* Absolute values */
+ pd->pd_pwr[point] = 2 * pcinfo->pwr[point];
+
+ /* Already sorted */
+ pd->pd_step[point] = pcinfo->pcdac[point];
+ }
+
+ /* Set min/max pwr */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+ chinfo[pier].max_pwr = pd->pd_pwr[10];
+
+ }
+
+ return 0;
+
+err_out:
+ ath5k_eeprom_free_pcal_info(ah, mode);
+ return -ENOMEM;
+}
+
+/* Parse EEPROM data */
+static int
+ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *pcal;
+ int offset, ret;
+ int i;
+ u16 val;
+
+ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
+ return 0;
+
+ ret = ath5k_eeprom_init_11a_pcal_freq(ah,
+ offset + AR5K_EEPROM_GROUP1_OFFSET);
+ if (ret < 0)
+ return ret;
+
+ offset += AR5K_EEPROM_GROUP2_OFFSET;
+ pcal = ee->ee_pwr_cal_a;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ if (!AR5K_EEPROM_HDR_11B(ee->ee_header) &&
+ !AR5K_EEPROM_HDR_11G(ee->ee_header))
+ return 0;
+
+ pcal = ee->ee_pwr_cal_b;
+ offset += AR5K_EEPROM_GROUP3_OFFSET;
+
+ /* fixed piers */
+ pcal[0].freq = 2412;
+ pcal[1].freq = 2447;
+ pcal[2].freq = 2484;
+ ee->ee_n_piers[mode] = 3;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
+ return 0;
+
+ pcal = ee->ee_pwr_cal_g;
+ offset += AR5K_EEPROM_GROUP4_OFFSET;
+
+ /* fixed piers */
+ pcal[0].freq = 2312;
+ pcal[1].freq = 2412;
+ pcal[2].freq = 2484;
+ ee->ee_n_piers[mode] = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ee->ee_n_piers[mode]; i++) {
+ struct ath5k_chan_pcal_info_rf5111 *cdata =
+ &pcal[i].rf5111_info;
+
+ AR5K_EEPROM_READ(offset++, val);
+ cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M);
+ cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M);
+ cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M);
+
+ AR5K_EEPROM_READ(offset++, val);
+ cdata->pwr[0] |= ((val >> 14) & 0x3);
+ cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M);
+
+ AR5K_EEPROM_READ(offset++, val);
+ cdata->pwr[3] |= ((val >> 12) & 0xf);
+ cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M);
+
+ AR5K_EEPROM_READ(offset++, val);
+ cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M);
+
+ AR5K_EEPROM_READ(offset++, val);
+ cdata->pwr[8] |= ((val >> 14) & 0x3);
+ cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M);
+ cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M);
+
+ ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min,
+ cdata->pcdac_max, cdata->pcdac);
+ }
+
+ return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
+}
+
+
+/*
+ * Read power calibration for RF5112 chips
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * to the hw, we read 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) here and
+ * interpolate later.
+ *
+ * Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
+ */
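+
+/* Worked example (hypothetical numbers): say a pier was calibrated with
+ * pwr_x0[] = { 0, 8, 16, 24 } (0/2/4/6 dBm in 0.25dB units) and
+ * pcdac_x0[] = { 1, 10, 10, 10 }. ath5k_eeprom_convert_pcal_info_5112()
+ * keeps the power values as-is (pd_pwr[] = { 0, 8, 16, 24 }) and
+ * accumulates the pcdac deltas into pd_step[] = { 1, 11, 21, 31 },
+ * while the xpd3 curve uses the fixed steps { 20, 35, 63 } with its
+ * pwr_x3[] values taken as-is. */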
+
+/* Convert RF5112 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf5112 *pcinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ unsigned int pier, pdg, point;
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf5112_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ goto err_out;
+
+ /* Fill pd_curves */
+ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+
+ u8 idx = pdgain_idx[pdg];
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[idx];
+
+ /* Lowest gain curve (max power) */
+ if (pdg == 0) {
+ /* One more point for better accuracy */
+ pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ goto err_out;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ goto err_out;
+
+ /* Fill raw dataset
+ * (all power levels are in 0.25dB units) */
+ pd->pd_step[0] = pcinfo->pcdac_x0[0];
+ pd->pd_pwr[0] = pcinfo->pwr_x0[0];
+
+ for (point = 1; point < pd->pd_points;
+ point++) {
+ /* Absolute values */
+ pd->pd_pwr[point] =
+ pcinfo->pwr_x0[point];
+
+ /* Deltas */
+ pd->pd_step[point] =
+ pd->pd_step[point - 1] +
+ pcinfo->pcdac_x0[point];
+ }
+
+ /* Set min power for this frequency */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+
+ /* Highest gain curve (min power) */
+ } else if (pdg == 1) {
+
+ pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ goto err_out;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ goto err_out;
+
+ /* Fill raw dataset
+ * (all power levels are in 0.25dB units) */
+ for (point = 0; point < pd->pd_points;
+ point++) {
+ /* Absolute values */
+ pd->pd_pwr[point] =
+ pcinfo->pwr_x3[point];
+
+ /* Fixed points */
+ pd->pd_step[point] =
+ pcinfo->pcdac_x3[point];
+ }
+
+ /* Since we have a higher gain curve
+ * override min power */
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ ath5k_eeprom_free_pcal_info(ah, mode);
+ return -ENOMEM;
+}
+
+/* Parse EEPROM data */
+static int
+ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
+ struct ath5k_chan_pcal_info *gen_chan_info;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ u32 offset;
+ u8 i, c;
+ u16 val;
+ u8 pd_gains = 0;
+
+ /* Count how many curves we have and
+ * identify them (which one of the 4
+ * available curves we have on each count).
+ * Curves are stored from lower (x0) to
+ * higher (x3) gain */
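+ /* e.g. (hypothetical) ee_x_gain[mode] = 0x9 (0b1001) selects
+ * curves 0 and 3, so pd_gains = 2 and pdgain_idx = { 0, 3 } */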
+ for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) {
+ /* ee_x_gain[mode] is x gain mask */
+ if ((ee->ee_x_gain[mode] >> i) & 0x1)
+ pdgain_idx[pd_gains++] = i;
+ }
+ ee->ee_pd_gains[mode] = pd_gains;
+
+ if (pd_gains == 0 || pd_gains > 2)
+ return -EINVAL;
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ /*
+ * Read 5GHz EEPROM channels
+ */
+ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
+ ath5k_eeprom_init_11a_pcal_freq(ah, offset);
+
+ offset += AR5K_EEPROM_GROUP2_OFFSET;
+ gen_chan_info = ee->ee_pwr_cal_a;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
+ if (AR5K_EEPROM_HDR_11A(ee->ee_header))
+ offset += AR5K_EEPROM_GROUP3_OFFSET;
+
+ /* NB: frequency piers parsed during mode init */
+ gen_chan_info = ee->ee_pwr_cal_b;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
+ if (AR5K_EEPROM_HDR_11A(ee->ee_header))
+ offset += AR5K_EEPROM_GROUP4_OFFSET;
+ else if (AR5K_EEPROM_HDR_11B(ee->ee_header))
+ offset += AR5K_EEPROM_GROUP2_OFFSET;
+
+ /* NB: frequency piers parsed during mode init */
+ gen_chan_info = ee->ee_pwr_cal_g;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ee->ee_n_piers[mode]; i++) {
+ chan_pcal_info = &gen_chan_info[i].rf5112_info;
+
+ /* Power values in quarter dB
+ * for the lower xpd gain curve
+ * (0 dBm -> higher output power) */
+ for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
+ AR5K_EEPROM_READ(offset++, val);
+ chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff);
+ chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff);
+ }
+
+ /* PCDAC steps
+ * corresponding to the above power
+ * measurements */
+ AR5K_EEPROM_READ(offset++, val);
+ chan_pcal_info->pcdac_x0[1] = (val & 0x1f);
+ chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
+ chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
+
+ /* Power values in quarter dB
+ * for the higher xpd gain curve
+ * (-18 dBm -> lower output power) */
+ AR5K_EEPROM_READ(offset++, val);
+ chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff);
+ chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff);
+
+ AR5K_EEPROM_READ(offset++, val);
+ chan_pcal_info->pwr_x3[2] = (val & 0xff);
+
+ /* PCDAC steps
+ * corresponding to the above power
+ * measurements (fixed) */
+ chan_pcal_info->pcdac_x3[0] = 20;
+ chan_pcal_info->pcdac_x3[1] = 35;
+ chan_pcal_info->pcdac_x3[2] = 63;
+
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
+ chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f);
+
+ /* Last xpd0 power level is also channel maximum */
+ gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
+ } else {
+ chan_pcal_info->pcdac_x0[0] = 1;
+ gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff);
+ }
+
+ }
+
+ return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info);
+}
+
+
+/*
+ * Read power calibration for RF2413 chips
+ *
+ * For RF2413 we have a Power to PDADC table (Power Detector ADC)
+ * instead of a PCDAC and 4 pd gain curves for each calibrated channel.
+ * Each curve has power on x axis in 0.5dB steps and PDADC steps on y
+ * axis and looks like an exponential function like the RF5111 curve.
+ *
+ * To recreate the curves we read here the points and interpolate
+ * later. Note that in most cases only 2 (higher and lower) curves are
+ * used (like RF5112) but vendors have the opportunity to include all
+ * 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
+ */
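+
+/* Worked example (hypothetical numbers): for a curve with the standard
+ * 4 points, pwr_i = 10, pddac_i = 30 and the three deltas
+ * pwr[][0..2] = { 4, 4, 4 } (0.5dB steps), pddac[][0..2] = { 10, 10, 10 },
+ * ath5k_eeprom_convert_pcal_info_2413() produces
+ * pd_pwr[] = { 40, 48, 56, 64 } (quarter dB, i.e. 10/12/14/16 dBm)
+ * and pd_step[] = { 30, 40, 50, 60 }. */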
+
+/* For RF2413 power calibration data doesn't start at a fixed location and
+ * if a mode is not supported, its section is missing -not zeroed-.
+ * So we need to calculate the starting offset for each section by using
+ * these two functions */
+
+/* Return the size of each section based on the mode and the number of pd
+ * gains available (maximum 4). */
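+/* e.g. (hypothetical) a section with 2 pd gain curves and 3 piers
+ * takes 6 * 3 = 18 16-bit EEPROM words. */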
+static inline unsigned int
+ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode)
+{
+ static const unsigned int pdgains_size[] = { 4, 6, 9, 12 };
+ unsigned int sz;
+
+ sz = pdgains_size[ee->ee_pd_gains[mode] - 1];
+ sz *= ee->ee_n_piers[mode];
+
+ return sz;
+}
+
+/* Return the starting offset for a section based on the modes supported
+ * and each section's size. */
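+/* e.g. (hypothetical) for 11G with both 11A and 11B supported the section
+ * starts at CAL_DATA_START + size(11B) + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2
+ * + size(11A) + AR5K_EEPROM_N_5GHZ_CHAN / 2 words; with 2 pd gain curves,
+ * 3 11B piers and 10 11A piers that is 18 + 2 + 60 + 5 = 85 words in. */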
+static unsigned int
+ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
+{
+ u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4);
+
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11G:
+ if (AR5K_EEPROM_HDR_11B(ee->ee_header))
+ offset += ath5k_pdgains_size_2413(ee,
+ AR5K_EEPROM_MODE_11B) +
+ AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
+ /* fall through */
+ case AR5K_EEPROM_MODE_11B:
+ if (AR5K_EEPROM_HDR_11A(ee->ee_header))
+ offset += ath5k_pdgains_size_2413(ee,
+ AR5K_EEPROM_MODE_11A) +
+ AR5K_EEPROM_N_5GHZ_CHAN / 2;
+ /* fall through */
+ case AR5K_EEPROM_MODE_11A:
+ break;
+ default:
+ break;
+ }
+
+ return offset;
+}
+
+/* Convert RF2413 specific data to generic raw data
+ * used by interpolation code */
+static int
+ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
+ struct ath5k_chan_pcal_info *chinfo)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf2413 *pcinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ unsigned int pier, pdg, point;
+
+ /* Fill raw data for each calibration pier */
+ for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
+
+ pcinfo = &chinfo[pier].rf2413_info;
+
+ /* Allocate pd_curves for this cal pier */
+ chinfo[pier].pd_curves =
+ kcalloc(AR5K_EEPROM_N_PD_CURVES,
+ sizeof(struct ath5k_pdgain_info),
+ GFP_KERNEL);
+
+ if (!chinfo[pier].pd_curves)
+ goto err_out;
+
+ /* Fill pd_curves */
+ for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+
+ u8 idx = pdgain_idx[pdg];
+ struct ath5k_pdgain_info *pd =
+ &chinfo[pier].pd_curves[idx];
+
+ /* One more point for the highest power
+ * curve (lowest gain) */
+ if (pdg == ee->ee_pd_gains[mode] - 1)
+ pd->pd_points = AR5K_EEPROM_N_PD_POINTS;
+ else
+ pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1;
+
+ /* Allocate pd points for this curve */
+ pd->pd_step = kcalloc(pd->pd_points,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!pd->pd_step)
+ goto err_out;
+
+ pd->pd_pwr = kcalloc(pd->pd_points,
+ sizeof(s16), GFP_KERNEL);
+
+ if (!pd->pd_pwr)
+ goto err_out;
+
+ /* Fill raw dataset
+ * convert all pwr levels to
+ * quarter dB for RF5112 compatibility */
+ pd->pd_step[0] = pcinfo->pddac_i[pdg];
+ pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
+
+ for (point = 1; point < pd->pd_points; point++) {
+
+ pd->pd_pwr[point] = pd->pd_pwr[point - 1] +
+ 2 * pcinfo->pwr[pdg][point - 1];
+
+ pd->pd_step[point] = pd->pd_step[point - 1] +
+ pcinfo->pddac[pdg][point - 1];
+
+ }
+
+ /* Highest gain curve -> min power */
+ if (pdg == 0)
+ chinfo[pier].min_pwr = pd->pd_pwr[0];
+
+ /* Lowest gain curve -> max power */
+ if (pdg == ee->ee_pd_gains[mode] - 1)
+ chinfo[pier].max_pwr =
+ pd->pd_pwr[pd->pd_points - 1];
+ }
+ }
+
+ return 0;
+
+err_out:
+ ath5k_eeprom_free_pcal_info(ah, mode);
+ return -ENOMEM;
+}
+
+/* Parse EEPROM data */
+static int
+ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info_rf2413 *pcinfo;
+ struct ath5k_chan_pcal_info *chinfo;
+ u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
+ u32 offset;
+ int idx, i;
+ u16 val;
+ u8 pd_gains = 0;
+
+ /* Count how many curves we have and
+ * identify them (which one of the 4
+ * available curves we have on each count).
+ * Curves are stored from higher to
+ * lower gain so we go backwards */
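+ /* e.g. (hypothetical) ee_x_gain[mode] = 0x9 (0b1001) selects
+ * curves 0 and 3; walking backwards gives pdgain_idx = { 3, 0 } */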
+ for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) {
+ /* ee_x_gain[mode] is x gain mask */
+ if ((ee->ee_x_gain[mode] >> idx) & 0x1)
+ pdgain_idx[pd_gains++] = idx;
+
+ }
+ ee->ee_pd_gains[mode] = pd_gains;
+
+ if (pd_gains == 0)
+ return -EINVAL;
+
+ offset = ath5k_cal_data_offset_2413(ee, mode);
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
+ return 0;
+
+ ath5k_eeprom_init_11a_pcal_freq(ah, offset);
+ offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
+ chinfo = ee->ee_pwr_cal_a;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
+ return 0;
+
+ ath5k_eeprom_init_11bg_2413(ah, mode, offset);
+ offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
+ chinfo = ee->ee_pwr_cal_b;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
+ return 0;
+
+ ath5k_eeprom_init_11bg_2413(ah, mode, offset);
+ offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
+ chinfo = ee->ee_pwr_cal_g;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ee->ee_n_piers[mode]; i++) {
+ pcinfo = &chinfo[i].rf2413_info;
+
+ /*
+ * Read pwr_i, pddac_i and the first
+ * 2 pd points (pwr, pddac)
+ */
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr_i[0] = val & 0x1f;
+ pcinfo->pddac_i[0] = (val >> 5) & 0x7f;
+ pcinfo->pwr[0][0] = (val >> 12) & 0xf;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[0][0] = val & 0x3f;
+ pcinfo->pwr[0][1] = (val >> 6) & 0xf;
+ pcinfo->pddac[0][1] = (val >> 10) & 0x3f;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr[0][2] = val & 0xf;
+ pcinfo->pddac[0][2] = (val >> 4) & 0x3f;
+
+ pcinfo->pwr[0][3] = 0;
+ pcinfo->pddac[0][3] = 0;
+
+ if (pd_gains > 1) {
+ /*
+ * Pd gain 0 is not the last pd gain
+ * so it only has 2 pd points.
+ * Continue with pd gain 1.
+ */
+ pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
+
+ pcinfo->pddac_i[1] = (val >> 15) & 0x1;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac_i[1] |= (val & 0x3F) << 1;
+
+ pcinfo->pwr[1][0] = (val >> 6) & 0xf;
+ pcinfo->pddac[1][0] = (val >> 10) & 0x3f;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr[1][1] = val & 0xf;
+ pcinfo->pddac[1][1] = (val >> 4) & 0x3f;
+ pcinfo->pwr[1][2] = (val >> 10) & 0xf;
+
+ pcinfo->pddac[1][2] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[1][2] |= (val & 0xF) << 2;
+
+ pcinfo->pwr[1][3] = 0;
+ pcinfo->pddac[1][3] = 0;
+ } else if (pd_gains == 1) {
+ /*
+ * Pd gain 0 is the last one so
+ * read the extra point.
+ */
+ pcinfo->pwr[0][3] = (val >> 10) & 0xf;
+
+ pcinfo->pddac[0][3] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[0][3] |= (val & 0xF) << 2;
+ }
+
+ /*
+ * Proceed with the other pd_gains
+ * as above.
+ */
+ if (pd_gains > 2) {
+ pcinfo->pwr_i[2] = (val >> 4) & 0x1f;
+ pcinfo->pddac_i[2] = (val >> 9) & 0x7f;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr[2][0] = (val >> 0) & 0xf;
+ pcinfo->pddac[2][0] = (val >> 4) & 0x3f;
+ pcinfo->pwr[2][1] = (val >> 10) & 0xf;
+
+ pcinfo->pddac[2][1] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[2][1] |= (val & 0xF) << 2;
+
+ pcinfo->pwr[2][2] = (val >> 4) & 0xf;
+ pcinfo->pddac[2][2] = (val >> 8) & 0x3f;
+
+ pcinfo->pwr[2][3] = 0;
+ pcinfo->pddac[2][3] = 0;
+ } else if (pd_gains == 2) {
+ pcinfo->pwr[1][3] = (val >> 4) & 0xf;
+ pcinfo->pddac[1][3] = (val >> 8) & 0x3f;
+ }
+
+ if (pd_gains > 3) {
+ pcinfo->pwr_i[3] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
+
+ pcinfo->pddac_i[3] = (val >> 3) & 0x7f;
+ pcinfo->pwr[3][0] = (val >> 10) & 0xf;
+ pcinfo->pddac[3][0] = (val >> 14) & 0x3;
+
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[3][0] |= (val & 0xF) << 2;
+ pcinfo->pwr[3][1] = (val >> 4) & 0xf;
+ pcinfo->pddac[3][1] = (val >> 8) & 0x3f;
+
+ pcinfo->pwr[3][2] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2;
+
+ pcinfo->pddac[3][2] = (val >> 2) & 0x3f;
+ pcinfo->pwr[3][3] = (val >> 8) & 0xf;
+
+ pcinfo->pddac[3][3] = (val >> 12) & 0xF;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4;
+ } else if (pd_gains == 3) {
+ pcinfo->pwr[2][3] = (val >> 14) & 0x3;
+ AR5K_EEPROM_READ(offset++, val);
+ pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2;
+
+ pcinfo->pddac[2][3] = (val >> 2) & 0x3f;
+ }
+ }
+
+ return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo);
+}
+
+
+/*
+ * Read per rate target power (this is the maximum tx power
+ * supported by the card). This info is used when setting
+ * tx power, no matter the channel.
+ *
+ * This also works for v5 EEPROMs.
+ */
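+/* For example, on an 11A pier the four fields of struct
+ * ath5k_rate_pcal_info hold half-dB targets for the 6-24, 36, 48 and
+ * 54 Mbit/s rate groups (for 11B they map to the 1, 2, 5.5 and
+ * 11 Mbit/s rates), so a hypothetical target_power_6to24 of 40 means
+ * 20 dBm for the 6-24 Mbit/s rates. */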
+static int
+ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_rate_pcal_info *rate_pcal_info;
+ u8 *rate_target_pwr_num;
+ u32 offset;
+ u16 val;
+ int i;
+
+ offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
+ rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
+ switch (mode) {
+ case AR5K_EEPROM_MODE_11A:
+ offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
+ rate_pcal_info = ee->ee_rate_tpwr_a;
+ ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
+ rate_pcal_info = ee->ee_rate_tpwr_b;
+ ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version);
+ rate_pcal_info = ee->ee_rate_tpwr_g;
+ ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Different freq mask for older eeproms (<= v3.2) */
+ if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) {
+ for (i = 0; i < (*rate_target_pwr_num); i++) {
+ AR5K_EEPROM_READ(offset++, val);
+ rate_pcal_info[i].freq =
+ ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode);
+
+ rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f);
+ rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f;
+
+ AR5K_EEPROM_READ(offset++, val);
+
+ if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
+ val == 0) {
+ (*rate_target_pwr_num) = i;
+ break;
+ }
+
+ rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7);
+ rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f);
+ rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f);
+ }
+ } else {
+ for (i = 0; i < (*rate_target_pwr_num); i++) {
+ AR5K_EEPROM_READ(offset++, val);
+ rate_pcal_info[i].freq =
+ ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
+
+ rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f);
+ rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f;
+
+ AR5K_EEPROM_READ(offset++, val);
+
+ if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
+ val == 0) {
+ (*rate_target_pwr_num) = i;
+ break;
+ }
+
+ rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf;
+ rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f);
+ rate_pcal_info[i].target_power_54 = (val & 0x3f);
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Read per channel calibration info from EEPROM
+ *
+ * This info is used to calibrate the baseband power table. Imagine
+ * that for each channel there is a power curve that's hw specific
+ * (depends on amplifier etc) and we try to "correct" this curve using
+ * offsets we pass on to the phy chip (baseband -> before amplifier) so that
+ * it can use accurate power values when setting tx power (takes amplifier's
+ * performance on each channel into account).
+ *
+ * EEPROM provides us with the offsets for some pre-calibrated channels
+ * and we have to interpolate to create the full table for these channels and,
+ * from those, the table for any other channel.
+ */
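+/* Rough illustration of the interpolation: with piers calibrated at
+ * 2412 and 2447 MHz, the entries for a channel at 2437 MHz are built
+ * by the phy code roughly as
+ * pwr(2437) = pwr(2412) + (2437 - 2412) * (pwr(2447) - pwr(2412)) / (2447 - 2412)
+ * for each point of each pd curve. */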
+static int
+ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ int (*read_pcal)(struct ath5k_hw *hw, int mode);
+ int mode;
+ int err;
+
+ if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) &&
+ (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1))
+ read_pcal = ath5k_eeprom_read_pcal_info_5112;
+ else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) &&
+ (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2))
+ read_pcal = ath5k_eeprom_read_pcal_info_2413;
+ else
+ read_pcal = ath5k_eeprom_read_pcal_info_5111;
+
+
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G;
+ mode++) {
+ err = read_pcal(ah, mode);
+ if (err)
+ return err;
+
+ err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Read conformance test limits used for regulatory control */
+static int
+ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_edge_power *rep;
+ unsigned int fmask, pmask;
+ unsigned int ctl_mode;
+ int i, j;
+ u32 offset;
+ u16 val;
+
+ pmask = AR5K_EEPROM_POWER_M;
+ fmask = AR5K_EEPROM_FREQ_M(ee->ee_version);
+ offset = AR5K_EEPROM_CTL(ee->ee_version);
+ ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version);
+ for (i = 0; i < ee->ee_ctls; i += 2) {
+ AR5K_EEPROM_READ(offset++, val);
+ ee->ee_ctl[i] = (val >> 8) & 0xff;
+ ee->ee_ctl[i + 1] = val & 0xff;
+ }
+
+ offset = AR5K_EEPROM_GROUP8_OFFSET;
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0)
+ offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) -
+ AR5K_EEPROM_GROUP5_OFFSET;
+ else
+ offset += AR5K_EEPROM_GROUPS_START(ee->ee_version);
+
+ rep = ee->ee_ctl_pwr;
+ for (i = 0; i < ee->ee_ctls; i++) {
+ switch (ee->ee_ctl[i] & AR5K_CTL_MODE_M) {
+ case AR5K_CTL_11A:
+ case AR5K_CTL_TURBO:
+ ctl_mode = AR5K_EEPROM_MODE_11A;
+ break;
+ default:
+ ctl_mode = AR5K_EEPROM_MODE_11G;
+ break;
+ }
+ if (ee->ee_ctl[i] == 0) {
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3)
+ offset += 8;
+ else
+ offset += 7;
+ rep += AR5K_EEPROM_N_EDGES;
+ continue;
+ }
+ if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
+ for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
+ AR5K_EEPROM_READ(offset++, val);
+ rep[j].freq = (val >> 8) & fmask;
+ rep[j + 1].freq = val & fmask;
+ }
+ for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
+ AR5K_EEPROM_READ(offset++, val);
+ rep[j].edge = (val >> 8) & pmask;
+ rep[j].flag = (val >> 14) & 1;
+ rep[j + 1].edge = val & pmask;
+ rep[j + 1].flag = (val >> 6) & 1;
+ }
+ } else {
+ AR5K_EEPROM_READ(offset++, val);
+ rep[0].freq = (val >> 9) & fmask;
+ rep[1].freq = (val >> 2) & fmask;
+ rep[2].freq = (val << 5) & fmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[2].freq |= (val >> 11) & 0x1f;
+ rep[3].freq = (val >> 4) & fmask;
+ rep[4].freq = (val << 3) & fmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[4].freq |= (val >> 13) & 0x7;
+ rep[5].freq = (val >> 6) & fmask;
+ rep[6].freq = (val << 1) & fmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[6].freq |= (val >> 15) & 0x1;
+ rep[7].freq = (val >> 8) & fmask;
+
+ rep[0].edge = (val >> 2) & pmask;
+ rep[1].edge = (val << 4) & pmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[1].edge |= (val >> 12) & 0xf;
+ rep[2].edge = (val >> 6) & pmask;
+ rep[3].edge = val & pmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[4].edge = (val >> 10) & pmask;
+ rep[5].edge = (val >> 4) & pmask;
+ rep[6].edge = (val << 2) & pmask;
+
+ AR5K_EEPROM_READ(offset++, val);
+ rep[6].edge |= (val >> 14) & 0x3;
+ rep[7].edge = (val >> 8) & pmask;
+ }
+ for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) {
+ rep[j].freq = ath5k_eeprom_bin2freq(ee,
+ rep[j].freq, ctl_mode);
+ }
+ rep += AR5K_EEPROM_N_EDGES;
+ }
+
+ return 0;
+}
+
+static int
+ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 offset;
+ u16 val;
+ int ret = 0, i;
+
+ offset = AR5K_EEPROM_CTL(ee->ee_version) +
+ AR5K_EEPROM_N_CTLS(ee->ee_version);
+
+ if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) {
+ /* No spur info for 5GHz */
+ ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR;
+ /* 2 channels for 2GHz (2464/2420) */
+ ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1;
+ ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2;
+ ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR;
+ } else if (ee->ee_version >= AR5K_EEPROM_VERSION_5_3) {
+ for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
+ AR5K_EEPROM_READ(offset, val);
+ ee->ee_spur_chans[i][0] = val;
+ AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS,
+ val);
+ ee->ee_spur_chans[i][1] = val;
+ offset++;
+ }
+ }
+
+ return ret;
+}
+
+
+/***********************\
+* Init/Detach functions *
+\***********************/
+
+/*
+ * Initialize eeprom data structure
+ */
+int
+ath5k_eeprom_init(struct ath5k_hw *ah)
+{
+ int err;
+
+ err = ath5k_eeprom_init_header(ah);
+ if (err < 0)
+ return err;
+
+ err = ath5k_eeprom_init_modes(ah);
+ if (err < 0)
+ return err;
+
+ err = ath5k_eeprom_read_pcal_info(ah);
+ if (err < 0)
+ return err;
+
+ err = ath5k_eeprom_read_ctl_info(ah);
+ if (err < 0)
+ return err;
+
+ err = ath5k_eeprom_read_spur_chans(ah);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+void
+ath5k_eeprom_detach(struct ath5k_hw *ah)
+{
+ u8 mode;
+
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
+ ath5k_eeprom_free_pcal_info(ah, mode);
+}
+
+int
+ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ return AR5K_EEPROM_MODE_11A;
+ case AR5K_MODE_11G:
+ return AR5K_EEPROM_MODE_11G;
+ case AR5K_MODE_11B:
+ return AR5K_EEPROM_MODE_11B;
+ default:
+ ATH5K_WARN(ah, "channel is not A/B/G!");
+ return AR5K_EEPROM_MODE_11A;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
new file mode 100644
index 000000000..693296ee9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -0,0 +1,495 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*
+ * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
+ */
+#define AR5K_EEPROM_PCIE_OFFSET 0x02 /* Contains offset to PCI-E infos */
+#define AR5K_EEPROM_PCIE_SERDES_SECTION 0x40 /* PCIE_OFFSET points here when
+ * SERDES infos are present */
+#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
+#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
+
+#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
+
+#define AR5K_EEPROM_RFKILL 0x0f
+#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
+#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
+#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
+#define AR5K_EEPROM_RFKILL_POLARITY_S 1
+
+#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
+#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
+#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
+#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
+#define AR5K_EEPROM_INFO_CKSUM 0xffff
+#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n))
+
+#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
+#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
+#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2GHz (ar5211_rfregs) */
+#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
+#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
+#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain, ee_cck_ofdm_power_delta (eeprom_read_modes) */
+#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
+#define AR5K_EEPROM_VERSION_4_3 0x4003 /* power calibration changes */
+#define AR5K_EEPROM_VERSION_4_4 0x4004
+#define AR5K_EEPROM_VERSION_4_5 0x4005
+#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
+#define AR5K_EEPROM_VERSION_4_7 0x3007 /* 4007 ? */
+#define AR5K_EEPROM_VERSION_4_9 0x4009 /* EAR futureproofing */
+#define AR5K_EEPROM_VERSION_5_0 0x5000 /* Has 2413 PDADC calibration etc */
+#define AR5K_EEPROM_VERSION_5_1 0x5001 /* Has capability values */
+#define AR5K_EEPROM_VERSION_5_3 0x5003 /* Has spur mitigation tables */
+
+#define AR5K_EEPROM_MODE_11A 0
+#define AR5K_EEPROM_MODE_11B 1
+#define AR5K_EEPROM_MODE_11G 2
+
+#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */
+#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
+#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
+#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2GHz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
+#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
+#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
+#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5GHz */
+
+/* Newer EEPROMs are using a different offset */
+#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
+ (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
+
+#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
+#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((s8)(((_v) >> 8) & 0xff))
+#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((s8)((_v) & 0xff))
+
+/* Misc values available since EEPROM 4.0 */
+#define AR5K_EEPROM_MISC0 AR5K_EEPROM_INFO(4)
+#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
+#define AR5K_EEPROM_HDR_XR2_DIS(_v) (((_v) >> 12) & 0x1)
+#define AR5K_EEPROM_HDR_XR5_DIS(_v) (((_v) >> 13) & 0x1)
+#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
+
+#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
+#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
+#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
+
+#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
+#define AR5K_EEPROM_EEP_FILE_VERSION(_v) (((_v) >> 8) & 0xff)
+#define AR5K_EEPROM_EAR_FILE_VERSION(_v) ((_v) & 0xff)
+
+#define AR5K_EEPROM_MISC3 AR5K_EEPROM_INFO(7)
+#define AR5K_EEPROM_ART_BUILD_NUM(_v) (((_v) >> 10) & 0x3f)
+#define AR5K_EEPROM_EAR_FILE_ID(_v) ((_v) & 0xff)
+
+#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
+#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
+#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
+#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
+
+#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
+#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
+#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
+#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
+#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
+#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
+#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
+
+#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
+#define AR5K_EEPROM_TX_CHAIN_DIS(_v) ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
+#define AR5K_EEPROM_RX_CHAIN_DIS(_v) (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
+#define AR5K_EEPROM_FCC_MID_EN(_v) (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1EVEN_EN(_v) (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
+#define AR5K_EEPROM_JAP_U2_EN(_v) (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
+#define AR5K_EEPROM_JAP_MID_EN(_v) (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1ODD_EN(_v) (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
+#define AR5K_EEPROM_JAP_11A_NEW_EN(_v) (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
+
+/* calibration settings */
+#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
+#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
+#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
+#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
+#define AR5K_EEPROM_GROUPS_START(_v) AR5K_EEPROM_OFF(_v, 0x0100, 0x0150) /* Start of Groups */
+#define AR5K_EEPROM_GROUP1_OFFSET 0x0
+#define AR5K_EEPROM_GROUP2_OFFSET 0x5
+#define AR5K_EEPROM_GROUP3_OFFSET 0x37
+#define AR5K_EEPROM_GROUP4_OFFSET 0x46
+#define AR5K_EEPROM_GROUP5_OFFSET 0x55
+#define AR5K_EEPROM_GROUP6_OFFSET 0x65
+#define AR5K_EEPROM_GROUP7_OFFSET 0x69
+#define AR5K_EEPROM_GROUP8_OFFSET 0x6f
+
+#define AR5K_EEPROM_TARGET_PWR_OFF_11A(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
+ AR5K_EEPROM_GROUP5_OFFSET, 0x0000)
+#define AR5K_EEPROM_TARGET_PWR_OFF_11B(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
+ AR5K_EEPROM_GROUP6_OFFSET, 0x0010)
+#define AR5K_EEPROM_TARGET_PWR_OFF_11G(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
+ AR5K_EEPROM_GROUP7_OFFSET, 0x0014)
+
+/* [3.1 - 3.3] */
+#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
+#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
+
+#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
+#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
+#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
+#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
+#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
+#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
+#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
+#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
+#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
+#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
+#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
+#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
+#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
+#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
+#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
+#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
+#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
+
+/* Some EEPROM defines */
+#define AR5K_EEPROM_EEP_SCALE 100
+#define AR5K_EEPROM_EEP_DELTA 10
+#define AR5K_EEPROM_N_MODES 3
+#define AR5K_EEPROM_N_5GHZ_CHAN 10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
+#define AR5K_EEPROM_N_2GHZ_CHAN 3
+#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
+#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
+#define AR5K_EEPROM_MAX_CHAN 10
+#define AR5K_EEPROM_N_PWR_POINTS_5111 11
+#define AR5K_EEPROM_N_PCDAC 11
+#define AR5K_EEPROM_N_PHASE_CAL 5
+#define AR5K_EEPROM_N_TEST_FREQ 8
+#define AR5K_EEPROM_N_EDGES 8
+#define AR5K_EEPROM_N_INTERCEPTS 11
+#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
+#define AR5K_EEPROM_PCDAC_M 0x3f
+#define AR5K_EEPROM_PCDAC_START 1
+#define AR5K_EEPROM_PCDAC_STOP 63
+#define AR5K_EEPROM_PCDAC_STEP 1
+#define AR5K_EEPROM_NON_EDGE_M 0x40
+#define AR5K_EEPROM_CHANNEL_POWER 8
+#define AR5K_EEPROM_N_OBDB 4
+#define AR5K_EEPROM_OBDB_DIS 0xffff
+#define AR5K_EEPROM_CHANNEL_DIS 0xff
+#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
+#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
+#define AR5K_EEPROM_MAX_CTLS 32
+#define AR5K_EEPROM_N_PD_CURVES 4
+#define AR5K_EEPROM_N_XPD0_POINTS 4
+#define AR5K_EEPROM_N_XPD3_POINTS 3
+#define AR5K_EEPROM_N_PD_GAINS 4
+#define AR5K_EEPROM_N_PD_POINTS 5
+#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
+#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
+#define AR5K_EEPROM_POWER_M 0x3f
+#define AR5K_EEPROM_POWER_MIN 0
+#define AR5K_EEPROM_POWER_MAX 3150
+#define AR5K_EEPROM_POWER_STEP 50
+#define AR5K_EEPROM_POWER_TABLE_SIZE 64
+#define AR5K_EEPROM_N_POWER_LOC_11B 4
+#define AR5K_EEPROM_N_POWER_LOC_11G 6
+#define AR5K_EEPROM_I_GAIN 10
+#define AR5K_EEPROM_CCK_OFDM_DELTA 15
+#define AR5K_EEPROM_N_IQ_CAL 2
+/* 5GHz/2GHz */
+enum ath5k_eeprom_freq_bands {
+ AR5K_EEPROM_BAND_5GHZ = 0,
+ AR5K_EEPROM_BAND_2GHZ = 1,
+ AR5K_EEPROM_N_FREQ_BANDS,
+};
+/* Spur chans per freq band */
+#define AR5K_EEPROM_N_SPUR_CHANS 5
+/* fbin value for chan 2464 x2 */
+#define AR5K_EEPROM_5413_SPUR_CHAN_1 1640
+/* fbin value for chan 2420 x2 */
+#define AR5K_EEPROM_5413_SPUR_CHAN_2 1200
+#define AR5K_EEPROM_SPUR_CHAN_MASK 0x3FFF
+#define AR5K_EEPROM_NO_SPUR 0x8000
+#define AR5K_SPUR_CHAN_WIDTH 87
+#define AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz 3125
+#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
+
+#define AR5K_EEPROM_READ(_o, _v) do { \
+ if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
+ return -EIO; \
+} while (0)
+
+#define AR5K_EEPROM_READ_HDR(_o, _v) \
+ AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v);
+
+enum ath5k_ant_table {
+ AR5K_ANT_CTL = 0, /* Idle switch table settings */
+ AR5K_ANT_SWTABLE_A = 1, /* Switch table for antenna A */
+ AR5K_ANT_SWTABLE_B = 2, /* Switch table for antenna B */
+ AR5K_ANT_MAX,
+};
+
+enum ath5k_ctl_mode {
+ AR5K_CTL_11A = 0,
+ AR5K_CTL_11B = 1,
+ AR5K_CTL_11G = 2,
+ AR5K_CTL_TURBO = 3,
+ AR5K_CTL_TURBOG = 4,
+ AR5K_CTL_2GHT20 = 5,
+ AR5K_CTL_5GHT20 = 6,
+ AR5K_CTL_2GHT40 = 7,
+ AR5K_CTL_5GHT40 = 8,
+ AR5K_CTL_MODE_M = 15,
+};
+
+/* Per channel calibration data, used for power table setup */
+struct ath5k_chan_pcal_info_rf5111 {
+ /* Power levels in half dBm units
+ * for one power curve. */
+ u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
+ /* PCDAC table steps
+ * for the above values */
+ u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
+ /* Starting PCDAC step */
+ u8 pcdac_min;
+ /* Final PCDAC step */
+ u8 pcdac_max;
+};
+
+struct ath5k_chan_pcal_info_rf5112 {
+	/* Power levels for the lower (0) and
+	 * higher (3) level curves,
+	 * in 0.25dB units */
+ s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
+ s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
+ /* PCDAC table steps
+ * for the above values */
+ u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
+ u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
+};
+
+struct ath5k_chan_pcal_info_rf2413 {
+ /* Starting pwr/pddac values */
+ s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
+ u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
+ /* (pwr,pddac) points
+ * power levels in 0.5dB units */
+ s8 pwr[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_N_PD_POINTS];
+ u8 pddac[AR5K_EEPROM_N_PD_GAINS]
+ [AR5K_EEPROM_N_PD_POINTS];
+};
+
+enum ath5k_powertable_type {
+ AR5K_PWRTABLE_PWR_TO_PCDAC = 0,
+ AR5K_PWRTABLE_LINEAR_PCDAC = 1,
+ AR5K_PWRTABLE_PWR_TO_PDADC = 2,
+};
+
+struct ath5k_pdgain_info {
+ u8 pd_points;
+ u8 *pd_step;
+ /* Power values are in
+ * 0.25dB units */
+ s16 *pd_pwr;
+};
+
+struct ath5k_chan_pcal_info {
+ /* Frequency */
+ u16 freq;
+ /* Tx power boundaries */
+ s16 max_pwr;
+ s16 min_pwr;
+ union {
+ struct ath5k_chan_pcal_info_rf5111 rf5111_info;
+ struct ath5k_chan_pcal_info_rf5112 rf5112_info;
+ struct ath5k_chan_pcal_info_rf2413 rf2413_info;
+ };
+	/* Raw values used by phy code.
+	 * Curves are stored in order from lower
+	 * gain to higher gain (max txpower -> min txpower) */
+ struct ath5k_pdgain_info *pd_curves;
+};
+
+/* Per rate calibration data for each mode,
+ * used for rate power table setup.
+ * Note: Values in 0.5dB units */
+struct ath5k_rate_pcal_info {
+ u16 freq; /* Frequency */
+	/* Power level for the 6-24Mbit/s rates or
+	 * the 1Mbit/s rate */
+	u16	target_power_6to24;
+	/* Power level for the 36Mbit/s rate or
+	 * the 2Mbit/s rate */
+	u16	target_power_36;
+	/* Power level for the 48Mbit/s rate or
+	 * the 5.5Mbit/s rate */
+	u16	target_power_48;
+	/* Power level for the 54Mbit/s rate or
+	 * the 11Mbit/s rate */
+	u16	target_power_54;
+};
+
+/* Power edges for conformance test limits */
+struct ath5k_edge_power {
+ u16 freq;
+ u16 edge; /* in half dBm */
+ bool flag;
+};
+
+/**
+ * struct ath5k_eeprom_info - EEPROM calibration data
+ *
+ * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
+ * flags
+ * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
+ * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
+ * OFDM and CCK packets
+ * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
+ * (11Mbps) rate in G mode. 0.1dB steps
+ * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
+ *
+ * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
+ * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
+ * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
+ * @ee_switch_settling: RX/TX Switch settling time
+ * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
+ * @ee_ant_control: Antenna Control Settings
+ * @ee_ob: Bias current for Output stage of PA
+ * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
+ * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
+ * @ee_db: Bias current for Output stage of PA. see @ee_ob
+ * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
+ * to when the external LNA is activated
+ * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
+ * to when the external PA switch is deactivated
+ * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
+ * external PA switch is activated
+ * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
+ *	(IEEE802.11a section 17.3.10.5)
+ * @ee_xlna_gain: Total gain of the LNA (information only)
+ * @ee_xpd: Use external (1) or internal power detector
+ * @ee_x_gain: Gain for external power detector output (differences in EEMAP
+ * versions!)
+ * @ee_i_gain: Initial gain value after reset
+ * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
+ *
+ * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
+ * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
+ * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
+ * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
+ * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
+ */
+struct ath5k_eeprom_info {
+
+ /* Header information */
+ u16 ee_magic;
+ u16 ee_protect;
+ u16 ee_regdomain;
+ u16 ee_version;
+ u16 ee_header;
+ u16 ee_ant_gain;
+ u8 ee_rfkill_pin;
+ bool ee_rfkill_pol;
+ bool ee_is_hb63;
+ bool ee_serdes;
+ u16 ee_misc0;
+ u16 ee_misc1;
+ u16 ee_misc2;
+ u16 ee_misc3;
+ u16 ee_misc4;
+ u16 ee_misc5;
+ u16 ee_misc6;
+ u16 ee_cck_ofdm_gain_delta;
+ u16 ee_cck_ofdm_power_delta;
+ u16 ee_scaled_cck_delta;
+
+ /* RF Calibration settings (reset, rfregs) */
+ u16 ee_i_cal[AR5K_EEPROM_N_MODES];
+ u16 ee_q_cal[AR5K_EEPROM_N_MODES];
+ u16 ee_fixed_bias[AR5K_EEPROM_N_MODES];
+ u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES];
+ u16 ee_xr_power[AR5K_EEPROM_N_MODES];
+ u16 ee_switch_settling[AR5K_EEPROM_N_MODES];
+ u16 ee_atn_tx_rx[AR5K_EEPROM_N_MODES];
+ u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
+ u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
+ u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
+ u16 ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
+ u16 ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
+ u16 ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
+ u16 ee_thr_62[AR5K_EEPROM_N_MODES];
+ u16 ee_xlna_gain[AR5K_EEPROM_N_MODES];
+ u16 ee_xpd[AR5K_EEPROM_N_MODES];
+ u16 ee_x_gain[AR5K_EEPROM_N_MODES];
+ u16 ee_i_gain[AR5K_EEPROM_N_MODES];
+ u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
+ u16 ee_switch_settling_turbo[AR5K_EEPROM_N_MODES];
+ u16 ee_margin_tx_rx_turbo[AR5K_EEPROM_N_MODES];
+ u16 ee_atn_tx_rx_turbo[AR5K_EEPROM_N_MODES];
+
+ /* Power calibration data */
+ u16 ee_false_detect[AR5K_EEPROM_N_MODES];
+
+ /* Number of pd gain curves per mode */
+ u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
+ /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */
+ u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS];
+
+ u8 ee_n_piers[AR5K_EEPROM_N_MODES];
+ struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
+ struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+ struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+
+ /* Per rate target power levels */
+ u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
+ struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
+ struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+ struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
+
+ /* Conformance test limits (Unused) */
+ u8 ee_ctls;
+ u8 ee_ctl[AR5K_EEPROM_MAX_CTLS];
+ struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
+
+ /* Noise Floor Calibration settings */
+ s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
+ s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES];
+ s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES];
+ s8 ee_adc_desired_size_turbo[AR5K_EEPROM_N_MODES];
+ s8 ee_pga_desired_size_turbo[AR5K_EEPROM_N_MODES];
+ s8 ee_pd_gain_overlap;
+
+ /* Spur mitigation data (fbin values for spur channels) */
+ u16 ee_spur_chans[AR5K_EEPROM_N_SPUR_CHANS][AR5K_EEPROM_N_FREQ_BANDS];
+
+ /* Antenna raw switch tables */
+ u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
+};
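All of the *_N_MODES arrays above are indexed by EEPROM mode rather than by channel. A minimal sketch of how such a lookup is typically done, assuming the AR5K_EEPROM_MODE_11A/11B/11G indices defined earlier in this header (the helper itself is hypothetical):

/* Hypothetical lookup sketch -- not part of the patch */
static u16 example_i_gain_for_band(const struct ath5k_eeprom_info *ee,
				   bool is_2ghz, bool is_cck)
{
	/* 11A covers 5GHz, 11B covers CCK-only 2GHz, 11G the rest */
	unsigned int mode = !is_2ghz ? AR5K_EEPROM_MODE_11A :
			     is_cck ? AR5K_EEPROM_MODE_11B :
				      AR5K_EEPROM_MODE_11G;

	return ee->ee_i_gain[mode];
}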
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
new file mode 100644
index 000000000..73d3dd8a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/****************\
+ GPIO Functions
+\****************/
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * A GPIO pin can be configured as an input or an output through the GPIO
+ * control register, and its state can then be read from or set through the
+ * GPIO data input/output registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED, but the hw supports
+ * many more scenarios. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle those through the LED subsystem in led.c.
+ */
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LEDs connected to the LED_0 and LED_1 pins,
+ * not for GPIO-based LEDs.
+ */
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+{
+ u32 led;
+	/* 5210 has different LED mode handling */
+	u32 led_5210;
+
+	/* Reset LED status */
+ if (ah->ah_version != AR5K_AR5210)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
+
+ /*
+	 * Some blinking values, adjust them as you wish
+ */
+ switch (state) {
+ case AR5K_LED_SCAN:
+ case AR5K_LED_AUTH:
+ led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
+ led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
+ break;
+
+ case AR5K_LED_INIT:
+ led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
+ led_5210 = AR5K_PCICFG_LED_PEND;
+ break;
+
+ case AR5K_LED_ASSOC:
+ case AR5K_LED_RUN:
+ led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
+ led_5210 = AR5K_PCICFG_LED_ASSOC;
+ break;
+
+ default:
+ led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
+ led_5210 = AR5K_PCICFG_LED_PEND;
+ break;
+ }
+
+	/* Write the new status to the register */
+ if (ah->ah_version != AR5K_AR5210)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
+}
+
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
+ */
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+{
+ if (gpio >= AR5K_NUM_GPIO)
+ return -EINVAL;
+
+ ath5k_hw_reg_write(ah,
+ (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
+ | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
+ */
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+{
+ if (gpio >= AR5K_NUM_GPIO)
+ return -EINVAL;
+
+ ath5k_hw_reg_write(ah,
+ (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
+ | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
+ */
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+{
+ if (gpio >= AR5K_NUM_GPIO)
+ return 0xffffffff;
+
+	/* Read the GPIO data input register and isolate this pin's bit */
+ return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
+ 0x1;
+}
+
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
+ */
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+{
+ u32 data;
+
+ if (gpio >= AR5K_NUM_GPIO)
+ return -EINVAL;
+
+	/* Read-modify-write the GPIO data output register for this pin */
+ data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
+
+ data &= ~(1 << gpio);
+ data |= (val & 1) << gpio;
+
+ ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in the EEPROM.
+ * The switch can either open or close the circuit to indicate that we should
+ * disable RF/Wireless to save power (the polarity is also read from the EEPROM).
+ */
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level)
+{
+ u32 data;
+
+ if (gpio >= AR5K_NUM_GPIO)
+ return;
+
+ /*
+ * Set the GPIO interrupt
+ */
+ data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
+ ~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
+ AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
+ (AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
+
+ ath5k_hw_reg_write(ah, interrupt_level ? data :
+ (data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
+
+ ah->ah_imr |= AR5K_IMR_GPIO;
+
+ /* Enable GPIO interrupts */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
+}
+
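The helpers above compose directly for vendor LEDs wired to GPIO pins, as mentioned in the DOC block at the top of this file. A hypothetical sketch of turning such an LED on (in the real driver the pin number and polarity come from the EEPROM and the logic lives in led.c):

/* Hypothetical sketch -- not part of the patch */
static int example_gpio_led_on(struct ath5k_hw *ah, u32 gpio, bool active_high)
{
	int ret;

	ret = ath5k_hw_set_gpio_output(ah, gpio);
	if (ret)
		return ret;

	/* Drive the pin to its active level */
	return ath5k_hw_set_gpio(ah, gpio, active_high ? 1 : 0);
}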
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
new file mode 100644
index 000000000..ee1c2fa8b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -0,0 +1,1605 @@
+/*
+ * Initial register settings functions
+ *
+ * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
+ */
+struct ath5k_ini {
+ u16 ini_register;
+ u32 ini_value;
+
+ enum {
+ AR5K_INI_WRITE = 0, /* Default */
+ AR5K_INI_READ = 1,
+ } ini_mode;
+};
+
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
+ */
+struct ath5k_ini_mode {
+ u16 mode_register;
+ u32 mode_value[3];
+};
+
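The tables below are consumed by walking them and writing each value to its register, with AR5K_INI_READ entries read (and thereby cleared) instead; mode-specific tables carry one value per A/B/G column. The driver's real helpers later in this file add more bookkeeping, so the following is only an illustrative sketch:

/* Illustrative sketch -- not part of the patch */
static void example_write_ini(struct ath5k_hw *ah,
			      const struct ath5k_ini *ini, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (ini[i].ini_mode == AR5K_INI_READ)
			/* Read-and-clear registers (e.g. ISR) */
			ath5k_hw_reg_read(ah, ini[i].ini_register);
		else
			ath5k_hw_reg_write(ah, ini[i].ini_value,
					   ini[i].ini_register);
	}
}

static void example_write_ini_mode(struct ath5k_hw *ah,
				   const struct ath5k_ini_mode *ini,
				   unsigned int n, unsigned int mode)
{
	unsigned int i;

	/* mode indexes the A/B/G column of each entry */
	for (i = 0; i < n; i++)
		ath5k_hw_reg_write(ah, ini[i].mode_value[mode],
				   ini[i].mode_register);
}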
+/* Initial register settings for AR5210 */
+static const struct ath5k_ini ar5210_ini[] = {
+ /* PCU and MAC registers */
+ { AR5K_NOQCU_TXDP0, 0 },
+ { AR5K_NOQCU_TXDP1, 0 },
+ { AR5K_RXDP, 0 },
+ { AR5K_CR, 0 },
+ { AR5K_ISR, 0, AR5K_INI_READ },
+ { AR5K_IMR, 0 },
+ { AR5K_IER, AR5K_IER_DISABLE },
+ { AR5K_BSR, 0, AR5K_INI_READ },
+ { AR5K_TXCFG, AR5K_DMASIZE_128B },
+ { AR5K_RXCFG, AR5K_DMASIZE_128B },
+ { AR5K_CFG, AR5K_INIT_CFG },
+ { AR5K_TOPS, 8 },
+ { AR5K_RXNOFRM, 8 },
+ { AR5K_RPGTO, 0 },
+ { AR5K_TXNOFRM, 0 },
+ { AR5K_SFR, 0 },
+ { AR5K_MIBC, 0 },
+ { AR5K_MISC, 0 },
+ { AR5K_RX_FILTER_5210, 0 },
+ { AR5K_MCAST_FILTER0_5210, 0 },
+ { AR5K_MCAST_FILTER1_5210, 0 },
+ { AR5K_TX_MASK0, 0 },
+ { AR5K_TX_MASK1, 0 },
+ { AR5K_CLR_TMASK, 0 },
+ { AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES },
+ { AR5K_DIAG_SW_5210, 0 },
+ { AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES },
+ { AR5K_TSF_L32_5210, 0 },
+ { AR5K_TIMER0_5210, 0 },
+ { AR5K_TIMER1_5210, 0xffffffff },
+ { AR5K_TIMER2_5210, 0xffffffff },
+ { AR5K_TIMER3_5210, 1 },
+ { AR5K_CFP_DUR_5210, 0 },
+ { AR5K_CFP_PERIOD_5210, 0 },
+ /* PHY registers */
+ { AR5K_PHY(0), 0x00000047 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY(3), 0x09848ea6 },
+ { AR5K_PHY(4), 0x3d32e000 },
+ { AR5K_PHY(5), 0x0000076b },
+ { AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE },
+ { AR5K_PHY(8), 0x02020200 },
+ { AR5K_PHY(9), 0x00000e0e },
+ { AR5K_PHY(10), 0x0a020201 },
+ { AR5K_PHY(11), 0x00036ffc },
+ { AR5K_PHY(12), 0x00000000 },
+ { AR5K_PHY(13), 0x00000e0e },
+ { AR5K_PHY(14), 0x00000007 },
+ { AR5K_PHY(15), 0x00020100 },
+ { AR5K_PHY(16), 0x89630000 },
+ { AR5K_PHY(17), 0x1372169c },
+ { AR5K_PHY(18), 0x0018b633 },
+ { AR5K_PHY(19), 0x1284613c },
+ { AR5K_PHY(20), 0x0de8b8e0 },
+ { AR5K_PHY(21), 0x00074859 },
+ { AR5K_PHY(22), 0x7e80beba },
+ { AR5K_PHY(23), 0x313a665e },
+ { AR5K_PHY_AGCCTL, 0x00001d08 },
+ { AR5K_PHY(25), 0x0001ce00 },
+ { AR5K_PHY(26), 0x409a4190 },
+ { AR5K_PHY(28), 0x0000000f },
+ { AR5K_PHY(29), 0x00000080 },
+ { AR5K_PHY(30), 0x00000004 },
+ { AR5K_PHY(31), 0x00000018 }, /* 0x987c */
+ { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */
+ { AR5K_PHY(65), 0x00000000 },
+ { AR5K_PHY(66), 0x00000000 },
+ { AR5K_PHY(67), 0x00800000 },
+ { AR5K_PHY(68), 0x00000003 },
+	/* BB gain table (64 bytes) */
+ { AR5K_BB_GAIN(0), 0x00000000 },
+ { AR5K_BB_GAIN(1), 0x00000020 },
+ { AR5K_BB_GAIN(2), 0x00000010 },
+ { AR5K_BB_GAIN(3), 0x00000030 },
+ { AR5K_BB_GAIN(4), 0x00000008 },
+ { AR5K_BB_GAIN(5), 0x00000028 },
+ { AR5K_BB_GAIN(6), 0x00000028 },
+ { AR5K_BB_GAIN(7), 0x00000004 },
+ { AR5K_BB_GAIN(8), 0x00000024 },
+ { AR5K_BB_GAIN(9), 0x00000014 },
+ { AR5K_BB_GAIN(10), 0x00000034 },
+ { AR5K_BB_GAIN(11), 0x0000000c },
+ { AR5K_BB_GAIN(12), 0x0000002c },
+ { AR5K_BB_GAIN(13), 0x00000002 },
+ { AR5K_BB_GAIN(14), 0x00000022 },
+ { AR5K_BB_GAIN(15), 0x00000012 },
+ { AR5K_BB_GAIN(16), 0x00000032 },
+ { AR5K_BB_GAIN(17), 0x0000000a },
+ { AR5K_BB_GAIN(18), 0x0000002a },
+ { AR5K_BB_GAIN(19), 0x00000001 },
+ { AR5K_BB_GAIN(20), 0x00000021 },
+ { AR5K_BB_GAIN(21), 0x00000011 },
+ { AR5K_BB_GAIN(22), 0x00000031 },
+ { AR5K_BB_GAIN(23), 0x00000009 },
+ { AR5K_BB_GAIN(24), 0x00000029 },
+ { AR5K_BB_GAIN(25), 0x00000005 },
+ { AR5K_BB_GAIN(26), 0x00000025 },
+ { AR5K_BB_GAIN(27), 0x00000015 },
+ { AR5K_BB_GAIN(28), 0x00000035 },
+ { AR5K_BB_GAIN(29), 0x0000000d },
+ { AR5K_BB_GAIN(30), 0x0000002d },
+ { AR5K_BB_GAIN(31), 0x00000003 },
+ { AR5K_BB_GAIN(32), 0x00000023 },
+ { AR5K_BB_GAIN(33), 0x00000013 },
+ { AR5K_BB_GAIN(34), 0x00000033 },
+ { AR5K_BB_GAIN(35), 0x0000000b },
+ { AR5K_BB_GAIN(36), 0x0000002b },
+ { AR5K_BB_GAIN(37), 0x00000007 },
+ { AR5K_BB_GAIN(38), 0x00000027 },
+ { AR5K_BB_GAIN(39), 0x00000017 },
+ { AR5K_BB_GAIN(40), 0x00000037 },
+ { AR5K_BB_GAIN(41), 0x0000000f },
+ { AR5K_BB_GAIN(42), 0x0000002f },
+ { AR5K_BB_GAIN(43), 0x0000002f },
+ { AR5K_BB_GAIN(44), 0x0000002f },
+ { AR5K_BB_GAIN(45), 0x0000002f },
+ { AR5K_BB_GAIN(46), 0x0000002f },
+ { AR5K_BB_GAIN(47), 0x0000002f },
+ { AR5K_BB_GAIN(48), 0x0000002f },
+ { AR5K_BB_GAIN(49), 0x0000002f },
+ { AR5K_BB_GAIN(50), 0x0000002f },
+ { AR5K_BB_GAIN(51), 0x0000002f },
+ { AR5K_BB_GAIN(52), 0x0000002f },
+ { AR5K_BB_GAIN(53), 0x0000002f },
+ { AR5K_BB_GAIN(54), 0x0000002f },
+ { AR5K_BB_GAIN(55), 0x0000002f },
+ { AR5K_BB_GAIN(56), 0x0000002f },
+ { AR5K_BB_GAIN(57), 0x0000002f },
+ { AR5K_BB_GAIN(58), 0x0000002f },
+ { AR5K_BB_GAIN(59), 0x0000002f },
+ { AR5K_BB_GAIN(60), 0x0000002f },
+ { AR5K_BB_GAIN(61), 0x0000002f },
+ { AR5K_BB_GAIN(62), 0x0000002f },
+ { AR5K_BB_GAIN(63), 0x0000002f },
+	/* 5110 RF gain table (64 bytes) */
+ { AR5K_RF_GAIN(0), 0x0000001d },
+ { AR5K_RF_GAIN(1), 0x0000005d },
+ { AR5K_RF_GAIN(2), 0x0000009d },
+ { AR5K_RF_GAIN(3), 0x000000dd },
+ { AR5K_RF_GAIN(4), 0x0000011d },
+ { AR5K_RF_GAIN(5), 0x00000021 },
+ { AR5K_RF_GAIN(6), 0x00000061 },
+ { AR5K_RF_GAIN(7), 0x000000a1 },
+ { AR5K_RF_GAIN(8), 0x000000e1 },
+ { AR5K_RF_GAIN(9), 0x00000031 },
+ { AR5K_RF_GAIN(10), 0x00000071 },
+ { AR5K_RF_GAIN(11), 0x000000b1 },
+ { AR5K_RF_GAIN(12), 0x0000001c },
+ { AR5K_RF_GAIN(13), 0x0000005c },
+ { AR5K_RF_GAIN(14), 0x00000029 },
+ { AR5K_RF_GAIN(15), 0x00000069 },
+ { AR5K_RF_GAIN(16), 0x000000a9 },
+ { AR5K_RF_GAIN(17), 0x00000020 },
+ { AR5K_RF_GAIN(18), 0x00000019 },
+ { AR5K_RF_GAIN(19), 0x00000059 },
+ { AR5K_RF_GAIN(20), 0x00000099 },
+ { AR5K_RF_GAIN(21), 0x00000030 },
+ { AR5K_RF_GAIN(22), 0x00000005 },
+ { AR5K_RF_GAIN(23), 0x00000025 },
+ { AR5K_RF_GAIN(24), 0x00000065 },
+ { AR5K_RF_GAIN(25), 0x000000a5 },
+ { AR5K_RF_GAIN(26), 0x00000028 },
+ { AR5K_RF_GAIN(27), 0x00000068 },
+ { AR5K_RF_GAIN(28), 0x0000001f },
+ { AR5K_RF_GAIN(29), 0x0000001e },
+ { AR5K_RF_GAIN(30), 0x00000018 },
+ { AR5K_RF_GAIN(31), 0x00000058 },
+ { AR5K_RF_GAIN(32), 0x00000098 },
+ { AR5K_RF_GAIN(33), 0x00000003 },
+ { AR5K_RF_GAIN(34), 0x00000004 },
+ { AR5K_RF_GAIN(35), 0x00000044 },
+ { AR5K_RF_GAIN(36), 0x00000084 },
+ { AR5K_RF_GAIN(37), 0x00000013 },
+ { AR5K_RF_GAIN(38), 0x00000012 },
+ { AR5K_RF_GAIN(39), 0x00000052 },
+ { AR5K_RF_GAIN(40), 0x00000092 },
+ { AR5K_RF_GAIN(41), 0x000000d2 },
+ { AR5K_RF_GAIN(42), 0x0000002b },
+ { AR5K_RF_GAIN(43), 0x0000002a },
+ { AR5K_RF_GAIN(44), 0x0000006a },
+ { AR5K_RF_GAIN(45), 0x000000aa },
+ { AR5K_RF_GAIN(46), 0x0000001b },
+ { AR5K_RF_GAIN(47), 0x0000001a },
+ { AR5K_RF_GAIN(48), 0x0000005a },
+ { AR5K_RF_GAIN(49), 0x0000009a },
+ { AR5K_RF_GAIN(50), 0x000000da },
+ { AR5K_RF_GAIN(51), 0x00000006 },
+ { AR5K_RF_GAIN(52), 0x00000006 },
+ { AR5K_RF_GAIN(53), 0x00000006 },
+ { AR5K_RF_GAIN(54), 0x00000006 },
+ { AR5K_RF_GAIN(55), 0x00000006 },
+ { AR5K_RF_GAIN(56), 0x00000006 },
+ { AR5K_RF_GAIN(57), 0x00000006 },
+ { AR5K_RF_GAIN(58), 0x00000006 },
+ { AR5K_RF_GAIN(59), 0x00000006 },
+ { AR5K_RF_GAIN(60), 0x00000006 },
+ { AR5K_RF_GAIN(61), 0x00000006 },
+ { AR5K_RF_GAIN(62), 0x00000006 },
+ { AR5K_RF_GAIN(63), 0x00000006 },
+ /* PHY activation */
+ { AR5K_PHY(53), 0x00000020 },
+ { AR5K_PHY(51), 0x00000004 },
+ { AR5K_PHY(50), 0x00060106 },
+ { AR5K_PHY(39), 0x0000006d },
+ { AR5K_PHY(48), 0x00000000 },
+ { AR5K_PHY(52), 0x00000014 },
+ { AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE },
+};
+
+/* Initial register settings for AR5211 */
+static const struct ath5k_ini ar5211_ini[] = {
+ { AR5K_RXDP, 0x00000000 },
+ { AR5K_RTSD0, 0x84849c9c },
+ { AR5K_RTSD1, 0x7c7c7c7c },
+ { AR5K_RXCFG, 0x00000005 },
+ { AR5K_MIBC, 0x00000000 },
+ { AR5K_TOPS, 0x00000008 },
+ { AR5K_RXNOFRM, 0x00000008 },
+ { AR5K_TXNOFRM, 0x00000010 },
+ { AR5K_RPGTO, 0x00000000 },
+ { AR5K_RFCNT, 0x0000001f },
+ { AR5K_QUEUE_TXDP(0), 0x00000000 },
+ { AR5K_QUEUE_TXDP(1), 0x00000000 },
+ { AR5K_QUEUE_TXDP(2), 0x00000000 },
+ { AR5K_QUEUE_TXDP(3), 0x00000000 },
+ { AR5K_QUEUE_TXDP(4), 0x00000000 },
+ { AR5K_QUEUE_TXDP(5), 0x00000000 },
+ { AR5K_QUEUE_TXDP(6), 0x00000000 },
+ { AR5K_QUEUE_TXDP(7), 0x00000000 },
+ { AR5K_QUEUE_TXDP(8), 0x00000000 },
+ { AR5K_QUEUE_TXDP(9), 0x00000000 },
+ { AR5K_DCU_FP, 0x00000000 },
+ { AR5K_STA_ID1, 0x00000000 },
+ { AR5K_BSS_ID0, 0x00000000 },
+ { AR5K_BSS_ID1, 0x00000000 },
+ { AR5K_RSSI_THR, 0x00000000 },
+ { AR5K_CFP_PERIOD_5211, 0x00000000 },
+ { AR5K_TIMER0_5211, 0x00000030 },
+ { AR5K_TIMER1_5211, 0x0007ffff },
+ { AR5K_TIMER2_5211, 0x01ffffff },
+ { AR5K_TIMER3_5211, 0x00000031 },
+ { AR5K_CFP_DUR_5211, 0x00000000 },
+ { AR5K_RX_FILTER_5211, 0x00000000 },
+ { AR5K_MCAST_FILTER0_5211, 0x00000000 },
+ { AR5K_MCAST_FILTER1_5211, 0x00000002 },
+ { AR5K_DIAG_SW_5211, 0x00000000 },
+ { AR5K_ADDAC_TEST, 0x00000000 },
+ { AR5K_DEFAULT_ANTENNA, 0x00000000 },
+ /* PHY registers */
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY(3), 0x2d849093 },
+ { AR5K_PHY(4), 0x7d32e000 },
+ { AR5K_PHY(5), 0x00000f6b },
+ { AR5K_PHY_ACT, 0x00000000 },
+ { AR5K_PHY(11), 0x00026ffe },
+ { AR5K_PHY(12), 0x00000000 },
+ { AR5K_PHY(15), 0x00020100 },
+ { AR5K_PHY(16), 0x206a017a },
+ { AR5K_PHY(19), 0x1284613c },
+ { AR5K_PHY(21), 0x00000859 },
+ { AR5K_PHY(26), 0x409a4190 }, /* 0x9868 */
+ { AR5K_PHY(27), 0x050cb081 },
+ { AR5K_PHY(28), 0x0000000f },
+ { AR5K_PHY(29), 0x00000080 },
+ { AR5K_PHY(30), 0x0000000c },
+ { AR5K_PHY(64), 0x00000000 },
+ { AR5K_PHY(65), 0x00000000 },
+ { AR5K_PHY(66), 0x00000000 },
+ { AR5K_PHY(67), 0x00800000 },
+ { AR5K_PHY(68), 0x00000001 },
+ { AR5K_PHY(71), 0x0000092a },
+ { AR5K_PHY_IQ, 0x00000000 },
+ { AR5K_PHY(73), 0x00058a05 },
+ { AR5K_PHY(74), 0x00000001 },
+ { AR5K_PHY(75), 0x00000000 },
+ { AR5K_PHY_PAPD_PROBE, 0x00000000 },
+ { AR5K_PHY(77), 0x00000000 }, /* 0x9934 */
+ { AR5K_PHY(78), 0x00000000 }, /* 0x9938 */
+ { AR5K_PHY(79), 0x0000003f }, /* 0x993c */
+ { AR5K_PHY(80), 0x00000004 },
+ { AR5K_PHY(82), 0x00000000 },
+ { AR5K_PHY(83), 0x00000000 },
+ { AR5K_PHY(84), 0x00000000 },
+ { AR5K_PHY_RADAR, 0x5d50f14c },
+ { AR5K_PHY(86), 0x00000018 },
+ { AR5K_PHY(87), 0x004b6a8e },
+	/* Initial Power table (32 bytes),
+	 * common to all cards/modes.
+	 * Note: this table is rewritten later during
+	 * txpower setup using calibration data etc.,
+	 * so the next write is not common to all */
+ { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff },
+ { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff },
+ { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff },
+ { AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff },
+ { AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff },
+ { AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff },
+ { AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff },
+ { AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff },
+ { AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff },
+ { AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff },
+ { AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff },
+ { AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff },
+ { AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff },
+ { AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff },
+ { AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff },
+ { AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff },
+ { AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff },
+ { AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff },
+ { AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff },
+ { AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff },
+ { AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(24), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff },
+ { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff },
+ { AR5K_PHY_CCKTXCTL, 0x00000000 },
+ { AR5K_PHY(642), 0x503e4646 },
+ { AR5K_PHY_GAIN_2GHZ, 0x6480416c },
+ { AR5K_PHY(644), 0x0199a003 },
+ { AR5K_PHY(645), 0x044cd610 },
+ { AR5K_PHY(646), 0x13800040 },
+ { AR5K_PHY(647), 0x1be00060 },
+ { AR5K_PHY(648), 0x0c53800a },
+ { AR5K_PHY(649), 0x0014df3b },
+ { AR5K_PHY(650), 0x000001b5 },
+ { AR5K_PHY(651), 0x00000020 },
+};
+
+/* Initial mode-specific settings for AR5211
+ * 5211 supports OFDM-only g (draft g) but this
+ * still needs testing! */
+static const struct ath5k_ini_mode ar5211_ini_mode[] = {
+ { AR5K_TXCFG,
+ /* A B G */
+ { 0x00000015, 0x0000001d, 0x00000015 } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(0),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(1),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(2),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(3),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(4),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(5),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(6),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(7),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(8),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(9),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_DCU_GBL_IFS_SLOT,
+ { 0x00000168, 0x000001b8, 0x00000168 } },
+ { AR5K_DCU_GBL_IFS_SIFS,
+ { 0x00000230, 0x000000b0, 0x00000230 } },
+ { AR5K_DCU_GBL_IFS_EIFS,
+ { 0x00000d98, 0x00001f48, 0x00000d98 } },
+ { AR5K_DCU_GBL_IFS_MISC,
+ { 0x0000a0e0, 0x00005880, 0x0000a0e0 } },
+ { AR5K_TIME_OUT,
+ { 0x04000400, 0x20003000, 0x04000400 } },
+ { AR5K_USEC_5211,
+ { 0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } },
+ { AR5K_PHY(8),
+ { 0x02020200, 0x02010200, 0x02020200 } },
+ { AR5K_PHY_RF_CTL2,
+ { 0x00000e0e, 0x00000707, 0x00000e0e } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05010000, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000007, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_SETTLING,
+ { 0x1372169c, 0x137216a8, 0x1372169c } },
+ { AR5K_PHY_GAIN,
+ { 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
+ { AR5K_PHY_SIG,
+ { 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
+ { AR5K_PHY_AGCCTL,
+ { 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
+ { AR5K_PHY_NF,
+ { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x00002710, 0x0000157c, 0x00002710 } },
+ { AR5K_PHY(70),
+ { 0x00000190, 0x00000084, 0x00000190 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
+ { AR5K_PHY_PCDAC_TXPOWER_BASE,
+ { 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
+ { AR5K_RF_BUFFER_CONTROL_4,
+ { 0x00000010, 0x00000010, 0x00000010 } },
+};
+
+/* Initial register settings for AR5212 and newer chips */
+static const struct ath5k_ini ar5212_ini_common_start[] = {
+ { AR5K_RXDP, 0x00000000 },
+ { AR5K_RXCFG, 0x00000005 },
+ { AR5K_MIBC, 0x00000000 },
+ { AR5K_TOPS, 0x00000008 },
+ { AR5K_RXNOFRM, 0x00000008 },
+ { AR5K_TXNOFRM, 0x00000010 },
+ { AR5K_RPGTO, 0x00000000 },
+ { AR5K_RFCNT, 0x0000001f },
+ { AR5K_QUEUE_TXDP(0), 0x00000000 },
+ { AR5K_QUEUE_TXDP(1), 0x00000000 },
+ { AR5K_QUEUE_TXDP(2), 0x00000000 },
+ { AR5K_QUEUE_TXDP(3), 0x00000000 },
+ { AR5K_QUEUE_TXDP(4), 0x00000000 },
+ { AR5K_QUEUE_TXDP(5), 0x00000000 },
+ { AR5K_QUEUE_TXDP(6), 0x00000000 },
+ { AR5K_QUEUE_TXDP(7), 0x00000000 },
+ { AR5K_QUEUE_TXDP(8), 0x00000000 },
+ { AR5K_QUEUE_TXDP(9), 0x00000000 },
+ { AR5K_DCU_FP, 0x00000000 },
+ { AR5K_DCU_TXP, 0x00000000 },
+ /* Tx filter table 0 (32 entries) */
+ { AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */
+ { AR5K_DCU_TX_FILTER_0(1), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(2), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(3), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */
+ { AR5K_DCU_TX_FILTER_0(5), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(6), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(7), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */
+ { AR5K_DCU_TX_FILTER_0(9), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(10), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(11), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */
+ { AR5K_DCU_TX_FILTER_0(13), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(14), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(15), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */
+ { AR5K_DCU_TX_FILTER_0(17), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(18), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(19), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */
+ { AR5K_DCU_TX_FILTER_0(21), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(22), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(23), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */
+ { AR5K_DCU_TX_FILTER_0(25), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(26), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(27), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */
+ { AR5K_DCU_TX_FILTER_0(29), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(30), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_0(31), 0x00000000 },
+ /* Tx filter table 1 (16 entries) */
+ { AR5K_DCU_TX_FILTER_1(0), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(1), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(2), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(3), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(4), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(5), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(6), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(7), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(8), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(9), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(10), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(11), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(12), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(13), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(14), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_1(15), 0x00000000 },
+ { AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
+ { AR5K_DCU_TX_FILTER_SET, 0x00000000 },
+ { AR5K_STA_ID1, 0x00000000 },
+ { AR5K_BSS_ID0, 0x00000000 },
+ { AR5K_BSS_ID1, 0x00000000 },
+ { AR5K_BEACON_5211, 0x00000000 },
+ { AR5K_CFP_PERIOD_5211, 0x00000000 },
+ { AR5K_TIMER0_5211, 0x00000030 },
+ { AR5K_TIMER1_5211, 0x0007ffff },
+ { AR5K_TIMER2_5211, 0x01ffffff },
+ { AR5K_TIMER3_5211, 0x00000031 },
+ { AR5K_CFP_DUR_5211, 0x00000000 },
+ { AR5K_RX_FILTER_5211, 0x00000000 },
+ { AR5K_DIAG_SW_5211, 0x00000000 },
+ { AR5K_ADDAC_TEST, 0x00000000 },
+ { AR5K_DEFAULT_ANTENNA, 0x00000000 },
+ { AR5K_FRAME_CTL_QOSM, 0x000fc78f },
+ { AR5K_XRMODE, 0x2a82301a },
+ { AR5K_XRDELAY, 0x05dc01e0 },
+ { AR5K_XRTIMEOUT, 0x1f402710 },
+ { AR5K_XRCHIRP, 0x01f40000 },
+ { AR5K_XRSTOMP, 0x00001e1c },
+ { AR5K_SLEEP0, 0x0002aaaa },
+ { AR5K_SLEEP1, 0x02005555 },
+ { AR5K_SLEEP2, 0x00000000 },
+ { AR_BSSMSKL, 0xffffffff },
+ { AR_BSSMSKU, 0x0000ffff },
+ { AR5K_TXPC, 0x00000000 },
+ { AR5K_PROFCNT_TX, 0x00000000 },
+ { AR5K_PROFCNT_RX, 0x00000000 },
+ { AR5K_PROFCNT_RXCLR, 0x00000000 },
+ { AR5K_PROFCNT_CYCLE, 0x00000000 },
+ { AR5K_QUIET_CTL1, 0x00000088 },
+	/* Initial rate duration table (32 entries) */
+ { AR5K_RATE_DUR(0), 0x00000000 },
+ { AR5K_RATE_DUR(1), 0x0000008c },
+ { AR5K_RATE_DUR(2), 0x000000e4 },
+ { AR5K_RATE_DUR(3), 0x000002d5 },
+ { AR5K_RATE_DUR(4), 0x00000000 },
+ { AR5K_RATE_DUR(5), 0x00000000 },
+ { AR5K_RATE_DUR(6), 0x000000a0 },
+ { AR5K_RATE_DUR(7), 0x000001c9 },
+ { AR5K_RATE_DUR(8), 0x0000002c },
+ { AR5K_RATE_DUR(9), 0x0000002c },
+ { AR5K_RATE_DUR(10), 0x00000030 },
+ { AR5K_RATE_DUR(11), 0x0000003c },
+ { AR5K_RATE_DUR(12), 0x0000002c },
+ { AR5K_RATE_DUR(13), 0x0000002c },
+ { AR5K_RATE_DUR(14), 0x00000030 },
+ { AR5K_RATE_DUR(15), 0x0000003c },
+ { AR5K_RATE_DUR(16), 0x00000000 },
+ { AR5K_RATE_DUR(17), 0x00000000 },
+ { AR5K_RATE_DUR(18), 0x00000000 },
+ { AR5K_RATE_DUR(19), 0x00000000 },
+ { AR5K_RATE_DUR(20), 0x00000000 },
+ { AR5K_RATE_DUR(21), 0x00000000 },
+ { AR5K_RATE_DUR(22), 0x00000000 },
+ { AR5K_RATE_DUR(23), 0x00000000 },
+ { AR5K_RATE_DUR(24), 0x000000d5 },
+ { AR5K_RATE_DUR(25), 0x000000df },
+ { AR5K_RATE_DUR(26), 0x00000102 },
+ { AR5K_RATE_DUR(27), 0x0000013a },
+ { AR5K_RATE_DUR(28), 0x00000075 },
+ { AR5K_RATE_DUR(29), 0x0000007f },
+ { AR5K_RATE_DUR(30), 0x000000a2 },
+ { AR5K_RATE_DUR(31), 0x00000000 },
+ { AR5K_QUIET_CTL2, 0x00010002 },
+ { AR5K_TSF_PARM, 0x00000001 },
+ { AR5K_QOS_NOACK, 0x000000c0 },
+ { AR5K_PHY_ERR_FIL, 0x00000000 },
+ { AR5K_XRLAT_TX, 0x00000168 },
+ { AR5K_ACKSIFS, 0x00000000 },
+	/* Rate -> dB table
+	 * note the byte ordering: ...03<-02<-01<-00 */
+ { AR5K_RATE2DB(0), 0x03020100 },
+ { AR5K_RATE2DB(1), 0x07060504 },
+ { AR5K_RATE2DB(2), 0x0b0a0908 },
+ { AR5K_RATE2DB(3), 0x0f0e0d0c },
+ { AR5K_RATE2DB(4), 0x13121110 },
+ { AR5K_RATE2DB(5), 0x17161514 },
+ { AR5K_RATE2DB(6), 0x1b1a1918 },
+ { AR5K_RATE2DB(7), 0x1f1e1d1c },
+ /* Db -> Rate table */
+ { AR5K_DB2RATE(0), 0x03020100 },
+ { AR5K_DB2RATE(1), 0x07060504 },
+ { AR5K_DB2RATE(2), 0x0b0a0908 },
+ { AR5K_DB2RATE(3), 0x0f0e0d0c },
+ { AR5K_DB2RATE(4), 0x13121110 },
+ { AR5K_DB2RATE(5), 0x17161514 },
+ { AR5K_DB2RATE(6), 0x1b1a1918 },
+ { AR5K_DB2RATE(7), 0x1f1e1d1c },
+ /* PHY registers (Common settings
+ * for all chips/modes) */
+ { AR5K_PHY(3), 0xad848e19 },
+ { AR5K_PHY(4), 0x7d28e000 },
+ { AR5K_PHY_TIMING_3, 0x9c0a9f6b },
+ { AR5K_PHY_ACT, 0x00000000 },
+ { AR5K_PHY(16), 0x206a017a },
+ { AR5K_PHY(21), 0x00000859 },
+ { AR5K_PHY_BIN_MASK_1, 0x00000000 },
+ { AR5K_PHY_BIN_MASK_2, 0x00000000 },
+ { AR5K_PHY_BIN_MASK_3, 0x00000000 },
+ { AR5K_PHY_BIN_MASK_CTL, 0x00800000 },
+ { AR5K_PHY_ANT_CTL, 0x00000001 },
+ /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */
+ { AR5K_PHY_MAX_RX_LEN, 0x00000c80 },
+ { AR5K_PHY_IQ, 0x05100000 },
+ { AR5K_PHY_WARM_RESET, 0x00000001 },
+ { AR5K_PHY_CTL, 0x00000004 },
+ { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 },
+ { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d },
+ { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f },
+ { AR5K_PHY(82), 0x9280b212 },
+ { AR5K_PHY_RADAR, 0x5d50e188 },
+ /*{ AR5K_PHY(86), 0x000000ff },*/
+ { AR5K_PHY(87), 0x004b6a8e },
+ { AR5K_PHY_NFTHRES, 0x000003ce },
+ { AR5K_PHY_RESTART, 0x192fb515 },
+ { AR5K_PHY(94), 0x00000001 },
+ { AR5K_PHY_RFBUS_REQ, 0x00000000 },
+ /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */
+ /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */
+ { AR5K_PHY(644), 0x00806333 },
+ { AR5K_PHY(645), 0x00106c10 },
+ { AR5K_PHY(646), 0x009c4060 },
+ /* { AR5K_PHY(647), 0x1483800a }, */
+ /* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */
+ { AR5K_PHY(648), 0x018830c6 },
+ { AR5K_PHY(649), 0x00000400 },
+ /*{ AR5K_PHY(650), 0x000001b5 },*/
+ { AR5K_PHY(651), 0x00000000 },
+ { AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
+ { AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
+ /*{ AR5K_PHY(655), 0x13c889af },*/
+ { AR5K_PHY(656), 0x38490a20 },
+ { AR5K_PHY(657), 0x00007bb6 },
+ { AR5K_PHY(658), 0x0fff3ffc },
+};
+
+/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */
+static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
+ { AR5K_QUEUE_DFS_LOCAL_IFS(0),
+ /* A/XR B G */
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(1),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(2),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(3),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(4),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(5),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(6),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(7),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(8),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_QUEUE_DFS_LOCAL_IFS(9),
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { AR5K_DCU_GBL_IFS_SIFS,
+ { 0x00000230, 0x000000b0, 0x00000160 } },
+ { AR5K_DCU_GBL_IFS_SLOT,
+ { 0x00000168, 0x000001b8, 0x0000018c } },
+ { AR5K_DCU_GBL_IFS_EIFS,
+ { 0x00000e60, 0x00001f1c, 0x00003e38 } },
+ { AR5K_DCU_GBL_IFS_MISC,
+ { 0x0000a0e0, 0x00005880, 0x0000b0e0 } },
+ { AR5K_TIME_OUT,
+ { 0x03e803e8, 0x04200420, 0x08400840 } },
+ { AR5K_PHY(8),
+ { 0x02020200, 0x02010200, 0x02020200 } },
+ { AR5K_PHY_RF_CTL2,
+ { 0x00000e0e, 0x00000707, 0x00000e0e } },
+ { AR5K_PHY_SETTLING,
+ { 0x1372161c, 0x13721722, 0x137216a2 } },
+ { AR5K_PHY_AGCCTL,
+ { 0x00009d10, 0x00009d18, 0x00009d18 } },
+ { AR5K_PHY_NF,
+ { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
+ { AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ { 0x409a4190, 0x409a4190, 0x409a4190 } },
+ { AR5K_PHY(70),
+ { 0x000001b8, 0x00000084, 0x00000108 } },
+ { AR5K_PHY_OFDM_SELFCORR,
+ { 0x10058a05, 0x10058a05, 0x10058a05 } },
+ { 0xa230,
+ { 0x00000000, 0x00000000, 0x00000108 } },
+};
+
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
+static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
+ { AR5K_TXCFG,
+ /* A/XR B G */
+ { 0x00008015, 0x00008015, 0x00008015 } },
+ { AR5K_USEC_5211,
+ { 0x128d8fa7, 0x04e00f95, 0x12e00fab } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05010100, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000007, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_GAIN,
+ { 0x0018da5a, 0x0018ca69, 0x0018ca69 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
+ { AR5K_PHY_SIG,
+ { 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x3137665e, 0x3137665e, 0x3137665e } },
+ { AR5K_PHY_WEAK_OFDM_LOW_THR,
+ { 0x050cb081, 0x050cb081, 0x050cb080 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x00002710, 0x0000157c, 0x00002af8 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0xf7b81020, 0xf7b80d20, 0xf7b81020 } },
+ { AR5K_PHY_GAIN_2GHZ,
+ { 0x642c416a, 0x6440416a, 0x6440416a } },
+ { AR5K_PHY_CCK_RX_CTL_4,
+ { 0x1883800a, 0x1873800a, 0x1883800a } },
+};
+
+/* Common for all modes */
+static const struct ath5k_ini rf5111_ini_common_end[] = {
+ { AR5K_DCU_FP, 0x00000000 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x00022ffe },
+ { 0x983c, 0x00020100 },
+ { AR5K_PHY_GAIN_OFFSET, 0x1284613c },
+ { AR5K_PHY_PAPD_PROBE, 0x00004883 },
+ { 0x9940, 0x00000004 },
+ { 0x9958, 0x000000ff },
+ { 0x9974, 0x00000000 },
+ { AR5K_PHY_SPENDING, 0x00000018 },
+ { AR5K_PHY_CCKTXCTL, 0x00000000 },
+ { AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 },
+ { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
+ { 0xa23c, 0x13c889af },
+};
+
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
+static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
+ { AR5K_TXCFG,
+ /* A/XR B G */
+ { 0x00008015, 0x00008015, 0x00008015 } },
+ { AR5K_USEC_5211,
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000007, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_GAIN,
+ { 0x0018da6d, 0x0018ca75, 0x0018ca75 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
+ { AR5K_PHY_SIG,
+ { 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x3137665e, 0x3137665e, 0x3137665e } },
+ { AR5K_PHY_WEAK_OFDM_LOW_THR,
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0xf7b81020, 0xf7b80d10, 0xf7b81010 } },
+ { AR5K_PHY_CCKTXCTL,
+ { 0x00000000, 0x00000008, 0x00000008 } },
+ { AR5K_PHY_CCK_CROSSCORR,
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
+ { AR5K_PHY_GAIN_2GHZ,
+ { 0x642c0140, 0x6442c160, 0x6442c160 } },
+ { AR5K_PHY_CCK_RX_CTL_4,
+ { 0x1883800a, 0x1873800a, 0x1883800a } },
+};
+
+static const struct ath5k_ini rf5112_ini_common_end[] = {
+ { AR5K_DCU_FP, 0x00000000 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x00022ffe },
+ { 0x983c, 0x00020100 },
+ { AR5K_PHY_GAIN_OFFSET, 0x1284613c },
+ { AR5K_PHY_PAPD_PROBE, 0x00004882 },
+ { 0x9940, 0x00000004 },
+ { 0x9958, 0x000000ff },
+ { 0x9974, 0x00000000 },
+ { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
+ { 0xa23c, 0x13c889af },
+};
+
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
+static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
+ { AR5K_TXCFG,
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
+ { AR5K_USEC_5211,
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000007, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_GAIN,
+ { 0x0018fa61, 0x001a1a63, 0x001a1a63 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
+ { AR5K_PHY_SIG,
+ { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x3139605e, 0x3139605e, 0x3139605e } },
+ { AR5K_PHY_WEAK_OFDM_LOW_THR,
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
+ { AR5K_PHY_CCKTXCTL,
+ { 0x00000000, 0x00000000, 0x00000000 } },
+ { AR5K_PHY_CCK_CROSSCORR,
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
+ { AR5K_PHY_GAIN_2GHZ,
+ { 0x002ec1e0, 0x002ac120, 0x002ac120 } },
+ { AR5K_PHY_CCK_RX_CTL_4,
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
+ { 0xa300,
+ { 0x18010000, 0x18010000, 0x18010000 } },
+ { 0xa304,
+ { 0x30032602, 0x30032602, 0x30032602 } },
+ { 0xa308,
+ { 0x48073e06, 0x48073e06, 0x48073e06 } },
+ { 0xa30c,
+ { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
+ { 0xa310,
+ { 0x641a600f, 0x641a600f, 0x641a600f } },
+ { 0xa314,
+ { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
+ { 0xa318,
+ { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
+ { 0xa31c,
+ { 0x90cf865b, 0x8ecf865b, 0x8ecf865b } },
+ { 0xa320,
+ { 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } },
+ { 0xa324,
+ { 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } },
+ { 0xa328,
+ { 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } },
+ { 0xa32c,
+ { 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } },
+ { 0xa330,
+ { 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } },
+ { 0xa334,
+ { 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
+};
+
+static const struct ath5k_ini rf5413_ini_common_end[] = {
+ { AR5K_DCU_FP, 0x000003e0 },
+ { AR5K_5414_CBCFG, 0x00000010 },
+ { AR5K_SEQ_MASK, 0x0000000f },
+ { 0x809c, 0x00000000 },
+ { 0x80a0, 0x00000000 },
+ { AR5K_MIC_QOS_CTL, 0x00000000 },
+ { AR5K_MIC_QOS_SEL, 0x00000000 },
+ { AR5K_MISC_MODE, 0x00000000 },
+ { AR5K_OFDM_FIL_CNT, 0x00000000 },
+ { AR5K_CCK_FIL_CNT, 0x00000000 },
+ { AR5K_PHYERR_CNT1, 0x00000000 },
+ { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
+ { AR5K_PHYERR_CNT2, 0x00000000 },
+ { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
+ { AR5K_TSF_THRES, 0x00000000 },
+ { 0x8140, 0x800003f9 },
+ { 0x8144, 0x00000000 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x0000a000 },
+ { 0x983c, 0x00200400 },
+ { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
+ { AR5K_PHY_SCR, 0x0000001f },
+ { AR5K_PHY_SLMT, 0x00000080 },
+ { AR5K_PHY_SCAL, 0x0000000e },
+ { 0x9958, 0x00081fff },
+ { AR5K_PHY_TIMING_7, 0x00000000 },
+ { AR5K_PHY_TIMING_8, 0x02800000 },
+ { AR5K_PHY_TIMING_11, 0x00000000 },
+ { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
+ { 0x99e4, 0xaaaaaaaa },
+ { 0x99e8, 0x3c466478 },
+ { 0x99ec, 0x000000aa },
+ { AR5K_PHY_SCLOCK, 0x0000000c },
+ { AR5K_PHY_SDELAY, 0x000000ff },
+ { AR5K_PHY_SPENDING, 0x00000014 },
+ { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
+ { 0xa23c, 0x93c889af },
+ { AR5K_PHY_FAST_ADC, 0x00000001 },
+ { 0xa250, 0x0000a000 },
+ { AR5K_PHY_BLUETOOTH, 0x00000000 },
+ { AR5K_PHY_TPC_RG1, 0x0cc75380 },
+ { 0xa25c, 0x0f0f0f01 },
+ { 0xa260, 0x5f690f01 },
+ { 0xa264, 0x00418a11 },
+ { 0xa268, 0x00000000 },
+ { AR5K_PHY_TPC_RG5, 0x0c30c16a },
+ { 0xa270, 0x00820820 },
+ { 0xa274, 0x081b7caa },
+ { 0xa278, 0x1ce739ce },
+ { 0xa27c, 0x051701ce },
+ { 0xa338, 0x00000000 },
+ { 0xa33c, 0x00000000 },
+ { 0xa340, 0x00000000 },
+ { 0xa344, 0x00000000 },
+ { 0xa348, 0x3fffffff },
+ { 0xa34c, 0x3fffffff },
+ { 0xa350, 0x3fffffff },
+ { 0xa354, 0x0003ffff },
+ { 0xa358, 0x79a8aa1f },
+ { 0xa35c, 0x066c420f },
+ { 0xa360, 0x0f282207 },
+ { 0xa364, 0x17601685 },
+ { 0xa368, 0x1f801104 },
+ { 0xa36c, 0x37a00c03 },
+ { 0xa370, 0x3fc40883 },
+ { 0xa374, 0x57c00803 },
+ { 0xa378, 0x5fd80682 },
+ { 0xa37c, 0x7fe00482 },
+ { 0xa380, 0x7f3c7bba },
+ { 0xa384, 0xf3307ff0 },
+};
+
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
+/* XXX: a mode ? */
+static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
+ { AR5K_TXCFG,
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
+ { AR5K_USEC_5211,
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05020000, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e00, 0x00000e00, 0x00000e00 } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000002, 0x0000000a, 0x0000000a } },
+ { AR5K_PHY_GAIN,
+ { 0x0018da6d, 0x001a6a64, 0x001a6a64 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } },
+ { AR5K_PHY_SIG,
+ { 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x3137665e, 0x3137665e, 0x3139605e } },
+ { AR5K_PHY_WEAK_OFDM_LOW_THR,
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
+ { AR5K_PHY_CCKTXCTL,
+ { 0x00000000, 0x00000000, 0x00000000 } },
+ { AR5K_PHY_CCK_CROSSCORR,
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
+ { AR5K_PHY_GAIN_2GHZ,
+ { 0x002c0140, 0x0042c140, 0x0042c140 } },
+ { AR5K_PHY_CCK_RX_CTL_4,
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
+};
+
+static const struct ath5k_ini rf2413_ini_common_end[] = {
+ { AR5K_DCU_FP, 0x000003e0 },
+ { AR5K_SEQ_MASK, 0x0000000f },
+ { AR5K_MIC_QOS_CTL, 0x00000000 },
+ { AR5K_MIC_QOS_SEL, 0x00000000 },
+ { AR5K_MISC_MODE, 0x00000000 },
+ { AR5K_OFDM_FIL_CNT, 0x00000000 },
+ { AR5K_CCK_FIL_CNT, 0x00000000 },
+ { AR5K_PHYERR_CNT1, 0x00000000 },
+ { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
+ { AR5K_PHYERR_CNT2, 0x00000000 },
+ { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
+ { AR5K_TSF_THRES, 0x00000000 },
+ { 0x8140, 0x800000a8 },
+ { 0x8144, 0x00000000 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x0000a000 },
+ { 0x983c, 0x00200400 },
+ { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
+ { AR5K_PHY_SCR, 0x0000001f },
+ { AR5K_PHY_SLMT, 0x00000080 },
+ { AR5K_PHY_SCAL, 0x0000000e },
+ { 0x9958, 0x000000ff },
+ { AR5K_PHY_TIMING_7, 0x00000000 },
+ { AR5K_PHY_TIMING_8, 0x02800000 },
+ { AR5K_PHY_TIMING_11, 0x00000000 },
+ { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
+ { 0x99e4, 0xaaaaaaaa },
+ { 0x99e8, 0x3c466478 },
+ { 0x99ec, 0x000000aa },
+ { AR5K_PHY_SCLOCK, 0x0000000c },
+ { AR5K_PHY_SDELAY, 0x000000ff },
+ { AR5K_PHY_SPENDING, 0x00000014 },
+ { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
+ { 0xa23c, 0x93c889af },
+ { AR5K_PHY_FAST_ADC, 0x00000001 },
+ { 0xa250, 0x0000a000 },
+ { AR5K_PHY_BLUETOOTH, 0x00000000 },
+ { AR5K_PHY_TPC_RG1, 0x0cc75380 },
+ { 0xa25c, 0x0f0f0f01 },
+ { 0xa260, 0x5f690f01 },
+ { 0xa264, 0x00418a11 },
+ { 0xa268, 0x00000000 },
+ { AR5K_PHY_TPC_RG5, 0x0c30c16a },
+ { 0xa270, 0x00820820 },
+ { 0xa274, 0x001b7caa },
+ { 0xa278, 0x1ce739ce },
+ { 0xa27c, 0x051701ce },
+ { 0xa300, 0x18010000 },
+ { 0xa304, 0x30032602 },
+ { 0xa308, 0x48073e06 },
+ { 0xa30c, 0x560b4c0a },
+ { 0xa310, 0x641a600f },
+ { 0xa314, 0x784f6e1b },
+ { 0xa318, 0x868f7c5a },
+ { 0xa31c, 0x8ecf865b },
+ { 0xa320, 0x9d4f970f },
+ { 0xa324, 0xa5cfa18f },
+ { 0xa328, 0xb55faf1f },
+ { 0xa32c, 0xbddfb99f },
+ { 0xa330, 0xcd7fc73f },
+ { 0xa334, 0xd5ffd1bf },
+ { 0xa338, 0x00000000 },
+ { 0xa33c, 0x00000000 },
+ { 0xa340, 0x00000000 },
+ { 0xa344, 0x00000000 },
+ { 0xa348, 0x3fffffff },
+ { 0xa34c, 0x3fffffff },
+ { 0xa350, 0x3fffffff },
+ { 0xa354, 0x0003ffff },
+ { 0xa358, 0x79a8aa1f },
+ { 0xa35c, 0x066c420f },
+ { 0xa360, 0x0f282207 },
+ { 0xa364, 0x17601685 },
+ { 0xa368, 0x1f801104 },
+ { 0xa36c, 0x37a00c03 },
+ { 0xa370, 0x3fc40883 },
+ { 0xa374, 0x57c00803 },
+ { 0xa378, 0x5fd80682 },
+ { 0xa37c, 0x7fe00482 },
+ { 0xa380, 0x7f3c7bba },
+ { 0xa384, 0xf3307ff0 },
+};
+
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
+/* XXX: a mode ? */
+static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
+ { AR5K_TXCFG,
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
+ { AR5K_USEC_5211,
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000003, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_SETTLING,
+ { 0x1372161c, 0x13721722, 0x13721422 } },
+ { AR5K_PHY_GAIN,
+ { 0x0018fa61, 0x00199a65, 0x00199a65 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
+ { AR5K_PHY_SIG,
+ { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
+ { AR5K_PHY_AGCCOARSE,
+ { 0x3139605e, 0x3139605e, 0x3139605e } },
+ { AR5K_PHY_WEAK_OFDM_LOW_THR,
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { AR5K_PHY_RX_DELAY,
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
+ { AR5K_PHY_FRAME_CTL_5211,
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
+ { AR5K_PHY_CCKTXCTL,
+ { 0x00000000, 0x00000000, 0x00000000 } },
+ { AR5K_PHY_CCK_CROSSCORR,
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
+ { AR5K_PHY_GAIN_2GHZ,
+ { 0x00000140, 0x0052c140, 0x0052c140 } },
+ { AR5K_PHY_CCK_RX_CTL_4,
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
+ { 0xa324,
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa328,
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa32c,
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa330,
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa334,
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+};
+
+static const struct ath5k_ini rf2425_ini_common_end[] = {
+ { AR5K_DCU_FP, 0x000003e0 },
+ { AR5K_SEQ_MASK, 0x0000000f },
+ { 0x809c, 0x00000000 },
+ { 0x80a0, 0x00000000 },
+ { AR5K_MIC_QOS_CTL, 0x00000000 },
+ { AR5K_MIC_QOS_SEL, 0x00000000 },
+ { AR5K_MISC_MODE, 0x00000000 },
+ { AR5K_OFDM_FIL_CNT, 0x00000000 },
+ { AR5K_CCK_FIL_CNT, 0x00000000 },
+ { AR5K_PHYERR_CNT1, 0x00000000 },
+ { AR5K_PHYERR_CNT1_MASK, 0x00000000 },
+ { AR5K_PHYERR_CNT2, 0x00000000 },
+ { AR5K_PHYERR_CNT2_MASK, 0x00000000 },
+ { AR5K_TSF_THRES, 0x00000000 },
+ { 0x8140, 0x800003f9 },
+ { 0x8144, 0x00000000 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x0000a000 },
+ { 0x983c, 0x00200400 },
+ { AR5K_PHY_GAIN_OFFSET, 0x1284233c },
+ { AR5K_PHY_SCR, 0x0000001f },
+ { AR5K_PHY_SLMT, 0x00000080 },
+ { AR5K_PHY_SCAL, 0x0000000e },
+ { 0x9958, 0x00081fff },
+ { AR5K_PHY_TIMING_7, 0x00000000 },
+ { AR5K_PHY_TIMING_8, 0x02800000 },
+ { AR5K_PHY_TIMING_11, 0x00000000 },
+ { 0x99dc, 0xfebadbe8 },
+ { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
+ { 0x99e4, 0xaaaaaaaa },
+ { 0x99e8, 0x3c466478 },
+ { 0x99ec, 0x000000aa },
+ { AR5K_PHY_SCLOCK, 0x0000000c },
+ { AR5K_PHY_SDELAY, 0x000000ff },
+ { AR5K_PHY_SPENDING, 0x00000014 },
+ { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
+ { AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
+ { AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
+ { 0xa23c, 0x93c889af },
+ { AR5K_PHY_FAST_ADC, 0x00000001 },
+ { 0xa250, 0x0000a000 },
+ { AR5K_PHY_BLUETOOTH, 0x00000000 },
+ { AR5K_PHY_TPC_RG1, 0x0cc75380 },
+ { 0xa25c, 0x0f0f0f01 },
+ { 0xa260, 0x5f690f01 },
+ { 0xa264, 0x00418a11 },
+ { 0xa268, 0x00000000 },
+ { AR5K_PHY_TPC_RG5, 0x0c30c166 },
+ { 0xa270, 0x00820820 },
+ { 0xa274, 0x081a3caa },
+ { 0xa278, 0x1ce739ce },
+ { 0xa27c, 0x051701ce },
+ { 0xa300, 0x16010000 },
+ { 0xa304, 0x2c032402 },
+ { 0xa308, 0x48433e42 },
+ { 0xa30c, 0x5a0f500b },
+ { 0xa310, 0x6c4b624a },
+ { 0xa314, 0x7e8b748a },
+ { 0xa318, 0x96cf8ccb },
+ { 0xa31c, 0xa34f9d0f },
+ { 0xa320, 0xa7cfa58f },
+ { 0xa348, 0x3fffffff },
+ { 0xa34c, 0x3fffffff },
+ { 0xa350, 0x3fffffff },
+ { 0xa354, 0x0003ffff },
+ { 0xa358, 0x79a8aa1f },
+ { 0xa35c, 0x066c420f },
+ { 0xa360, 0x0f282207 },
+ { 0xa364, 0x17601685 },
+ { 0xa368, 0x1f801104 },
+ { 0xa36c, 0x37a00c03 },
+ { 0xa370, 0x3fc40883 },
+ { 0xa374, 0x57c00803 },
+ { 0xa378, 0x5fd80682 },
+ { 0xa37c, 0x7fe00482 },
+ { 0xa380, 0x7f3c7bba },
+ { 0xa384, 0xf3307ff0 },
+};
+
+/*
+ * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with
+ * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI)
+ */
+
+/* RF5111 Initial BaseBand Gain settings */
+static const struct ath5k_ini rf5111_ini_bbgain[] = {
+ { AR5K_BB_GAIN(0), 0x00000000 },
+ { AR5K_BB_GAIN(1), 0x00000020 },
+ { AR5K_BB_GAIN(2), 0x00000010 },
+ { AR5K_BB_GAIN(3), 0x00000030 },
+ { AR5K_BB_GAIN(4), 0x00000008 },
+ { AR5K_BB_GAIN(5), 0x00000028 },
+ { AR5K_BB_GAIN(6), 0x00000004 },
+ { AR5K_BB_GAIN(7), 0x00000024 },
+ { AR5K_BB_GAIN(8), 0x00000014 },
+ { AR5K_BB_GAIN(9), 0x00000034 },
+ { AR5K_BB_GAIN(10), 0x0000000c },
+ { AR5K_BB_GAIN(11), 0x0000002c },
+ { AR5K_BB_GAIN(12), 0x00000002 },
+ { AR5K_BB_GAIN(13), 0x00000022 },
+ { AR5K_BB_GAIN(14), 0x00000012 },
+ { AR5K_BB_GAIN(15), 0x00000032 },
+ { AR5K_BB_GAIN(16), 0x0000000a },
+ { AR5K_BB_GAIN(17), 0x0000002a },
+ { AR5K_BB_GAIN(18), 0x00000006 },
+ { AR5K_BB_GAIN(19), 0x00000026 },
+ { AR5K_BB_GAIN(20), 0x00000016 },
+ { AR5K_BB_GAIN(21), 0x00000036 },
+ { AR5K_BB_GAIN(22), 0x0000000e },
+ { AR5K_BB_GAIN(23), 0x0000002e },
+ { AR5K_BB_GAIN(24), 0x00000001 },
+ { AR5K_BB_GAIN(25), 0x00000021 },
+ { AR5K_BB_GAIN(26), 0x00000011 },
+ { AR5K_BB_GAIN(27), 0x00000031 },
+ { AR5K_BB_GAIN(28), 0x00000009 },
+ { AR5K_BB_GAIN(29), 0x00000029 },
+ { AR5K_BB_GAIN(30), 0x00000005 },
+ { AR5K_BB_GAIN(31), 0x00000025 },
+ { AR5K_BB_GAIN(32), 0x00000015 },
+ { AR5K_BB_GAIN(33), 0x00000035 },
+ { AR5K_BB_GAIN(34), 0x0000000d },
+ { AR5K_BB_GAIN(35), 0x0000002d },
+ { AR5K_BB_GAIN(36), 0x00000003 },
+ { AR5K_BB_GAIN(37), 0x00000023 },
+ { AR5K_BB_GAIN(38), 0x00000013 },
+ { AR5K_BB_GAIN(39), 0x00000033 },
+ { AR5K_BB_GAIN(40), 0x0000000b },
+ { AR5K_BB_GAIN(41), 0x0000002b },
+ { AR5K_BB_GAIN(42), 0x0000002b },
+ { AR5K_BB_GAIN(43), 0x0000002b },
+ { AR5K_BB_GAIN(44), 0x0000002b },
+ { AR5K_BB_GAIN(45), 0x0000002b },
+ { AR5K_BB_GAIN(46), 0x0000002b },
+ { AR5K_BB_GAIN(47), 0x0000002b },
+ { AR5K_BB_GAIN(48), 0x0000002b },
+ { AR5K_BB_GAIN(49), 0x0000002b },
+ { AR5K_BB_GAIN(50), 0x0000002b },
+ { AR5K_BB_GAIN(51), 0x0000002b },
+ { AR5K_BB_GAIN(52), 0x0000002b },
+ { AR5K_BB_GAIN(53), 0x0000002b },
+ { AR5K_BB_GAIN(54), 0x0000002b },
+ { AR5K_BB_GAIN(55), 0x0000002b },
+ { AR5K_BB_GAIN(56), 0x0000002b },
+ { AR5K_BB_GAIN(57), 0x0000002b },
+ { AR5K_BB_GAIN(58), 0x0000002b },
+ { AR5K_BB_GAIN(59), 0x0000002b },
+ { AR5K_BB_GAIN(60), 0x0000002b },
+ { AR5K_BB_GAIN(61), 0x0000002b },
+ { AR5K_BB_GAIN(62), 0x00000002 },
+ { AR5K_BB_GAIN(63), 0x00000016 },
+};
+
+/* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */
+static const struct ath5k_ini rf5112_ini_bbgain[] = {
+ { AR5K_BB_GAIN(0), 0x00000000 },
+ { AR5K_BB_GAIN(1), 0x00000001 },
+ { AR5K_BB_GAIN(2), 0x00000002 },
+ { AR5K_BB_GAIN(3), 0x00000003 },
+ { AR5K_BB_GAIN(4), 0x00000004 },
+ { AR5K_BB_GAIN(5), 0x00000005 },
+ { AR5K_BB_GAIN(6), 0x00000008 },
+ { AR5K_BB_GAIN(7), 0x00000009 },
+ { AR5K_BB_GAIN(8), 0x0000000a },
+ { AR5K_BB_GAIN(9), 0x0000000b },
+ { AR5K_BB_GAIN(10), 0x0000000c },
+ { AR5K_BB_GAIN(11), 0x0000000d },
+ { AR5K_BB_GAIN(12), 0x00000010 },
+ { AR5K_BB_GAIN(13), 0x00000011 },
+ { AR5K_BB_GAIN(14), 0x00000012 },
+ { AR5K_BB_GAIN(15), 0x00000013 },
+ { AR5K_BB_GAIN(16), 0x00000014 },
+ { AR5K_BB_GAIN(17), 0x00000015 },
+ { AR5K_BB_GAIN(18), 0x00000018 },
+ { AR5K_BB_GAIN(19), 0x00000019 },
+ { AR5K_BB_GAIN(20), 0x0000001a },
+ { AR5K_BB_GAIN(21), 0x0000001b },
+ { AR5K_BB_GAIN(22), 0x0000001c },
+ { AR5K_BB_GAIN(23), 0x0000001d },
+ { AR5K_BB_GAIN(24), 0x00000020 },
+ { AR5K_BB_GAIN(25), 0x00000021 },
+ { AR5K_BB_GAIN(26), 0x00000022 },
+ { AR5K_BB_GAIN(27), 0x00000023 },
+ { AR5K_BB_GAIN(28), 0x00000024 },
+ { AR5K_BB_GAIN(29), 0x00000025 },
+ { AR5K_BB_GAIN(30), 0x00000028 },
+ { AR5K_BB_GAIN(31), 0x00000029 },
+ { AR5K_BB_GAIN(32), 0x0000002a },
+ { AR5K_BB_GAIN(33), 0x0000002b },
+ { AR5K_BB_GAIN(34), 0x0000002c },
+ { AR5K_BB_GAIN(35), 0x0000002d },
+ { AR5K_BB_GAIN(36), 0x00000030 },
+ { AR5K_BB_GAIN(37), 0x00000031 },
+ { AR5K_BB_GAIN(38), 0x00000032 },
+ { AR5K_BB_GAIN(39), 0x00000033 },
+ { AR5K_BB_GAIN(40), 0x00000034 },
+ { AR5K_BB_GAIN(41), 0x00000035 },
+ { AR5K_BB_GAIN(42), 0x00000035 },
+ { AR5K_BB_GAIN(43), 0x00000035 },
+ { AR5K_BB_GAIN(44), 0x00000035 },
+ { AR5K_BB_GAIN(45), 0x00000035 },
+ { AR5K_BB_GAIN(46), 0x00000035 },
+ { AR5K_BB_GAIN(47), 0x00000035 },
+ { AR5K_BB_GAIN(48), 0x00000035 },
+ { AR5K_BB_GAIN(49), 0x00000035 },
+ { AR5K_BB_GAIN(50), 0x00000035 },
+ { AR5K_BB_GAIN(51), 0x00000035 },
+ { AR5K_BB_GAIN(52), 0x00000035 },
+ { AR5K_BB_GAIN(53), 0x00000035 },
+ { AR5K_BB_GAIN(54), 0x00000035 },
+ { AR5K_BB_GAIN(55), 0x00000035 },
+ { AR5K_BB_GAIN(56), 0x00000035 },
+ { AR5K_BB_GAIN(57), 0x00000035 },
+ { AR5K_BB_GAIN(58), 0x00000035 },
+ { AR5K_BB_GAIN(59), 0x00000035 },
+ { AR5K_BB_GAIN(60), 0x00000035 },
+ { AR5K_BB_GAIN(61), 0x00000035 },
+ { AR5K_BB_GAIN(62), 0x00000010 },
+ { AR5K_BB_GAIN(63), 0x0000001a },
+};
+
+
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
+ */
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+ const struct ath5k_ini *ini_regs, bool skip_pcu)
+{
+ unsigned int i;
+
+ /* Write initial registers */
+ for (i = 0; i < size; i++) {
+ /* Skip PCU registers if requested */
+ if (skip_pcu &&
+ ini_regs[i].ini_register >= AR5K_PCU_MIN &&
+ ini_regs[i].ini_register <= AR5K_PCU_MAX)
+ continue;
+
+ switch (ini_regs[i].ini_mode) {
+ case AR5K_INI_READ:
+ /* Cleared on read */
+ ath5k_hw_reg_read(ah, ini_regs[i].ini_register);
+ break;
+ case AR5K_INI_WRITE:
+ default:
+ AR5K_REG_WAIT(i);
+ ath5k_hw_reg_write(ah, ini_regs[i].ini_value,
+ ini_regs[i].ini_register);
+ }
+ }
+}
+
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+ unsigned int size, const struct ath5k_ini_mode *ini_mode,
+ u8 mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ AR5K_REG_WAIT(i);
+ ath5k_hw_reg_write(ah, ini_mode[i].mode_value[mode],
+ (u32)ini_mode[i].mode_register);
+ }
+}
+
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+{
+ /*
+ * Write initial register settings
+ */
+
+ /* For AR5212 and compatible */
+ if (ah->ah_version == AR5K_AR5212) {
+
+ /* First set of mode-specific settings */
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(ar5212_ini_mode_start),
+ ar5212_ini_mode_start, mode);
+
+ /*
+ * Write initial settings common for all modes
+ */
+ ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start),
+ ar5212_ini_common_start, skip_pcu);
+
+ /* Second set of mode-specific settings */
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf5111_ini_mode_end),
+ rf5111_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5111_ini_common_end),
+ rf5111_ini_common_end, skip_pcu);
+
+ /* Baseband gain table */
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5111_ini_bbgain),
+ rf5111_ini_bbgain, skip_pcu);
+
+ break;
+ case AR5K_RF5112:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf5112_ini_mode_end),
+ rf5112_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_common_end),
+ rf5112_ini_common_end, skip_pcu);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+
+ break;
+ case AR5K_RF5413:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf5413_ini_mode_end),
+ rf5413_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5413_ini_common_end),
+ rf5413_ini_common_end, skip_pcu);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+
+ break;
+ case AR5K_RF2316:
+ case AR5K_RF2413:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf2413_ini_mode_end),
+ rf2413_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf2413_ini_common_end),
+ rf2413_ini_common_end, skip_pcu);
+
+ /* Override settings from rf2413_ini_common_end */
+ if (ah->ah_radio == AR5K_RF2316) {
+ ath5k_hw_reg_write(ah, 0x00004000,
+ AR5K_PHY_AGC);
+ ath5k_hw_reg_write(ah, 0x081b7caa,
+ 0xa274);
+ }
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+ break;
+ case AR5K_RF2317:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf2413_ini_mode_end),
+ rf2413_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf2425_ini_common_end),
+ rf2425_ini_common_end, skip_pcu);
+
+ /* Override settings from rf2413_ini_mode_end */
+ ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);
+
+ /* Override settings from rf2413_ini_common_end */
+ ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
+ ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
+ ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+ break;
+ case AR5K_RF2425:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf2425_ini_mode_end),
+ rf2425_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf2425_ini_common_end),
+ rf2425_ini_common_end, skip_pcu);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+ break;
+ default:
+ return -EINVAL;
+
+ }
+
+ /* For AR5211 */
+ } else if (ah->ah_version == AR5K_AR5211) {
+
+ /* AR5K_MODE_11B */
+ if (mode > 2) {
+ ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ /* Mode-specific settings */
+ ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode),
+ ar5211_ini_mode, mode);
+
+ /*
+ * Write initial settings common for all modes
+ */
+ ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
+ ar5211_ini, skip_pcu);
+
+ /* AR5211 only comes with 5111 */
+
+ /* Baseband gain table */
+ ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
+ rf5111_ini_bbgain, skip_pcu);
+ /* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
+ } else if (ah->ah_version == AR5K_AR5210) {
+ ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
+ ar5210_ini, skip_pcu);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
new file mode 100644
index 000000000..ca4b7ccd6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2009 Bob Copeland <me@bobcopeland.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include "ath5k.h"
+
+#define ATH_SDEVICE(subv, subd) \
+ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
+ .subvendor = (subv), .subdevice = (subd)
+
+#define ATH_LED(pin, polarity) .driver_data = (((pin) << 8) | (polarity))
+#define ATH_PIN(data) ((data) >> 8)
+#define ATH_POLARITY(data) ((data) & 0xff)
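+
+/*
+ * Worked example (illustration only): a table entry using ATH_LED(3, 0)
+ * below packs driver_data as (3 << 8) | 0 = 0x0300, which the init path
+ * later unpacks with ATH_PIN(0x0300) = 3 (the GPIO pin) and
+ * ATH_POLARITY(0x0300) = 0 (the GPIO level written to light the LED,
+ * see ath5k_led_on()).
+ */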
+
+/* Devices we match on for LED config info (typically laptops) */
+static const struct pci_device_id ath5k_led_devices[] = {
+ /* AR5211 */
+ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) },
+ /* HP Compaq nc6xx, nc4000, nx6000 */
+ { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
+ /* Acer Aspire One A150 (maximlevitsky@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
+ /* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
+ /* Acer Ferrari 5000 (russ.dill@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
+ /* E-machines E510 (tuliom@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
+ /* BenQ Joybook R55v (nowymarluk@wp.pl) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) },
+ /* Acer Extensa 5620z (nekoreeve@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
+ /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
+ /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
+ /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
+ /* HP Compaq C700 (nitrousnrg@gmail.com) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ /* LiteOn AR5BXB63 (magooz@salug.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
+ /* IBM-specific AR5212 (all others) */
+ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
+ /* Dell Vostro A860 (shahar@shahar-or.co.il) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) },
+ { }
+};
+
+void ath5k_led_enable(struct ath5k_hw *ah)
+{
+ if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
+ ath5k_hw_set_gpio_output(ah, ah->led_pin);
+ ath5k_led_off(ah);
+ }
+}
+
+static void ath5k_led_on(struct ath5k_hw *ah)
+{
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+ return;
+ ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
+}
+
+void ath5k_led_off(struct ath5k_hw *ah)
+{
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+ return;
+ ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
+}
+
+static void
+ath5k_led_brightness_set(struct led_classdev *led_dev,
+ enum led_brightness brightness)
+{
+ struct ath5k_led *led = container_of(led_dev, struct ath5k_led,
+ led_dev);
+
+ if (brightness == LED_OFF)
+ ath5k_led_off(led->ah);
+ else
+ ath5k_led_on(led->ah);
+}
+
+static int
+ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
+ const char *name, char *trigger)
+{
+ int err;
+
+ led->ah = ah;
+ strncpy(led->name, name, sizeof(led->name));
+ led->name[sizeof(led->name)-1] = 0;
+ led->led_dev.name = led->name;
+ led->led_dev.default_trigger = trigger;
+ led->led_dev.brightness_set = ath5k_led_brightness_set;
+
+ err = led_classdev_register(ah->dev, &led->led_dev);
+ if (err) {
+ ATH5K_WARN(ah, "could not register LED %s\n", name);
+ led->ah = NULL;
+ }
+ return err;
+}
+
+static void
+ath5k_unregister_led(struct ath5k_led *led)
+{
+ if (!led->ah)
+ return;
+ led_classdev_unregister(&led->led_dev);
+ ath5k_led_off(led->ah);
+ led->ah = NULL;
+}
+
+void ath5k_unregister_leds(struct ath5k_hw *ah)
+{
+ ath5k_unregister_led(&ah->rx_led);
+ ath5k_unregister_led(&ah->tx_led);
+}
+
+int ath5k_init_leds(struct ath5k_hw *ah)
+{
+ int ret = 0;
+ struct ieee80211_hw *hw = ah->hw;
+#ifndef CONFIG_ATH5K_AHB
+ struct pci_dev *pdev = ah->pdev;
+#endif
+ char name[ATH5K_LED_MAX_NAME_LEN + 1];
+ const struct pci_device_id *match;
+
+ if (!ah->pdev)
+ return 0;
+
+#ifdef CONFIG_ATH5K_AHB
+ match = NULL;
+#else
+ match = pci_match_id(&ath5k_led_devices[0], pdev);
+#endif
+ if (match) {
+ __set_bit(ATH_STAT_LEDSOFT, ah->status);
+ ah->led_pin = ATH_PIN(match->driver_data);
+ ah->led_on = ATH_POLARITY(match->driver_data);
+ }
+
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
+ goto out;
+
+ ath5k_led_enable(ah);
+
+ snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
+ ret = ath5k_register_led(ah, &ah->rx_led, name,
+ ieee80211_get_rx_led_name(hw));
+ if (ret)
+ goto out;
+
+ snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
+ ret = ath5k_register_led(ah, &ah->tx_led, name,
+ ieee80211_get_tx_led_name(hw));
+out:
+ return ret;
+}
+
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
new file mode 100644
index 000000000..3b4a6463d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -0,0 +1,834 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2006 Devicescape Software, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+
+#include "ath5k.h"
+#include "base.h"
+#include "reg.h"
+
+/********************\
+* Mac80211 functions *
+\********************/
+
+static void
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath5k_hw *ah = hw->priv;
+ u16 qnum = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
+}
+
+
+static int
+ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath5k_hw *ah = hw->priv;
+ int ret;
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+
+ mutex_lock(&ah->lock);
+
+ if ((vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ && (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
+ ret = -ELNRNG;
+ goto end;
+ }
+
+ /* Don't allow other interfaces if one ad-hoc is configured.
+ * TODO: Fix the problems with ad-hoc and multiple other interfaces.
+ * We would need to operate the HW in ad-hoc mode to allow TSF updates
+ * for the IBSS, but this breaks with additional AP or STA interfaces
+ * at the moment. */
+ if (ah->num_adhoc_vifs ||
+ (ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+ ATH5K_ERR(ah, "Only one single ad-hoc interface is allowed.\n");
+ ret = -ELNRNG;
+ goto end;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ avf->opmode = vif->type;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto end;
+ }
+
+ ah->nvifs++;
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
+
+ /* Assign the vap/adhoc to a beacon xmit slot. */
+ if ((avf->opmode == NL80211_IFTYPE_AP) ||
+ (avf->opmode == NL80211_IFTYPE_ADHOC) ||
+ (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ int slot;
+
+ WARN_ON(list_empty(&ah->bcbuf));
+ avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
+ list);
+ list_del(&avf->bbuf->list);
+
+ avf->bslot = 0;
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (!ah->bslot[slot]) {
+ avf->bslot = slot;
+ break;
+ }
+ }
+ BUG_ON(ah->bslot[avf->bslot] != NULL);
+ ah->bslot[avf->bslot] = vif;
+ if (avf->opmode == NL80211_IFTYPE_AP)
+ ah->num_ap_vifs++;
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
+ ah->num_adhoc_vifs++;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs++;
+ }
+
+ /* Any MAC address is fine, all others are included through the
+ * filter.
+ */
+ ath5k_hw_set_lladdr(ah, vif->addr);
+
+ ath5k_update_bssid_mask_and_opmode(ah, vif);
+ ret = 0;
+end:
+ mutex_unlock(&ah->lock);
+ return ret;
+}
+
+
+static void
+ath5k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+ unsigned int i;
+
+ mutex_lock(&ah->lock);
+ ah->nvifs--;
+
+ if (avf->bbuf) {
+ ath5k_txbuf_free_skb(ah, avf->bbuf);
+ list_add_tail(&avf->bbuf->list, &ah->bcbuf);
+ for (i = 0; i < ATH_BCBUF; i++) {
+ if (ah->bslot[i] == vif) {
+ ah->bslot[i] = NULL;
+ break;
+ }
+ }
+ avf->bbuf = NULL;
+ }
+ if (avf->opmode == NL80211_IFTYPE_AP)
+ ah->num_ap_vifs--;
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
+ ah->num_adhoc_vifs--;
+ else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
+ ah->num_mesh_vifs--;
+
+ ath5k_update_bssid_mask_and_opmode(ah, NULL);
+ mutex_unlock(&ah->lock);
+}
+
+
+/*
+ * TODO: Phy disable/diversity etc
+ */
+static int
+ath5k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ah->lock);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ret = ath5k_chan_set(ah, &conf->chandef);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
+ (ah->ah_txpower.txp_requested != conf->power_level)) {
+ ah->ah_txpower.txp_requested = conf->power_level;
+
+ /* Half dB steps */
+ ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ ah->ah_retry_long = conf->long_frame_max_tx_count;
+ ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+ ath5k_hw_set_tx_retry_limits(ah, i);
+ }
+
+ /* TODO:
+ * 1) Move this on config_interface and handle each case
+ * separately eg. when we have only one STA vif, use
+ * AR5K_ANTMODE_SINGLE_AP
+ *
+ * 2) Allow the user to change antenna mode eg. when only
+ * one antenna is present
+ *
+ * 3) Allow the user to set default/tx antenna when possible
+ *
+ * 4) Default mode should handle 90% of the cases, together
+ * with fixed a/b and single AP modes we should be able to
+ * handle 99%. Sectored modes are extreme cases and I still
+ * haven't found a use for them. If we decide to support them,
+ * then we must allow the user to set how many tx antennas we
+ * have available
+ */
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+
+unlock:
+ mutex_unlock(&ah->lock);
+ return ret;
+}
+
+
+static void
+ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ mutex_lock(&ah->lock);
+
+ if (changes & BSS_CHANGED_BSSID) {
+ /* Cache for later use during resets */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = 0;
+ ath5k_hw_set_bssid(ah);
+ mmiowb();
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INT)
+ ah->bintval = bss_conf->beacon_int;
+
+ if (changes & BSS_CHANGED_ERP_SLOT) {
+ int slot_time;
+
+ ah->ah_short_slot = bss_conf->use_short_slot;
+ slot_time = ath5k_hw_get_default_slottime(ah) +
+ 3 * ah->ah_coverage_class;
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ avf->assoc = bss_conf->assoc;
+ if (bss_conf->assoc)
+ ah->assoc = bss_conf->assoc;
+ else
+ ah->assoc = ath5k_any_vif_assoc(ah);
+
+ if (ah->opmode == NL80211_IFTYPE_STATION)
+ ath5k_set_beacon_filter(hw, ah->assoc);
+ ath5k_hw_set_ledstate(ah, ah->assoc ?
+ AR5K_LED_ASSOC : AR5K_LED_INIT);
+ if (bss_conf->assoc) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
+ "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
+ common->curaid = bss_conf->aid;
+ ath5k_hw_set_bssid(ah);
+ /* Once ANI is available you would start it here */
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON) {
+ spin_lock_bh(&ah->block);
+ ath5k_beacon_update(hw, vif);
+ spin_unlock_bh(&ah->block);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED)
+ ah->enable_beacon = bss_conf->enable_beacon;
+
+ if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_BEACON_INT))
+ ath5k_beacon_config(ah);
+
+ mutex_unlock(&ah->lock);
+}
+
+
+static u64
+ath5k_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ u32 mfilt[2], val;
+ u8 pos;
+ struct netdev_hw_addr *ha;
+
+ mfilt[0] = 0;
+ mfilt[1] = 0;
+
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ /* calculate XOR of eight 6-bit values */
+ val = get_unaligned_le32(ha->addr + 0);
+ pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ val = get_unaligned_le32(ha->addr + 3);
+ pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ pos &= 0x3f;
+ mfilt[pos / 32] |= (1 << (pos % 32));
+ /* XXX: we might be able to just do this instead, but it is
+ * not certain and needs testing; if we do use this we would
+ * need to inform the code below not to reset the mcast filter */
+ /* ath5k_hw_set_mcast_filterindex(ah,
+ * ha->addr[5]); */
+ }
+
+ return ((u64)(mfilt[1]) << 32) | mfilt[0];
+}
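+
+/*
+ * Standalone sketch (illustration only, not part of the driver) of the
+ * hash above: each group address is XOR-folded down to a 6-bit index
+ * and the matching bit of the 64-bit hardware multicast mask is set.
+ * The second 32-bit load starts at offset 3, so it also covers the byte
+ * just past the 6-byte address inside the larger netdev_hw_addr buffer;
+ * the sketch simply treats that byte as zero.
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static uint64_t mcast_hash_sketch(const uint8_t mac[6])
+ *   {
+ *           uint8_t b[8] = { 0 };
+ *           uint32_t val;
+ *           uint8_t pos;
+ *
+ *           memcpy(b, mac, 6);
+ *           val = b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
+ *           pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ *           val = b[3] | b[4] << 8 | b[5] << 16 | (uint32_t)b[6] << 24;
+ *           pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ *           pos &= 0x3f;
+ *
+ *           return (uint64_t)1 << pos;
+ *   }
+ *
+ * The returned bit corresponds to mfilt[pos / 32], bit (pos % 32) above.
+ */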
+
+
+/*
+ * o always accept unicast, broadcast, and multicast traffic
+ * o multicast traffic for all BSSIDs will be enabled if mac80211
+ * says it should be
+ * o maintain current state of phy ofdm or phy cck error reception.
+ * If the hardware detects any of these types of errors then
+ * ath5k_hw_get_rx_filter() will pass to us the respective
+ * hardware filters to be able to receive these type of frames.
+ * o probe request frames are accepted only when operating in
+ * hostap, adhoc, or monitor modes
+ * o enable promiscuous mode according to the interface state
+ * o accept beacons:
+ * - when operating in adhoc mode so the 802.11 layer creates
+ * node table entries for peers,
+ * - when operating in station mode for collecting rssi data when
+ * the station is otherwise quiet, or
+ * - when scanning
+ */
+static void
+ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *new_flags, u64 multicast)
+{
+#define SUPPORTED_FIF_FLAGS \
+ (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
+ FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+ struct ath5k_hw *ah = hw->priv;
+ u32 mfilt[2], rfilt;
+ struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
+
+ mutex_lock(&ah->lock);
+
+ mfilt[0] = multicast;
+ mfilt[1] = multicast >> 32;
+
+ /* Only deal with supported flags */
+ changed_flags &= SUPPORTED_FIF_FLAGS;
+ *new_flags &= SUPPORTED_FIF_FLAGS;
+
+ /* If HW detects any phy or radar errors, leave those filters on.
+ * Also, always enable Unicast, Broadcasts and Multicast
+ * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
+ rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
+ (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
+ AR5K_RX_FILTER_MCAST);
+
+ if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
+ if (*new_flags & FIF_PROMISC_IN_BSS)
+ __set_bit(ATH_STAT_PROMISC, ah->status);
+ else
+ __clear_bit(ATH_STAT_PROMISC, ah->status);
+ }
+
+ if (test_bit(ATH_STAT_PROMISC, ah->status))
+ rfilt |= AR5K_RX_FILTER_PROM;
+
+ /* Note, AR5K_RX_FILTER_MCAST is already enabled */
+ if (*new_flags & FIF_ALLMULTI) {
+ mfilt[0] = ~0;
+ mfilt[1] = ~0;
+ }
+
+ /* This is the best we can do */
+ if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
+ rfilt |= AR5K_RX_FILTER_PHYERR;
+
+ /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
+ * and probes for any BSSID */
+ if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
+ rfilt |= AR5K_RX_FILTER_BEACON;
+
+ /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
+ * set we should only pass on control frames for this
+ * station. This needs testing. I believe right now this
+ * enables *all* control frames, which is OK, but we should
+ * see if we can improve on granularity */
+ if (*new_flags & FIF_CONTROL)
+ rfilt |= AR5K_RX_FILTER_CONTROL;
+
+ /* Additional settings per mode -- this is per ath5k */
+
+ /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
+
+ switch (ah->opmode) {
+ case NL80211_IFTYPE_MESH_POINT:
+ rfilt |= AR5K_RX_FILTER_CONTROL |
+ AR5K_RX_FILTER_BEACON |
+ AR5K_RX_FILTER_PROBEREQ |
+ AR5K_RX_FILTER_PROM;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ rfilt |= AR5K_RX_FILTER_PROBEREQ |
+ AR5K_RX_FILTER_BEACON;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (ah->assoc)
+ rfilt |= AR5K_RX_FILTER_BEACON;
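+ /* fall through */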
+ default:
+ break;
+ }
+
+ iter_data.hw_macaddr = NULL;
+ iter_data.n_stas = 0;
+ iter_data.need_set_hw_addr = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
+
+ /* Set up RX Filter */
+ if (iter_data.n_stas > 1) {
+ /* If you have multiple STA interfaces connected to
+ * different APs, ARPs are not received (most of the time?)
+ * Enabling PROMISC appears to fix that problem.
+ */
+ rfilt |= AR5K_RX_FILTER_PROM;
+ }
+
+ /* Set filters */
+ ath5k_hw_set_rx_filter(ah, rfilt);
+
+ /* Set multicast bits */
+ ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
+ /* Set the cached hw filter flags, this will later actually
+ * be set in HW */
+ ah->filter_flags = rfilt;
+ /* Store current FIF filter flags */
+ ah->fif_filter_flags = *new_flags;
+
+ mutex_unlock(&ah->lock);
+}
+
+
+static int
+ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_common *common = ath5k_hw_common(ah);
+ int ret = 0;
+
+ if (ath5k_modparam_nohwcrypt)
+ return -EOPNOTSUPP;
+
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
+ return -EOPNOTSUPP;
+
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* don't program group keys when using IBSS_RSN */
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
+ break;
+ return -EOPNOTSUPP;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ah->lock);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mmiowb();
+ mutex_unlock(&ah->lock);
+ return ret;
+}
+
+
+static void
+ath5k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct ath5k_hw *ah = hw->priv;
+ if (!ah->assoc)
+ ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
+}
+
+
+static void
+ath5k_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath5k_hw *ah = hw->priv;
+ ath5k_hw_set_ledstate(ah, ah->assoc ?
+ AR5K_LED_ASSOC : AR5K_LED_INIT);
+}
+
+
+static int
+ath5k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ /* Force update */
+ ath5k_hw_update_mib_counters(ah);
+
+ stats->dot11ACKFailureCount = ah->stats.ack_fail;
+ stats->dot11RTSFailureCount = ah->stats.rts_fail;
+ stats->dot11RTSSuccessCount = ah->stats.rts_ok;
+ stats->dot11FCSErrorCount = ah->stats.fcs_error;
+
+ return 0;
+}
+
+
+static int
+ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ath5k_txq_info qi;
+ int ret = 0;
+
+ if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
+ return 0;
+
+ mutex_lock(&ah->lock);
+
+ ath5k_hw_get_tx_queueprops(ah, queue, &qi);
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cw_min = params->cw_min;
+ qi.tqi_cw_max = params->cw_max;
+ qi.tqi_burst_time = params->txop * 32;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
+ "Configure tx [queue %d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
+ ATH5K_ERR(ah,
+ "Unable to update hardware queue %u!\n", queue);
+ ret = -EIO;
+ } else
+ ath5k_hw_reset_tx_queue(ah, queue);
+
+ mutex_unlock(&ah->lock);
+
+ return ret;
+}
+
+
+static u64
+ath5k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ return ath5k_hw_get_tsf64(ah);
+}
+
+
+static void
+ath5k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ ath5k_hw_set_tsf64(ah, tsf);
+}
+
+
+static void
+ath5k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ /*
+ * in IBSS mode we need to update the beacon timers too.
+ * this will also reset the TSF if we call it with 0
+ */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_beacon_update_timers(ah, 0);
+ else
+ ath5k_hw_reset_tsf(ah);
+}
+
+
+static int
+ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
+{
+ struct ath5k_hw *ah = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ath_cycle_counters *cc = &common->cc_survey;
+ unsigned int div = common->clockrate * 1000;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ spin_lock_bh(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ if (cc->cycles > 0) {
+ ah->survey.time += cc->cycles / div;
+ ah->survey.time_busy += cc->rx_busy / div;
+ ah->survey.time_rx += cc->rx_frame / div;
+ ah->survey.time_tx += cc->tx_frame / div;
+ }
+ memset(cc, 0, sizeof(*cc));
+ spin_unlock_bh(&common->cc_lock);
+
+ memcpy(survey, &ah->survey, sizeof(*survey));
+
+ survey->channel = conf->chandef.chan;
+ survey->noise = ah->ah_noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_IN_USE |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+
+ return 0;
+}
+
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
+ * coverage class. The values are persistent, they are restored after device
+ * reset.
+ */
+static void
+ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ mutex_lock(&ah->lock);
+ ath5k_hw_set_coverage_class(ah, coverage_class);
+ mutex_unlock(&ah->lock);
+}
+
+
+static int
+ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ if (tx_ant == 1 && rx_ant == 1)
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
+ else if (tx_ant == 2 && rx_ant == 2)
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
+ else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ else
+ return -EINVAL;
+ return 0;
+}
+
+
+static int
+ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ switch (ah->ah_ant_mode) {
+ case AR5K_ANTMODE_FIXED_A:
+ *tx_ant = 1; *rx_ant = 1; break;
+ case AR5K_ANTMODE_FIXED_B:
+ *tx_ant = 2; *rx_ant = 2; break;
+ case AR5K_ANTMODE_DEFAULT:
+ *tx_ant = 3; *rx_ant = 3; break;
+ }
+ return 0;
+}
+
+
+static void ath5k_get_ringparam(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+ struct ath5k_hw *ah = hw->priv;
+
+ *tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
+
+ *tx_max = ATH5K_TXQ_LEN_MAX;
+ *rx = *rx_max = ATH_RXBUF;
+}
+
+
+static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
+{
+ struct ath5k_hw *ah = hw->priv;
+ u16 qnum;
+
+ /* only support setting tx ring size for now */
+ if (rx != ATH_RXBUF)
+ return -EINVAL;
+
+ /* restrict tx ring size min/max */
+ if (!tx || tx > ATH5K_TXQ_LEN_MAX)
+ return -EINVAL;
+
+ for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
+ if (!ah->txqs[qnum].setup)
+ continue;
+ if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
+ ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
+ continue;
+
+ ah->txqs[qnum].txq_max = tx;
+ if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
+ ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
+ }
+
+ return 0;
+}
+
+
+const struct ieee80211_ops ath5k_hw_ops = {
+ .tx = ath5k_tx,
+ .start = ath5k_start,
+ .stop = ath5k_stop,
+ .add_interface = ath5k_add_interface,
+ /* .change_interface = not implemented */
+ .remove_interface = ath5k_remove_interface,
+ .config = ath5k_config,
+ .bss_info_changed = ath5k_bss_info_changed,
+ .prepare_multicast = ath5k_prepare_multicast,
+ .configure_filter = ath5k_configure_filter,
+ /* .set_tim = not implemented */
+ .set_key = ath5k_set_key,
+ /* .update_tkip_key = not implemented */
+ /* .hw_scan = not implemented */
+ .sw_scan_start = ath5k_sw_scan_start,
+ .sw_scan_complete = ath5k_sw_scan_complete,
+ .get_stats = ath5k_get_stats,
+ /* .get_tkip_seq = not implemented */
+ /* .set_frag_threshold = not implemented */
+ /* .set_rts_threshold = not implemented */
+ /* .sta_add = not implemented */
+ /* .sta_remove = not implemented */
+ /* .sta_notify = not implemented */
+ .conf_tx = ath5k_conf_tx,
+ .get_tsf = ath5k_get_tsf,
+ .set_tsf = ath5k_set_tsf,
+ .reset_tsf = ath5k_reset_tsf,
+ /* .tx_last_beacon = not implemented */
+ /* .ampdu_action = not needed */
+ .get_survey = ath5k_get_survey,
+ .set_coverage_class = ath5k_set_coverage_class,
+ /* .rfkill_poll = not implemented */
+ /* .flush = not implemented */
+ /* .channel_switch = not implemented */
+ /* .napi_poll = not implemented */
+ .set_antenna = ath5k_set_antenna,
+ .get_antenna = ath5k_get_antenna,
+ .set_ringparam = ath5k_set_ringparam,
+ .get_ringparam = ath5k_get_ringparam,
+};
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
new file mode 100644
index 000000000..c6156cc38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/nl80211.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include "../ath.h"
+#include "ath5k.h"
+#include "debug.h"
+#include "base.h"
+#include "reg.h"
+
+/* Known PCI ids */
+static const struct pci_device_id ath5k_pci_id_table[] = {
+ { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
+ { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
+ { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus! */
+ { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
+ { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
+ { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
+ { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
+ { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
+ { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
+ { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
+ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
+ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
+ { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
+
+/* return bus cachesize in 4B word units */
+static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
+ u8 u8tmp;
+
+ pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
+ *csz = (int)u8tmp;
+
+ /*
+ * This check was put in to avoid "unpleasant" consequences if
+ * the bootrom has not fully initialized all PCI devices.
+ * Sometimes the cache line size register is not set
+ */
+
+ if (*csz == 0)
+ *csz = L1_CACHE_BYTES >> 2; /* Use the default size */
+}
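+
+/*
+ * For illustration: with a 64-byte cache line, PCI_CACHE_LINE_SIZE reads
+ * back as 16 (the register counts 32-bit words), and the L1_CACHE_BYTES
+ * fallback above yields the same value, 64 >> 2 = 16.
+ */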
+
+/*
+ * Read from eeprom
+ */
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
+ u32 status, timeout;
+
+ /*
+ * Initialize EEPROM access
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
+ (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
+ } else {
+ ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
+ AR5K_EEPROM_CMD_READ);
+ }
+
+ for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
+ status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
+ if (status & AR5K_EEPROM_STAT_RDDONE) {
+ if (status & AR5K_EEPROM_STAT_RDERR)
+ return false;
+ *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
+ 0xffff);
+ return true;
+ }
+ usleep_range(15, 20);
+ }
+
+ return false;
+}
+
+int ath5k_hw_read_srev(struct ath5k_hw *ah)
+{
+ ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
+ return 0;
+}
+
+/*
+ * Read the MAC address from eeprom or platform_data
+ */
+static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+{
+ u8 mac_d[ETH_ALEN] = {};
+ u32 total, offset;
+ u16 data;
+ int octet;
+
+ AR5K_EEPROM_READ(0x20, data);
+
+ for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
+ AR5K_EEPROM_READ(offset, data);
+
+ total += data;
+ mac_d[octet + 1] = data & 0xff;
+ mac_d[octet] = data >> 8;
+ octet += 2;
+ }
+
+ if (!total || total == 3 * 0xffff)
+ return -EINVAL;
+
+ memcpy(mac, mac_d, ETH_ALEN);
+
+ return 0;
+}
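+
+/*
+ * Worked example with hypothetical EEPROM contents: the loop above walks
+ * words 0x1f, 0x1e, 0x1d and splits each word into two MAC octets, high
+ * byte first, so words { 0x1f: 0x0013, 0x1e: 0x74ab, 0x1d: 0xcdef } yield
+ * the address 00:13:74:ab:cd:ef. A sum of 0 or of three 0xffff words is
+ * rejected as an unprogrammed EEPROM.
+ */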
+
+
+/* Common ath_bus_opts structure */
+static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
+ .read_cachesize = ath5k_pci_read_cachesize,
+ .eeprom_read = ath5k_pci_eeprom_read,
+ .eeprom_read_mac = ath5k_pci_eeprom_read_mac,
+};
+
+/********************\
+* PCI Initialization *
+\********************/
+
+static int
+ath5k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem *mem;
+ struct ath5k_hw *ah;
+ struct ieee80211_hw *hw;
+ int ret;
+ u8 csz;
+
+ /*
+ * L0s needs to be disabled on all ath5k cards.
+ *
+ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
+ * by default in the future in 2.6.36) this will also mean both L1 and
+ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
+ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
+ * but we cannot currently undo the effect of a blacklist; for
+ * details you can read pcie_aspm_sanity_check() and see how it adjusts
+ * the device link capability.
+ *
+ * It may be possible in the future to implement some PCI API to allow
+ * drivers to override blacklists for pre 1.1 PCIe but for now it is
+ * best to accept that both L0s and L1 will be disabled completely for
+ * distributions shipping with CONFIG_PCIEASPM rather than having this
+ * issue present. Motivation for adding this new API will be to help
+ * with power consumption for some of these devices.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable device\n");
+ goto err;
+ }
+
+ /* XXX 32-bit addressing only */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "32-bit DMA not available\n");
+ goto err_dis;
+ }
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+ if (csz == 0) {
+ /*
+ * Linux 2.4.18 (at least) writes the cache line size
+ * register as a 16-bit wide register which is wrong.
+ * We must have this setup properly for rx buffer
+ * DMA to work so force a reasonable value here if it
+ * comes up zero.
+ */
+ csz = L1_CACHE_BYTES >> 2;
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+ }
+ /*
+ * The default setting of latency timer yields poor results,
+ * set it to the value used by other systems. It may be worth
+ * tweaking this setting more.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+ /* Enable bus mastering */
+ pci_set_master(pdev);
+
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
+ ret = pci_request_region(pdev, 0, "ath5k");
+ if (ret) {
+ dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
+ goto err_dis;
+ }
+
+ mem = pci_iomap(pdev, 0, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "cannot remap PCI memory region\n");
+ ret = -EIO;
+ goto err_reg;
+ }
+
+ /*
+ * Allocate hw (mac80211 main struct)
+ * and hw->priv (driver private data)
+ */
+ hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
+ ret = -ENOMEM;
+ goto err_map;
+ }
+
+ dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
+
+ ah = hw->priv;
+ ah->hw = hw;
+ ah->pdev = pdev;
+ ah->dev = &pdev->dev;
+ ah->irq = pdev->irq;
+ ah->devid = id->device;
+ ah->iobase = mem; /* So we can unmap it on detach */
+
+ /* Initialize */
+ ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
+ if (ret)
+ goto err_free;
+
+ /* Set private data */
+ pci_set_drvdata(pdev, hw);
+
+ return 0;
+err_free:
+ ieee80211_free_hw(hw);
+err_map:
+ pci_iounmap(pdev, mem);
+err_reg:
+ pci_release_region(pdev, 0);
+err_dis:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+static void
+ath5k_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_hw *ah = hw->priv;
+
+ ath5k_deinit_ah(ah);
+ pci_iounmap(pdev, ah->iobase);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ ieee80211_free_hw(hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ath5k_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_hw *ah = hw->priv;
+
+ ath5k_led_off(ah);
+ return 0;
+}
+
+static int ath5k_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_hw *ah = hw->priv;
+
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
+ ath5k_led_enable(ah);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+#define ATH5K_PM_OPS (&ath5k_pm_ops)
+#else
+#define ATH5K_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct pci_driver ath5k_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ath5k_pci_id_table,
+ .probe = ath5k_pci_probe,
+ .remove = ath5k_pci_remove,
+ .driver.pm = ATH5K_PM_OPS,
+};
+
+module_pci_driver(ath5k_pci_driver);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
new file mode 100644
index 000000000..bf29da5e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -0,0 +1,1010 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
+ * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*********************************\
+* Protocol Control Unit Functions *
+\*********************************/
+
+#include <asm/unaligned.h>
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The protocol control unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from the
+ * baseband. To be more specific, the PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in software above the driver, for debugging or research. For more
+ * info check out the PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
+ * AR5212+ can use higher rates for ACK transmission
+ * based on the current tx rate instead of the base rate,
+ * which makes better use of the channel.
+ * There is a mapping between G rates (which cover both
+ * CCK and OFDM) and the ACK rates that we use when setting
+ * up the rate -> duration table. This mapping is fixed in
+ * hardware, so don't change it.
+ *
+ * To enable this functionality we must set
+ * ah->ah_ack_bitrate_high to true, else the base rate is
+ * used (1Mb for CCK, 6Mb for OFDM).
+ */
+static const unsigned int ack_rates_high[] =
+/* Tx -> ACK */
+/* 1Mb -> 1Mb */ { 0,
+/* 2MB -> 2Mb */ 1,
+/* 5.5Mb -> 2Mb */ 1,
+/* 11Mb -> 2Mb */ 1,
+/* 6Mb -> 6Mb */ 4,
+/* 9Mb -> 6Mb */ 4,
+/* 12Mb -> 12Mb */ 6,
+/* 18Mb -> 12Mb */ 6,
+/* 24Mb -> 24Mb */ 8,
+/* 36Mb -> 24Mb */ 8,
+/* 48Mb -> 24Mb */ 8,
+/* 54Mb -> 24Mb */ 8 };
+
+/*******************\
+* Helper functions *
+\*******************/
+
+/**
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
+ * @ah: The &struct ath5k_hw
+ * @band: The &enum ieee80211_band of the current channel
+ * @len: Frame's length in bytes
+ * @rate: The &struct ieee80211_rate
+ * @shortpre: Indicate short preamble
+ *
+ * Calculate the tx duration of a frame given its rate and length.
+ * It extends ieee80211_generic_frame_duration() for non-standard
+ * bwmodes.
+ */
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ int len, struct ieee80211_rate *rate, bool shortpre)
+{
+ int sifs, preamble, plcp_bits, sym_time;
+ int bitrate, bits, symbols, symbol_bits;
+ int dur;
+
+ /* Fallback */
+ if (!ah->ah_bwmode) {
+ __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
+ NULL, band, len, rate);
+
+ /* subtract difference between long and short preamble */
+ dur = le16_to_cpu(raw_dur);
+ if (shortpre)
+ dur -= 96;
+
+ return dur;
+ }
+
+ bitrate = rate->bitrate;
+ preamble = AR5K_INIT_OFDM_PREAMBLE_TIME;
+ plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
+ sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
+
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ sifs = AR5K_INIT_SIFS_TURBO;
+ preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ sifs = AR5K_INIT_SIFS_HALF_RATE;
+ preamble *= 2;
+ sym_time *= 2;
+ bitrate = DIV_ROUND_UP(bitrate, 2);
+ break;
+ case AR5K_BWMODE_5MHZ:
+ sifs = AR5K_INIT_SIFS_QUARTER_RATE;
+ preamble *= 4;
+ sym_time *= 4;
+ bitrate = DIV_ROUND_UP(bitrate, 4);
+ break;
+ default:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
+ break;
+ }
+
+ bits = plcp_bits + (len << 3);
+ /* Bit rate is in 100Kbits */
+ symbol_bits = bitrate * sym_time;
+ symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
+
+ dur = sifs + preamble + (sym_time * symbols);
+
+ return dur;
+}
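+
+/*
+ * Worked example (illustrative; it assumes the usual OFDM timing
+ * constants of 22 PLCP bits, 4 us symbols, 20 us preamble and a 20 us
+ * half-rate SIFS): a 10-byte frame at a nominal 12 Mb/s on a 10 MHz
+ * channel gives bitrate = DIV_ROUND_UP(120, 2) = 60 (100 kbit/s units),
+ * bits = 22 + 80 = 102, symbols = DIV_ROUND_UP(102 * 10, 60 * 8) = 3,
+ * and dur = 20 + 40 + 8 * 3 = 84 us.
+ */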
+
+/**
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ unsigned int slot_time;
+
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_TURBO;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
+ break;
+ case AR5K_BWMODE_DEFAULT:
+ default:
+ slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
+ if ((channel->hw_value == AR5K_MODE_11B) && !ah->ah_short_slot)
+ slot_time = AR5K_INIT_SLOT_TIME_B;
+ break;
+ }
+
+ return slot_time;
+}
+
+/**
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ unsigned int sifs;
+
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ sifs = AR5K_INIT_SIFS_TURBO;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ sifs = AR5K_INIT_SIFS_HALF_RATE;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ sifs = AR5K_INIT_SIFS_QUARTER_RATE;
+ break;
+ case AR5K_BWMODE_DEFAULT:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
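+ /* fall through */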
+ default:
+ if (channel->band == IEEE80211_BAND_5GHZ)
+ sifs = AR5K_INIT_SIFS_DEFAULT_A;
+ break;
+ }
+
+ return sifs;
+}
+
+/**
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
+ * @ah: The &struct ath5k_hw
+ *
+ * Reads MIB counters from PCU and updates sw statistics. It is called after a
+ * MIB interrupt, because one of these counters might have reached its maximum
+ * and triggered the MIB interrupt, to let us read and clear the counter.
+ *
+ * NOTE: Is called in interrupt context!
+ */
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+{
+ struct ath5k_statistics *stats = &ah->stats;
+
+ /* Read-And-Clear */
+ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+ stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+ stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+ stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+ stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
+}
+
+
+/******************\
+* ACK/CTS Timeouts *
+\******************/
+
+/**
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
+ *
+ * Write the rate code to duration table upon hw reset. This is a helper for
+ * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
+ * the hardware, based on current mode, for each rate. The rates which are
+ * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
+ * a different rate code, so we write their value twice (once for long preamble
+ * and once for short).
+ *
+ * Note: Band doesn't matter here; if we set the values for OFDM they work
+ * on both a and g modes. So all we have to do is set values for all g rates
+ * that include all OFDM and CCK rates.
+ *
+ */
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+{
+ struct ieee80211_rate *rate;
+ unsigned int i;
+ /* 802.11g covers both OFDM and CCK */
+ u8 band = IEEE80211_BAND_2GHZ;
+
+ /* Write rate duration table */
+ for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
+ u32 reg;
+ u16 tx_time;
+
+ if (ah->ah_ack_bitrate_high)
+ rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
+ /* CCK -> 1Mb */
+ else if (i < 4)
+ rate = &ah->sbands[band].bitrates[0];
+ /* OFDM -> 6Mb */
+ else
+ rate = &ah->sbands[band].bitrates[4];
+
+ /* Set ACK timeout */
+ reg = AR5K_RATE_DUR(rate->hw_value);
+
+ /* An ACK frame consists of 10 bytes. If you add the FCS,
+ * which ieee80211_generic_frame_duration() adds,
+ * it's 14 bytes. Note we use the control rate and not the
+ * actual rate for this rate. See mac80211 tx.c
+ * ieee80211_duration() for a brief description of
+ * what rate we should choose to TX ACKs. */
+ tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
+ rate, false);
+
+ ath5k_hw_reg_write(ah, tx_time, reg);
+
+ if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
+ continue;
+
+ tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
+ ath5k_hw_reg_write(ah, tx_time,
+ reg + (AR5K_SET_SHORT_PREAMBLE << 2));
+ }
+}
+
+/**
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
+ * @ah: The &struct ath5k_hw
+ * @timeout: Timeout in usec
+ */
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+{
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
+ return -EINVAL;
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
+ ath5k_hw_htoclock(ah, timeout));
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
+ * @ah: The &struct ath5k_hw
+ * @timeout: Timeout in usec
+ */
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+{
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
+ return -EINVAL;
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
+ ath5k_hw_htoclock(ah, timeout));
+
+ return 0;
+}
+
+
+/*******************\
+* RX filter Control *
+\*******************/
+
+/**
+ * ath5k_hw_set_lladdr() - Set station id
+ * @ah: The &struct ath5k_hw
+ * @mac: The card's mac address (array of octets)
+ *
+ * Set station id on hw using the provided mac address
+ */
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ u32 low_id, high_id;
+ u32 pcu_reg;
+
+ /* Set new station ID */
+ memcpy(common->macaddr, mac, ETH_ALEN);
+
+ pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
+
+ low_id = get_unaligned_le32(mac);
+ high_id = get_unaligned_le16(mac + 4);
+
+ ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
+ ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
+
+ return 0;
+}
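+
+/*
+ * For illustration (hypothetical address, not from the driver): a MAC
+ * address of 00:11:22:33:44:55 is split little-endian into the two
+ * station id registers, so AR5K_STA_ID0 gets 0x33221100 and the low
+ * 16 bits of AR5K_STA_ID1 get 0x5544 (the upper PCU bits are preserved).
+ */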
+
+/**
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
+ * @ah: The &struct ath5k_hw
+ *
+ * Sets the current BSSID and BSSID mask we have from the
+ * common struct into the hardware
+ */
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ u16 tim_offset = 0;
+
+ /*
+ * Set BSSID mask on 5212
+ */
+ if (ah->ah_version == AR5K_AR5212)
+ ath_hw_setbssidmask(common);
+
+ /*
+ * Set BSSID
+ */
+ ath5k_hw_reg_write(ah,
+ get_unaligned_le32(common->curbssid),
+ AR5K_BSS_ID0);
+ ath5k_hw_reg_write(ah,
+ get_unaligned_le16(common->curbssid + 4) |
+ ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
+ AR5K_BSS_ID1);
+
+ if (common->curaid == 0) {
+ ath5k_hw_disable_pspoll(ah);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
+ tim_offset ? tim_offset + 4 : 0);
+
+ ath5k_hw_enable_pspoll(ah, NULL, 0);
+}
+
+/**
+ * ath5k_hw_set_bssid_mask() - Set the BSSID mask that filters which BSSIDs we listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ /* Cache bssid mask so that we can restore it
+ * on reset */
+ memcpy(common->bssidmask, mask, ETH_ALEN);
+ if (ah->ah_version == AR5K_AR5212)
+ ath_hw_setbssidmask(common);
+}
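+
+/*
+ * For illustration (hypothetical addresses): with two virtual interfaces
+ * at 00:03:7f:00:00:01 and 00:03:7f:00:00:02, the bits where the
+ * addresses differ must be ignored, so the common code should end up
+ * with a mask along the lines of ff:ff:ff:ff:ff:fc (a set mask bit
+ * means "this address bit must match").
+ */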
+
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32bits of multicast filter
+ * @filter1: Higher 32bits of multicast filter
+ */
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+{
+ ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
+ ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
+}
+
+/**
+ * ath5k_hw_get_rx_filter() - Get current rx filter
+ * @ah: The &struct ath5k_hw
+ *
+ * Returns the RX filter by reading rx filter and
+ * phy error filter registers. RX filter is used
+ * to set the allowed frame types that PCU will accept
+ * and pass to the driver. For a list of frame types
+ * check out reg.h.
+ */
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+{
+ u32 data, filter = 0;
+
+ filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
+
+ /*Radar detection for 5212*/
+ if (ah->ah_version == AR5K_AR5212) {
+ data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
+
+ if (data & AR5K_PHY_ERR_FIL_RADAR)
+ filter |= AR5K_RX_FILTER_RADARERR;
+ if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
+ filter |= AR5K_RX_FILTER_PHYERR;
+ }
+
+ return filter;
+}
+
+/**
+ * ath5k_hw_set_rx_filter() - Set rx filter
+ * @ah: The &struct ath5k_hw
+ * @filter: RX filter mask (see reg.h)
+ *
+ * Sets RX filter register and also handles PHY error filter
+ * register on 5212 and newer chips so that we have proper PHY
+ * error reporting.
+ */
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+{
+ u32 data = 0;
+
+ /* Set PHY error filter register on 5212*/
+ if (ah->ah_version == AR5K_AR5212) {
+ if (filter & AR5K_RX_FILTER_RADARERR)
+ data |= AR5K_PHY_ERR_FIL_RADAR;
+ if (filter & AR5K_RX_FILTER_PHYERR)
+ data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
+ }
+
+ /*
+ * The AR5210 uses promiscuous mode to detect radar activity
+ */
+ if (ah->ah_version == AR5K_AR5210 &&
+ (filter & AR5K_RX_FILTER_RADARERR)) {
+ filter &= ~AR5K_RX_FILTER_RADARERR;
+ filter |= AR5K_RX_FILTER_PROM;
+ }
+
+ /*Zero length DMA (phy error reporting) */
+ if (data)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
+
+ /*Write RX Filter register*/
+ ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
+
+ /*Write PHY error filter register on 5212*/
+ if (ah->ah_version == AR5K_AR5212)
+ ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
+
+}
+
+
+/****************\
+* Beacon control *
+\****************/
+
+#define ATH5K_MAX_TSF_READ 10
+
+/**
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
+ * @ah: The &struct ath5k_hw
+ *
+ * Returns the current TSF
+ */
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+{
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+ unsigned long flags;
+
+ /* This code is time critical - we don't want to be interrupted here */
+ local_irq_save(flags);
+
+ /*
+ * While reading TSF upper and then lower part, the clock is still
+ * counting (or jumping in case of IBSS merge) so we might get
+ * inconsistent values. To avoid this, we read the upper part again
+ * and check that it has not changed. We assume that at most 3
+ * changes can happen in a row (we use 10 as a safe
+ * value).
+ *
+ * Impact on performance is pretty small, since in most cases, only
+ * 3 register reads are needed.
+ */
+
+ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
+ tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ local_irq_restore(flags);
+
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
+
+ return ((u64)tsf_upper1 << 32) | tsf_lower;
+}
+
+#undef ATH5K_MAX_TSF_READ
+
+/**
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
+ * @ah: The &struct ath5k_hw
+ * @tsf64: The new 64bit TSF
+ *
+ * Sets the new TSF
+ */
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+{
+ ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
+ ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
+}
+
+/**
+ * ath5k_hw_reset_tsf() - Force a TSF reset
+ * @ah: The &struct ath5k_hw
+ *
+ * Forces a TSF reset on PCU
+ */
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+{
+ u32 val;
+
+ val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
+
+ /*
+ * Each write to the RESET_TSF bit toggles a hardware internal
+ * signal to reset TSF, but if left high it will cause a TSF reset
+ * on the next chip reset as well. Thus we always write the value
+ * twice to clear the signal.
+ */
+ ath5k_hw_reg_write(ah, val, AR5K_BEACON);
+ ath5k_hw_reg_write(ah, val, AR5K_BEACON);
+}
+
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
+ */
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+{
+ u32 timer1, timer2, timer3;
+
+ /*
+ * Set the additional timers by mode
+ */
+ switch (ah->opmode) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_STATION:
+ /* In STA mode timer1 is used as next wakeup
+ * timer and timer2 as next CFP duration start
+ * timer. Both in 1/8TUs. */
+ /* TODO: PCF handling */
+ if (ah->ah_version == AR5K_AR5210) {
+ timer1 = 0xffffffff;
+ timer2 = 0xffffffff;
+ } else {
+ timer1 = 0x0000ffff;
+ timer2 = 0x0007ffff;
+ }
+ /* Mark associated AP as PCF incapable for now */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
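+ /* fall through */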
+ default:
+ /* On non-STA modes timer1 is used as next DMA
+ * beacon alert (DBA) timer and timer2 as next
+ * software beacon alert. Both in 1/8TUs. */
+ timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
+ timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
+ break;
+ }
+
+ /* Timer3 marks the end of our ATIM window;
+ * a zero-length window is not allowed because
+ * we'll get no beacons */
+ timer3 = next_beacon + 1;
+
+ /*
+ * Set the beacon register and enable all timers.
+ */
+ /* When in AP or Mesh Point mode zero timer0 to start TSF */
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT)
+ ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
+
+ ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
+ ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
+ ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
+ ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
+
+ /* Force a TSF reset if requested and enable beacons */
+ if (interval & AR5K_BEACON_RESET_TSF)
+ ath5k_hw_reset_tsf(ah);
+
+ ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
+ AR5K_BEACON_ENABLE),
+ AR5K_BEACON);
+
+ /* Flush any pending BMISS interrupts on ISR by
+ * performing a clear-on-write operation on PISR
+ * register for the BMISS bit (writing a bit on
+ * ISR toggles a reset for that bit and leaves
+ * the remaining bits intact) */
+ if (ah->ah_version == AR5K_AR5210)
+ ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
+ else
+ ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
+
+ /* TODO: Set enhanced sleep registers on AR5212
+ * based on vif->bss_conf params, until then
+ * disable power save reporting.*/
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
+
+}
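+
+/*
+ * For illustration (values are hypothetical): with next_beacon = 1000 TU
+ * in AP mode, TIMER0 gets 1000 (in TU), TIMER1 gets
+ * (1000 - AR5K_TUNE_DMA_BEACON_RESP) << 3 and TIMER2 gets
+ * (1000 - AR5K_TUNE_SW_BEACON_RESP) << 3 (both in 1/8 TU units),
+ * while TIMER3 gets 1001.
+ */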
+
+/**
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
+ * @a: timer a (before b)
+ * @b: timer b (after a)
+ * @window: difference between a and b
+ * @intval: timers are increased by this interval
+ *
+ * This helper function checks if timer B is timer A + window and covers
+ * cases where timer A or B might have already been updated or wrapped
+ * around (Timers are 16 bit).
+ *
+ * Returns true if O.K.
+ */
+static inline bool
+ath5k_check_timer_win(int a, int b, int window, int intval)
+{
+ /*
+ * 1.) usually B should be A + window
+ * 2.) A already updated, B not updated yet
+ * 3.) A already updated and has wrapped around
+ * 4.) B has wrapped around
+ */
+ if ((b - a == window) || /* 1.) */
+ (a - b == intval - window) || /* 2.) */
+ ((a | 0x10000) - b == intval - window) || /* 3.) */
+ ((b | 0x10000) - a == window)) /* 4.) */
+ return true; /* O.K. */
+ return false;
+}
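+
+/*
+ * Worked example (illustrative): with intval = 100 and window = 4,
+ * case 1.) is a = 1000, b = 1004 (b - a == 4); case 2.) is a = 1100,
+ * b = 1004 (a - b == 96 == intval - window); case 3.) is a = 94 after
+ * wrapping from 65530 + 100, b = 65534 ((a | 0x10000) - b == 96); and
+ * case 4.) is a = 65534, b = 2 ((b | 0x10000) - a == 4).
+ */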
+
+/**
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
+ * @ah: The &struct ath5k_hw
+ * @intval: beacon interval
+ *
+ * This is a workaround for IBSS mode
+ *
+ * The need for this function arises from the fact that we have 4 separate
+ * HW timer registers (TIMER0 - TIMER3), which are closely related to the
+ * next beacon target time (NBTT), and that the HW updates these timers
+ * separately based on the current TSF value. The hardware increments each
+ * timer by the beacon interval when the local TSF, converted to TU, is equal
+ * to the value stored in the timer.
+ *
+ * The reception of a beacon with the same BSSID can update the local HW TSF
+ * at any time - this is something we can't avoid. If the TSF jumps to a
+ * time which is later than the time stored in a timer, this timer will not
+ * be updated until the TSF in TU wraps around at 16 bit (the size of the
+ * timers) and reaches the time which is stored in the timer.
+ *
+ * The problem is that these timers are closely related to TIMER0 (NBTT) and
+ * that they define a time "window". When the TSF jumps between two timers
+ * (e.g. ATIM and NBTT), the one in the past will be left behind (not
+ * updated), while the one in the future will be updated every beacon
+ * interval. This causes the window to get larger, until the TSF wraps
+ * around as described above and the timer which was left behind gets
+ * updated again. But because the beacon interval is usually not an exact
+ * divisor of the size of the timers (16 bit), an unwanted "window" between
+ * these timers will have developed by then!
+ *
+ * This is especially important with the ATIM window, because during
+ * the ATIM window only ATIM frames and no data frames are allowed to be
+ * sent, which creates transmission pauses after each beacon. This symptom
+ * has been described as "ramping ping" because ping times increase linearly
+ * for some time and then drop down again. A wrong window on the DMA beacon
+ * timer has the same effect, so we check for these two conditions.
+ *
+ * Returns true if O.K.
+ */
+bool
+ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
+{
+ unsigned int nbtt, atim, dma;
+
+ nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0);
+ atim = ath5k_hw_reg_read(ah, AR5K_TIMER3);
+ dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;
+
+ /* NOTE: SWBA is different. Having a wrong window there does not
+ * stop us from sending data and this condition is caught by
+ * other means (SWBA interrupt) */
+
+ if (ath5k_check_timer_win(nbtt, atim, 1, intval) &&
+ ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP,
+ intval))
+ return true; /* O.K. */
+ return false;
+}
+
+/**
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
+ */
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
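+
+/*
+ * For illustration, assuming the usual OFDM defaults of a 9us slot
+ * and 16us SIFS: coverage class 2 gives slot_time = 9 + 3 * 2 = 15us
+ * and ack_timeout = cts_timeout = 16 + 15 = 31us.
+ */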
+
+/***************************\
+* Init/Start/Stop functions *
+\***************************/
+
+/**
+ * ath5k_hw_start_rx_pcu() - Start RX engine
+ * @ah: The &struct ath5k_hw
+ *
+ * Starts RX engine on PCU so that hw can process RXed frames
+ * (ACK etc).
+ *
+ * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
+ */
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+{
+ AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/**
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
+ * @ah: The &struct ath5k_hw
+ *
+ * Stops RX engine on PCU
+ */
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+{
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/**
+ * ath5k_hw_set_opmode() - Set PCU operating mode
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * Configure PCU for the various operating modes (AP/STA etc)
+ */
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ u32 pcu_reg, beacon_reg, low_id, high_id;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
+
+ /* Preserve rest settings */
+ pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
+ pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
+ | AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
+
+ beacon_reg = 0;
+
+ switch (op_mode) {
+ case NL80211_IFTYPE_ADHOC:
+ pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
+ beacon_reg |= AR5K_BCR_ADHOC;
+ if (ah->ah_version == AR5K_AR5210)
+ pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
+ break;
+
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
+ beacon_reg |= AR5K_BCR_AP;
+ if (ah->ah_version == AR5K_AR5210)
+ pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ AR5K_STA_ID1_PWR_SV : 0);
+ /* fall through */
+ case NL80211_IFTYPE_MONITOR:
+ pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ AR5K_STA_ID1_NO_PSPOLL : 0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Set PCU registers
+ */
+ low_id = get_unaligned_le32(common->macaddr);
+ high_id = get_unaligned_le16(common->macaddr + 4);
+ ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
+ ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
+
+ /*
+ * Set Beacon Control Register on 5210
+ */
+ if (ah->ah_version == AR5K_AR5210)
+ ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+{
+ /* Set bssid and bssid mask */
+ ath5k_hw_set_bssid(ah);
+
+ /* Set PCU config */
+ ath5k_hw_set_opmode(ah, op_mode);
+
+ /* Write rate duration table only on AR5212 and if
+ * virtual interface has already been brought up
+ * XXX: rethink this after new mode changes to
+ * mac80211 are integrated */
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->nvifs)
+ ath5k_hw_write_rate_duration(ah);
+
+ /* Set RSSI/BRSSI thresholds
+ *
+ * Note: If we decide to set this value
+ * dynamically, have in mind that when AR5K_RSSI_THR
+ * register is read it might return 0x40 if we haven't
+ * written anything to it, and the BMISS RSSI threshold is zeroed.
+ * So doing a save/restore procedure here isn't the right
+ * choice. Instead store it on ath5k_hw */
+ ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
+ AR5K_TUNE_BMISS_THRES <<
+ AR5K_RSSI_THR_BMISS_S),
+ AR5K_RSSI_THR);
+
+ /* MIC QoS support */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
+ ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
+ ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
+ }
+
+ /* QoS NOACK Policy */
+ if (ah->ah_version == AR5K_AR5212) {
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
+ AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
+ AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
+ AR5K_QOS_NOACK);
+ }
+
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
+ /* Set ACK bitrate mode (see ack_rates_high) */
+ if (ah->ah_version == AR5K_AR5212) {
+ u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
+ if (ah->ah_ack_bitrate_high)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
+ }
+ return;
+}
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
new file mode 100644
index 000000000..0fce1c766
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -0,0 +1,3965 @@
+/*
+ * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2008-2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/***********************\
+* PHY related functions *
+\***********************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "ath5k.h"
+#include "reg.h"
+#include "rfbuffer.h"
+#include "rfgain.h"
+#include "../regd.h"
+
+
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also have in mind we never got documentation for most of these
+ * functions, what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
+/******************\
+* Helper functions *
+\******************/
+
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
+ */
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+{
+ unsigned int i;
+ u32 srev;
+ u16 ret;
+
+ /*
+ * Set the radio chip access register
+ */
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
+ break;
+ case IEEE80211_BAND_5GHZ:
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+ break;
+ default:
+ return 0;
+ }
+
+ usleep_range(2000, 2500);
+
+ /* ...wait until PHY is ready and read the selected radio revision */
+ ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
+
+ for (i = 0; i < 8; i++)
+ ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
+
+ if (ah->ah_version == AR5K_AR5210) {
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
+ ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
+ } else {
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
+ ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
+ ((srev & 0x0f) << 4), 8);
+ }
+
+ /* Reset to the 5GHz mode */
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+
+ return ret;
+}
+
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
+ */
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+{
+ u16 freq = channel->center_freq;
+
+ /* Check if the channel is in our supported range */
+ if (channel->band == IEEE80211_BAND_2GHZ) {
+ if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
+ (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
+ return true;
+ } else if (channel->band == IEEE80211_BAND_5GHZ)
+ if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
+ (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
+ return true;
+
+ return false;
+}
+
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u8 refclk_freq;
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2413) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ refclk_freq = 40;
+ else
+ refclk_freq = 32;
+
+ if ((channel->center_freq % refclk_freq != 0) &&
+ ((channel->center_freq % refclk_freq < 10) ||
+ (channel->center_freq % refclk_freq > 22)))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: True to write @val into the buffer field, false to read it back
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * info.
+ */
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
+ u32 val, u8 reg_id, bool set)
+{
+ const struct ath5k_rf_reg *rfreg = NULL;
+ u8 offset, bank, num_bits, col, position;
+ u16 entry;
+ u32 mask, data, last_bit, bits_shifted, first_bit;
+ u32 *rfb;
+ s32 bits_left;
+ int i;
+
+ data = 0;
+ rfb = ah->ah_rf_banks;
+
+ for (i = 0; i < ah->ah_rf_regs_count; i++) {
+ if (rf_regs[i].index == reg_id) {
+ rfreg = &rf_regs[i];
+ break;
+ }
+ }
+
+ if (rfb == NULL || rfreg == NULL) {
+ ATH5K_PRINTF("Rf register not found!\n");
+ /* should not happen */
+ return 0;
+ }
+
+ bank = rfreg->bank;
+ num_bits = rfreg->field.len;
+ first_bit = rfreg->field.pos;
+ col = rfreg->field.col;
+
+ /* first_bit is an offset from bank's
+ * start. Since we have all banks on
+ * the same array, we use this offset
+ * to mark each bank's start */
+ offset = ah->ah_offset[bank];
+
+ /* Boundary check */
+ if (!(col <= 3 && num_bits <= 32 && first_bit + num_bits <= 319)) {
+ ATH5K_PRINTF("invalid values at offset %u\n", offset);
+ return 0;
+ }
+
+ entry = ((first_bit - 1) / 8) + offset;
+ position = (first_bit - 1) % 8;
+
+ if (set)
+ data = ath5k_hw_bitswap(val, num_bits);
+
+ for (bits_shifted = 0, bits_left = num_bits; bits_left > 0;
+ position = 0, entry++) {
+
+ last_bit = (position + bits_left > 8) ? 8 :
+ position + bits_left;
+
+ mask = (((1 << last_bit) - 1) ^ ((1 << position) - 1)) <<
+ (col * 8);
+
+ if (set) {
+ rfb[entry] &= ~mask;
+ rfb[entry] |= ((data << position) << (col * 8)) & mask;
+ data >>= (8 - position);
+ } else {
+ data |= (((rfb[entry] & mask) >> (col * 8)) >> position)
+ << bits_shifted;
+ bits_shifted += last_bit - position;
+ }
+
+ bits_left -= 8 - position;
+ }
+
+ data = set ? 1 : ath5k_hw_bitswap(data, num_bits);
+
+ return data;
+}
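+
+/*
+ * For illustration of the packing above (hypothetical field): a 6 bit
+ * field with first_bit = 13, col = 0 and bank offset 0 gives
+ * entry = 1, position = 4, so the first pass covers bits 4-7 of
+ * rfb[1] (mask 0xf0) and the second pass the low 2 bits of rfb[2]
+ * (mask 0x03), with the value bitswapped on its way in or out.
+ */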
+
+/**
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
+ * @ah: the &struct ath5k_hw
+ * @channel: the currently set channel upon reset
+ *
+ * Write the delta slope coefficient (used on pilot tracking ?) for OFDM
+ * operation on the AR5212 upon reset. This is a helper for ath5k_hw_phy_init.
+ *
+ * Since delta slope is floating point we split it on its exponent and
+ * mantissa and provide these values on hw.
+ *
+ * For more info, this patent seems related:
+ * "http://www.freepatentsonline.com/7184495.html"
+ */
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /* Get exponent and mantissa and set it */
+ u32 coef_scaled, coef_exp, coef_man,
+ ds_coef_exp, ds_coef_man, clock;
+
+ BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
+ (channel->hw_value == AR5K_MODE_11B));
+
+ /* Get coefficient
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
+ * we scale coef by shifting clock value by 24 for
+ * better precision since we use integers */
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ clock = 40 * 2;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ clock = 40 / 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ clock = 40 / 4;
+ break;
+ default:
+ clock = 40;
+ break;
+ }
+ coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
+
+ /* Get exponent
+ * ALGO: coef_exp = 14 - highest set bit position */
+ coef_exp = ilog2(coef_scaled);
+
+ /* Doesn't make sense if it's zero*/
+ if (!coef_scaled || !coef_exp)
+ return -EINVAL;
+
+ /* Note: we've shifted coef_scaled by 24 */
+ coef_exp = 14 - (coef_exp - 24);
+
+
+ /* Get mantissa (significant digits)
+ * ALGO: coef_mant = floor(coef_scaled* 2^coef_exp+0.5) */
+ coef_man = coef_scaled +
+ (1 << (24 - coef_exp - 1));
+
+ /* Calculate delta slope coefficient exponent
+ * and mantissa (remove scaling) and set them on hw */
+ ds_coef_man = coef_man >> (24 - coef_exp);
+ ds_coef_exp = coef_exp - 16;
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+ AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+ AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
+
+ return 0;
+}
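+
+/*
+ * Worked example of the math above (illustrative): on the default
+ * bwmode (clock = 40) at 5180MHz, coef_scaled = 1677721600 / 5180 =
+ * 323884, ilog2() gives 18 so coef_exp = 14 - (18 - 24) = 20,
+ * coef_man = 323884 + (1 << 3) = 323892, and the values written are
+ * ds_coef_man = 323892 >> 4 = 20243 and ds_coef_exp = 20 - 16 = 4.
+ */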
+
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
+int ath5k_hw_phy_disable(struct ath5k_hw *ah)
+{
+ /*Just a try M.F.*/
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value == AR5K_MODE_11B) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ usleep_range(100 + delay, 100 + (2 * delay));
+ } else {
+ usleep_range(1000, 1500);
+ }
+}
+
+
+/**********************\
+* RF Gain optimization *
+\**********************/
+
+/**
+ * DOC: RF Gain optimization
+ *
+ * This code is used to optimize RF gain on different environments
+ * (temperature mostly) based on feedback from a power detector.
+ *
+ * It's only used on RF5111 and RF5112; later RF chips seem to have
+ * automatic adjustment in hardware (notice they have a much smaller
+ * BANK 7 and no gain optimization ladder).
+ *
+ * For more info check out this patent doc
+ * "http://www.freepatentsonline.com/7400691.html"
+ *
+ * This paper describes power drops as seen on the receiver due to
+ * probe packets
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
+ *
+ * And this is the MadWiFi bug entry related to the above
+ * "http://madwifi-project.org/ticket/1659"
+ * with various measurements and diagrams
+ */
+
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
+{
+ /* Initialize the gain optimization values */
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+ ah->ah_gain.g_step_idx = rfgain_opt_5111.go_default;
+ ah->ah_gain.g_low = 20;
+ ah->ah_gain.g_high = 35;
+ ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
+ break;
+ case AR5K_RF5112:
+ ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default;
+ ah->ah_gain.g_low = 20;
+ ah->ah_gain.g_high = 85;
+ ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
+ * That means our next packet is going to be sent with lower
+ * tx power and a Peak to Average Power Detector (PAPD) will try
+ * to measure the gain.
+ *
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
+ * just after we enable the probe so that we don't mess with
+ * standard traffic.
+ */
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+{
+
+ /* Skip if gain calibration is inactive or
+ * we already handle a probe request */
+ if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
+ return;
+
+ /* Send the packet 2dB below max power, as the
+ * patent doc suggests */
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_ofdm - 4,
+ AR5K_PHY_PAPD_PROBE_TXPOWER) |
+ AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
+
+ ah->ah_gain.g_state = AR5K_RFGAIN_READ_REQUESTED;
+
+}
+
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+{
+ u32 mix, step;
+ u32 *rf;
+ const struct ath5k_gain_opt *go;
+ const struct ath5k_gain_opt_step *g_step;
+ const struct ath5k_rf_reg *rf_regs;
+
+ /* Only RF5112 Rev. 2 supports it */
+ if ((ah->ah_radio != AR5K_RF5112) ||
+ (ah->ah_radio_5ghz_revision <= AR5K_SREV_RAD_5112A))
+ return 0;
+
+ go = &rfgain_opt_5112;
+ rf_regs = rf_regs_5112a;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
+
+ g_step = &go->go_step[ah->ah_gain.g_step_idx];
+
+ if (ah->ah_rf_banks == NULL)
+ return 0;
+
+ rf = ah->ah_rf_banks;
+ ah->ah_gain.g_f_corr = 0;
+
+ /* No VGA (Variable Gain Amplifier) override, skip */
+ if (ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR, false) != 1)
+ return 0;
+
+ /* Mix gain stepping */
+ step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXGAIN_STEP, false);
+
+ /* Mix gain override */
+ mix = g_step->gos_param[0];
+
+ switch (mix) {
+ case 3:
+ ah->ah_gain.g_f_corr = step * 2;
+ break;
+ case 2:
+ ah->ah_gain.g_f_corr = (step - 5) * 2;
+ break;
+ case 1:
+ ah->ah_gain.g_f_corr = step;
+ break;
+ default:
+ ah->ah_gain.g_f_corr = 0;
+ break;
+ }
+
+ return ah->ah_gain.g_f_corr;
+}
+
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
+ * power detector windows. If we get a measurement outside range
+ * we know it's not accurate (detectors can't measure anything outside
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+{
+ const struct ath5k_rf_reg *rf_regs;
+ u32 step, mix_ovr, level[4];
+ u32 *rf;
+
+ if (ah->ah_rf_banks == NULL)
+ return false;
+
+ rf = ah->ah_rf_banks;
+
+ if (ah->ah_radio == AR5K_RF5111) {
+
+ rf_regs = rf_regs_5111;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
+
+ step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_RFGAIN_STEP,
+ false);
+
+ level[0] = 0;
+ level[1] = (step == 63) ? 50 : step + 4;
+ level[2] = (step != 63) ? 64 : level[0];
+ level[3] = level[2] + 50;
+
+ ah->ah_gain.g_high = level[3] -
+ (step == 63 ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5);
+ ah->ah_gain.g_low = level[0] +
+ (step == 63 ? AR5K_GAIN_DYN_ADJUST_LO_MARGIN : 0);
+ } else {
+
+ rf_regs = rf_regs_5112;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
+
+ mix_ovr = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR,
+ false);
+
+ level[0] = level[2] = 0;
+
+ if (mix_ovr == 1) {
+ level[1] = level[3] = 83;
+ } else {
+ level[1] = level[3] = 107;
+ ah->ah_gain.g_high = 55;
+ }
+ }
+
+ return (ah->ah_gain.g_current >= level[0] &&
+ ah->ah_gain.g_current <= level[1]) ||
+ (ah->ah_gain.g_current >= level[2] &&
+ ah->ah_gain.g_current <= level[3]);
+}
+
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+{
+ const struct ath5k_gain_opt *go;
+ const struct ath5k_gain_opt_step *g_step;
+ int ret = 0;
+
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+ go = &rfgain_opt_5111;
+ break;
+ case AR5K_RF5112:
+ go = &rfgain_opt_5112;
+ break;
+ default:
+ return 0;
+ }
+
+ g_step = &go->go_step[ah->ah_gain.g_step_idx];
+
+ if (ah->ah_gain.g_current >= ah->ah_gain.g_high) {
+
+ /* Reached maximum */
+ if (ah->ah_gain.g_step_idx == 0)
+ return -1;
+
+ for (ah->ah_gain.g_target = ah->ah_gain.g_current;
+ ah->ah_gain.g_target >= ah->ah_gain.g_high &&
+ ah->ah_gain.g_step_idx > 0;
+ g_step = &go->go_step[ah->ah_gain.g_step_idx])
+ ah->ah_gain.g_target -= 2 *
+ (go->go_step[--(ah->ah_gain.g_step_idx)].gos_gain -
+ g_step->gos_gain);
+
+ ret = 1;
+ goto done;
+ }
+
+ if (ah->ah_gain.g_current <= ah->ah_gain.g_low) {
+
+ /* Reached minimum */
+ if (ah->ah_gain.g_step_idx == (go->go_steps_count - 1))
+ return -2;
+
+ for (ah->ah_gain.g_target = ah->ah_gain.g_current;
+ ah->ah_gain.g_target <= ah->ah_gain.g_low &&
+ ah->ah_gain.g_step_idx < go->go_steps_count - 1;
+ g_step = &go->go_step[ah->ah_gain.g_step_idx])
+ ah->ah_gain.g_target -= 2 *
+ (go->go_step[++ah->ah_gain.g_step_idx].gos_gain -
+ g_step->gos_gain);
+
+ ret = 2;
+ goto done;
+ }
+
+done:
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "ret %d, gain step %u, current gain %u, target gain %u\n",
+ ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current,
+ ah->ah_gain.g_target);
+
+ return ret;
+}
+
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
+ * Check for a new gain reading and schedule an adjustment
+ * if needed.
+ *
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+{
+ u32 data, type;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+
+ if (ah->ah_rf_banks == NULL ||
+ ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
+ return AR5K_RFGAIN_INACTIVE;
+
+ /* No check requested, either engine is inactive
+ * or an adjustment is already requested */
+ if (ah->ah_gain.g_state != AR5K_RFGAIN_READ_REQUESTED)
+ goto done;
+
+ /* Read the PAPD (Peak to Average Power Detector)
+ * register */
+ data = ath5k_hw_reg_read(ah, AR5K_PHY_PAPD_PROBE);
+
+ /* No probe is scheduled, read gain_F measurement */
+ if (!(data & AR5K_PHY_PAPD_PROBE_TX_NEXT)) {
+ ah->ah_gain.g_current = data >> AR5K_PHY_PAPD_PROBE_GAINF_S;
+ type = AR5K_REG_MS(data, AR5K_PHY_PAPD_PROBE_TYPE);
+
+ /* If tx packet is CCK correct the gain_F measurement
+ * by cck ofdm gain delta */
+ if (type == AR5K_PHY_PAPD_PROBE_TYPE_CCK) {
+ if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A)
+ ah->ah_gain.g_current +=
+ ee->ee_cck_ofdm_gain_delta;
+ else
+ ah->ah_gain.g_current +=
+ AR5K_GAIN_CCK_PROBE_CORR;
+ }
+
+ /* Further correct gain_F measurement for
+ * RF5112A radios */
+ if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
+ ath5k_hw_rf_gainf_corr(ah);
+ ah->ah_gain.g_current =
+ ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
+ (ah->ah_gain.g_current - ah->ah_gain.g_f_corr) :
+ 0;
+ }
+
+ /* Check if measurement is ok and if we need
+ * to adjust gain, schedule a gain adjustment,
+ * else switch back to the active state */
+ if (ath5k_hw_rf_check_gainf_readback(ah) &&
+ AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) &&
+ ath5k_hw_rf_gainf_adjust(ah)) {
+ ah->ah_gain.g_state = AR5K_RFGAIN_NEED_CHANGE;
+ } else {
+ ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
+ }
+ }
+
+done:
+ return ah->ah_gain.g_state;
+}
+
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+{
+ const struct ath5k_ini_rfgain *ath5k_rfg;
+ unsigned int i, size, index;
+
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+ ath5k_rfg = rfgain_5111;
+ size = ARRAY_SIZE(rfgain_5111);
+ break;
+ case AR5K_RF5112:
+ ath5k_rfg = rfgain_5112;
+ size = ARRAY_SIZE(rfgain_5112);
+ break;
+ case AR5K_RF2413:
+ ath5k_rfg = rfgain_2413;
+ size = ARRAY_SIZE(rfgain_2413);
+ break;
+ case AR5K_RF2316:
+ ath5k_rfg = rfgain_2316;
+ size = ARRAY_SIZE(rfgain_2316);
+ break;
+ case AR5K_RF5413:
+ ath5k_rfg = rfgain_5413;
+ size = ARRAY_SIZE(rfgain_5413);
+ break;
+ case AR5K_RF2317:
+ case AR5K_RF2425:
+ ath5k_rfg = rfgain_2425;
+ size = ARRAY_SIZE(rfgain_2425);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
+
+ for (i = 0; i < size; i++) {
+ AR5K_REG_WAIT(i);
+ ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[index],
+ (u32)ath5k_rfg[i].rfg_register);
+ }
+
+ return 0;
+}
+
+
+/********************\
+* RF Registers setup *
+\********************/
+
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more info on this, check out rfbuffer.h
+ */
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode)
+{
+ const struct ath5k_rf_reg *rf_regs;
+ const struct ath5k_ini_rfbuffer *ini_rfb;
+ const struct ath5k_gain_opt *go = NULL;
+ const struct ath5k_gain_opt_step *g_step;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u8 ee_mode = 0;
+ u32 *rfb;
+ int i, obdb = -1, bank = -1;
+
+ switch (ah->ah_radio) {
+ case AR5K_RF5111:
+ rf_regs = rf_regs_5111;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
+ ini_rfb = rfb_5111;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5111);
+ go = &rfgain_opt_5111;
+ break;
+ case AR5K_RF5112:
+ if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
+ rf_regs = rf_regs_5112a;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
+ ini_rfb = rfb_5112a;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112a);
+ } else {
+ rf_regs = rf_regs_5112;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
+ ini_rfb = rfb_5112;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112);
+ }
+ go = &rfgain_opt_5112;
+ break;
+ case AR5K_RF2413:
+ rf_regs = rf_regs_2413;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2413);
+ ini_rfb = rfb_2413;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2413);
+ break;
+ case AR5K_RF2316:
+ rf_regs = rf_regs_2316;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2316);
+ ini_rfb = rfb_2316;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2316);
+ break;
+ case AR5K_RF5413:
+ rf_regs = rf_regs_5413;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5413);
+ ini_rfb = rfb_5413;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5413);
+ break;
+ case AR5K_RF2317:
+ rf_regs = rf_regs_2425;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
+ ini_rfb = rfb_2317;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2317);
+ break;
+ case AR5K_RF2425:
+ rf_regs = rf_regs_2425;
+ ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
+ if (ah->ah_mac_srev < AR5K_SREV_AR2417) {
+ ini_rfb = rfb_2425;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2425);
+ } else {
+ ini_rfb = rfb_2417;
+ ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2417);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* If it's the first time we set RF buffer, allocate
+ * ah->ah_rf_banks based on ah->ah_rf_banks_size
+ * we set above */
+ if (ah->ah_rf_banks == NULL) {
+ ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
+ GFP_KERNEL);
+ if (ah->ah_rf_banks == NULL) {
+ ATH5K_ERR(ah, "out of memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Copy values to modify them */
+ rfb = ah->ah_rf_banks;
+
+ for (i = 0; i < ah->ah_rf_banks_size; i++) {
+ if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
+ ATH5K_ERR(ah, "invalid bank\n");
+ return -EINVAL;
+ }
+
+ /* Bank changed, write down the offset */
+ if (bank != ini_rfb[i].rfb_bank) {
+ bank = ini_rfb[i].rfb_bank;
+ ah->ah_offset[bank] = i;
+ }
+
+ rfb[i] = ini_rfb[i].rfb_mode_data[mode];
+ }
+
+ /* Set Output and Driver bias current (OB/DB) */
+ if (channel->band == IEEE80211_BAND_2GHZ) {
+
+ if (channel->hw_value == AR5K_MODE_11B)
+ ee_mode = AR5K_EEPROM_MODE_11B;
+ else
+ ee_mode = AR5K_EEPROM_MODE_11G;
+
+ /* For RF511X/RF211X combination we
+ * use b_OB and b_DB parameters stored
+ * in eeprom on ee->ee_ob[ee_mode][0]
+ *
+ * For all other chips we use OB/DB for 2GHz
+ * stored in the b/g modal section just like
+ * 802.11a on ee->ee_ob[ee_mode][1] */
+ if ((ah->ah_radio == AR5K_RF5111) ||
+ (ah->ah_radio == AR5K_RF5112))
+ obdb = 0;
+ else
+ obdb = 1;
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
+ AR5K_RF_OB_2GHZ, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
+ AR5K_RF_DB_2GHZ, true);
+
+ /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
+ } else if ((channel->band == IEEE80211_BAND_5GHZ) ||
+ (ah->ah_radio == AR5K_RF5111)) {
+
+ /* For 11a, Turbo and XR we need to choose
+ * OB/DB based on frequency range */
+ ee_mode = AR5K_EEPROM_MODE_11A;
+ obdb = channel->center_freq >= 5725 ? 3 :
+ (channel->center_freq >= 5500 ? 2 :
+ (channel->center_freq >= 5260 ? 1 :
+ (channel->center_freq > 4000 ? 0 : -1)));
+
+ if (obdb < 0)
+ return -EINVAL;
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
+ AR5K_RF_OB_5GHZ, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
+ AR5K_RF_DB_5GHZ, true);
+ }
+
+ g_step = &go->go_step[ah->ah_gain.g_step_idx];
+
+ /* Set turbo mode (N/A on RF5413) */
+ if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
+ (ah->ah_radio != AR5K_RF5413))
+ ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_TURBO, false);
+
+ /* Bank Modifications (chip-specific) */
+ if (ah->ah_radio == AR5K_RF5111) {
+
+ /* Set gain_F settings according to current step */
+ if (channel->hw_value != AR5K_MODE_11B) {
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
+ AR5K_PHY_FRAME_CTL_TX_CLIP,
+ g_step->gos_param[0]);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
+ AR5K_RF_PWD_90, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
+ AR5K_RF_PWD_84, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
+ AR5K_RF_RFGAIN_SEL, true);
+
+ /* We programmed gain_F parameters, switch back
+ * to active state */
+ ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
+
+ }
+
+ /* Bank 6/7 setup */
+
+ ath5k_hw_rfb_op(ah, rf_regs, !ee->ee_xpd[ee_mode],
+ AR5K_RF_PWD_XPD, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_x_gain[ee_mode],
+ AR5K_RF_XPD_GAIN, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
+ AR5K_RF_GAIN_I, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
+ AR5K_RF_PLO_SEL, true);
+
+ /* Tweak power detectors for half/quarter rate support */
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
+ ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
+ u8 wait_i;
+
+ ath5k_hw_rfb_op(ah, rf_regs, 0x1f,
+ AR5K_RF_WAIT_S, true);
+
+ wait_i = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
+ 0x1f : 0x10;
+
+ ath5k_hw_rfb_op(ah, rf_regs, wait_i,
+ AR5K_RF_WAIT_I, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 3,
+ AR5K_RF_MAX_TIME, true);
+
+ }
+ }
+
+ if (ah->ah_radio == AR5K_RF5112) {
+
+ /* Set gain_F settings according to current step */
+ if (channel->hw_value != AR5K_MODE_11B) {
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[0],
+ AR5K_RF_MIXGAIN_OVR, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
+ AR5K_RF_PWD_138, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
+ AR5K_RF_PWD_137, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
+ AR5K_RF_PWD_136, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[4],
+ AR5K_RF_PWD_132, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[5],
+ AR5K_RF_PWD_131, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[6],
+ AR5K_RF_PWD_130, true);
+
+ /* We programmed gain_F parameters, switch back
+ * to active state */
+ ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
+ }
+
+ /* Bank 6/7 setup */
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
+ AR5K_RF_XPD_SEL, true);
+
+ if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
+ /* Rev. 1 supports only one xpd */
+ ath5k_hw_rfb_op(ah, rf_regs,
+ ee->ee_x_gain[ee_mode],
+ AR5K_RF_XPD_GAIN, true);
+
+ } else {
+ u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
+ if (ee->ee_pd_gains[ee_mode] > 1) {
+ ath5k_hw_rfb_op(ah, rf_regs,
+ pdg_curve_to_idx[0],
+ AR5K_RF_PD_GAIN_LO, true);
+ ath5k_hw_rfb_op(ah, rf_regs,
+ pdg_curve_to_idx[1],
+ AR5K_RF_PD_GAIN_HI, true);
+ } else {
+ ath5k_hw_rfb_op(ah, rf_regs,
+ pdg_curve_to_idx[0],
+ AR5K_RF_PD_GAIN_LO, true);
+ ath5k_hw_rfb_op(ah, rf_regs,
+ pdg_curve_to_idx[0],
+ AR5K_RF_PD_GAIN_HI, true);
+ }
+
+ /* Lower synth voltage on Rev 2 */
+ if (ah->ah_radio == AR5K_RF5112 &&
+ (ah->ah_radio_5ghz_revision & AR5K_SREV_REV) > 0) {
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_HIGH_VC_CP, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_MID_VC_CP, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_LOW_VC_CP, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_PUSH_UP, true);
+ }
+
+ /* Decrease power consumption on 5213+ BaseBand */
+ if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
+ ath5k_hw_rfb_op(ah, rf_regs, 1,
+ AR5K_RF_PAD2GND, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 1,
+ AR5K_RF_XB2_LVL, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 1,
+ AR5K_RF_XB5_LVL, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 1,
+ AR5K_RF_PWD_167, true);
+
+ ath5k_hw_rfb_op(ah, rf_regs, 1,
+ AR5K_RF_PWD_166, true);
+ }
+ }
+
+ ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
+ AR5K_RF_GAIN_I, true);
+
+ /* Tweak power detector for half/quarter rates */
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
+ ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
+ u8 pd_delay;
+
+ pd_delay = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
+ 0xf : 0x8;
+
+ ath5k_hw_rfb_op(ah, rf_regs, pd_delay,
+ AR5K_RF_PD_PERIOD_A, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 0xf,
+ AR5K_RF_PD_DELAY_A, true);
+
+ }
+ }
+
+ if (ah->ah_radio == AR5K_RF5413 &&
+ channel->band == IEEE80211_BAND_2GHZ) {
+
+ ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
+ true);
+
+ /* Set optimum value for early revisions (on pci-e chips) */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
+ ah->ah_mac_srev < AR5K_SREV_AR5413)
+ ath5k_hw_rfb_op(ah, rf_regs, ath5k_hw_bitswap(6, 3),
+ AR5K_RF_PWD_ICLOBUF_2G, true);
+
+ }
+
+ /* Write RF banks on hw */
+ for (i = 0; i < ah->ah_rf_banks_size; i++) {
+ AR5K_REG_WAIT(i);
+ ath5k_hw_reg_write(ah, rfb[i], ini_rfb[i].rfb_ctrl_register);
+ }
+
+ return 0;
+}
+
+
+/**************************\
+ PHY/RF channel functions
+\**************************/
+
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
+ */
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+{
+ u32 athchan;
+
+ athchan = (ath5k_hw_bitswap(
+ (ieee80211_frequency_to_channel(
+ channel->center_freq) - 24) / 2, 5)
+ << 1) | (1 << 6) | 0x1;
+ return athchan;
+}
+
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u32 data;
+
+ /*
+ * Set the channel and wait
+ */
+ data = ath5k_hw_rf5110_chan2athchan(channel);
+ ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
+ ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
+ usleep_range(1000, 1500);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
+ */
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+ struct ath5k_athchan_2ghz *athchan)
+{
+ int channel;
+
+ /* Cast this value to catch negative channel numbers (>= -19) */
+ channel = (int)ieee;
+
+ /*
+ * Map 2GHz IEEE channel to 5GHz Atheros channel
+ */
+ if (channel <= 13) {
+ athchan->a2_athchan = 115 + channel;
+ athchan->a2_flags = 0x46;
+ } else if (channel == 14) {
+ athchan->a2_athchan = 124;
+ athchan->a2_flags = 0x44;
+ } else if (channel >= 15 && channel <= 26) {
+ athchan->a2_athchan = ((channel - 14) * 4) + 132;
+ athchan->a2_flags = 0x46;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
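+
+/*
+ * For illustration: 2GHz channel 6 maps to a2_athchan = 115 + 6 = 121
+ * with a2_flags = 0x46, channel 14 maps to 124 / 0x44, and channel 20
+ * maps to ((20 - 14) * 4) + 132 = 156 / 0x46.
+ */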
+
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath5k_athchan_2ghz ath5k_channel_2ghz;
+ unsigned int ath5k_channel =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ u32 data0, data1, clock;
+ int ret;
+
+ /*
+ * Set the channel on the RF5111 radio
+ */
+ data0 = data1 = 0;
+
+ if (channel->band == IEEE80211_BAND_2GHZ) {
+ /* Map 2GHz channel to 5GHz Atheros channel ID */
+ ret = ath5k_hw_rf5111_chan2athchan(
+ ieee80211_frequency_to_channel(channel->center_freq),
+ &ath5k_channel_2ghz);
+ if (ret)
+ return ret;
+
+ ath5k_channel = ath5k_channel_2ghz.a2_athchan;
+ data0 = ((ath5k_hw_bitswap(ath5k_channel_2ghz.a2_flags, 8) & 0xff)
+ << 5) | (1 << 4);
+ }
+
+ if (ath5k_channel < 145 || !(ath5k_channel & 1)) {
+ clock = 1;
+ data1 = ((ath5k_hw_bitswap(ath5k_channel - 24, 8) & 0xff) << 2) |
+ (clock << 1) | (1 << 10) | 1;
+ } else {
+ clock = 0;
+ data1 = ((ath5k_hw_bitswap((ath5k_channel - 24) / 2, 8) & 0xff)
+ << 2) | (clock << 1) | (1 << 10) | 1;
+ }
+
+ ath5k_hw_reg_write(ah, (data1 & 0xff) | ((data0 & 0xff) << 8),
+ AR5K_RF_BUFFER);
+ ath5k_hw_reg_write(ah, ((data1 >> 8) & 0xff) | (data0 & 0xff00),
+ AR5K_RF_BUFFER_CONTROL_3);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value directly to the chip, after a few
+ * modifications.
+ *
+ * NOTE: Make sure the given channel frequency is within our supported range,
+ * or else we might damage the chip! Use ath5k_channel_ok before calling this one.
+ */
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u32 data, data0, data1, data2;
+ u16 c;
+
+ data = data0 = data1 = data2 = 0;
+ c = channel->center_freq;
+
+ /* My guess based on code:
+ * 2GHz RF has 2 synth modes, one with a Local Oscillator
+ * at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
+ * (3040/2). data0 is used to set the PLL divider and data1
+ * selects synth mode. */
+ if (c < 4800) {
+ /* Channel 14 and all frequencies spaced a multiple of
+ * 5MHz from it (non-standard channels) */
+ if (!((c - 2224) % 5)) {
+ /* Same as (c - 2224) / 5 */
+ data0 = ((2 * (c - 704)) - 3040) / 10;
+ data1 = 1;
+ /* Channel 1 and all frequencies spaced a multiple of
+ * 5MHz from it (standard channels, without channel 14) */
+ } else if (!((c - 2192) % 5)) {
+ /* Same as (c - 2192) / 5 */
+ data0 = ((2 * (c - 672)) - 3040) / 10;
+ data1 = 0;
+ } else
+ return -EINVAL;
+
+ data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+ /* This is more complex, we have a single synthesizer with
+ * 4 reference clock settings (?) based on frequency spacing
+ * and set using data2. LO is at 4800MHz and data0 is again used
+ * to set some divider.
+ *
+ * NOTE: There is an old atheros presentation at Stanford
+ * that mentions a method called dual direct conversion
+ * with 1GHz sliding IF for RF5110. Maybe that's what we
+ * have here, or an updated version. */
+ } else if ((c % 5) != 2 || c > 5435) {
+ if (!(c % 20) && c >= 5120) {
+ data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
+ data2 = ath5k_hw_bitswap(3, 2);
+ } else if (!(c % 10)) {
+ data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
+ data2 = ath5k_hw_bitswap(2, 2);
+ } else if (!(c % 5)) {
+ data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
+ data2 = ath5k_hw_bitswap(1, 2);
+ } else
+ return -EINVAL;
+ } else {
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
+ data2 = ath5k_hw_bitswap(0, 2);
+ }
+
+ data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001;
+
+ ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
+ ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
+
+ return 0;
+}
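+
+/*
+ * Worked example for the 2GHz branch above (illustrative): for channel 1,
+ * c = 2412MHz is on the 2192MHz-based grid since (2412 - 2192) % 5 == 0,
+ * so data0 = ((2 * (2412 - 672)) - 3040) / 10 = 44, which is indeed
+ * (2412 - 2192) / 5, and data1 = 0 selects the 2192MHz LO synth mode.
+ * The bitswapped data0 and data1 are then packed into the RF buffer
+ * words exactly as done at the end of the function.
+ */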
+
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so code changes
+ * a little bit from RF5112.
+ */
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u32 data, data0, data2;
+ u16 c;
+
+ data = data0 = data2 = 0;
+ c = channel->center_freq;
+
+ if (c < 4800) {
+ data0 = ath5k_hw_bitswap((c - 2272), 8);
+ data2 = 0;
+ /* ? 5GHz ? */
+ } else if ((c % 5) != 2 || c > 5435) {
+ if (!(c % 20) && c < 5120)
+ data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
+ else if (!(c % 10))
+ data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
+ else if (!(c % 5))
+ data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
+ else
+ return -EINVAL;
+ data2 = ath5k_hw_bitswap(1, 2);
+ } else {
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
+ data2 = ath5k_hw_bitswap(0, 2);
+ }
+
+ data = (data0 << 4) | data2 << 2 | 0x1001;
+
+ ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
+ ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
+ */
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ /*
+ * Check bounds supported by the PHY (we don't care about regulatory
+ * restrictions at this point).
+ */
+ if (!ath5k_channel_ok(ah, channel)) {
+ ATH5K_ERR(ah,
+ "channel frequency (%u MHz) out of supported "
+ "band range\n",
+ channel->center_freq);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the channel and wait
+ */
+ switch (ah->ah_radio) {
+ case AR5K_RF5110:
+ ret = ath5k_hw_rf5110_channel(ah, channel);
+ break;
+ case AR5K_RF5111:
+ ret = ath5k_hw_rf5111_channel(ah, channel);
+ break;
+ case AR5K_RF2317:
+ case AR5K_RF2425:
+ ret = ath5k_hw_rf2425_channel(ah, channel);
+ break;
+ default:
+ ret = ath5k_hw_rf5112_channel(ah, channel);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Set JAPAN setting for channel 14 */
+ if (channel->center_freq == 2484) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
+ AR5K_PHY_CCKTXCTL_JAPAN);
+ } else {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
+ AR5K_PHY_CCKTXCTL_WORLD);
+ }
+
+ ah->ah_current_channel = channel;
+
+ return 0;
+}
+
+
+/*****************\
+ PHY calibration
+\*****************/
+
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and
+ * calibrates the DC offsets of the ADCs. During this period
+ * rx/tx is disabled, so we have to handle it on the driver
+ * side.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more info on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+{
+ s32 val;
+
+ val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
+ return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
+}
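+
+/*
+ * Minimal sketch (illustrative, not driver code) of what sign_extend32()
+ * does for the 9-bit MINCCA_PWR field read above:
+ */
+static s32 __maybe_unused
+example_sign_extend9(u32 field)
+{
+ /* e.g. a raw field of 0x1A1 (417) becomes 417 - 512 = -95 dBm */
+ return (field & 0x100) ? (s32)field - 0x200 : (s32)field;
+}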
+
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+{
+ int i;
+
+ ah->ah_nfcal_hist.index = 0;
+ for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
+ ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
+}
+
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
+static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
+{
+ struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
+ hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX - 1);
+ hist->nfval[hist->index] = noise_floor;
+}
+
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+{
+ s16 sort[ATH5K_NF_CAL_HIST_MAX];
+ s16 tmp;
+ int i, j;
+
+ memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
+ for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
+ for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
+ if (sort[j] > sort[j - 1]) {
+ tmp = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
+ }
+ }
+ }
+ for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "cal %d:%d\n", i, sort[i]);
+ }
+ return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
+}
+
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
+ *
+ * This is the main function we call to perform a NF calibration,
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
+ */
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 val;
+ s16 nf, threshold;
+ u8 ee_mode;
+
+ /* keep last value if calibration hasn't completed */
+ if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "NF did not complete in calibration window\n");
+
+ return;
+ }
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
+
+ /* completed NF calibration, test threshold */
+ nf = ath5k_hw_read_measured_noise_floor(ah);
+ threshold = ee->ee_noise_floor_thr[ee_mode];
+
+ if (nf > threshold) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "noise floor failure detected; "
+ "read %d, threshold %d\n",
+ nf, threshold);
+
+ nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
+ }
+
+ ath5k_hw_update_nfcal_hist(ah, nf);
+ nf = ath5k_hw_get_median_noise_floor(ah);
+
+ /* load noise floor (in 0.5 dBm units) so the hardware will use it */
+ val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
+ val |= (nf * 2) & AR5K_PHY_NF_M;
+ ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+
+ AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+ ~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
+
+ ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+ 0, false);
+
+ /*
+ * Load a high max CCA Power value (-50 dBm in 0.5 dBm units)
+ * so that we're not capped by the median we just loaded.
+ * This will be used as the initial value for the next noise
+ * floor calibration.
+ */
+ val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
+ ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF_EN |
+ AR5K_PHY_AGCCTL_NF_NOUPDATE |
+ AR5K_PHY_AGCCTL_NF);
+
+ ah->ah_noise_floor = nf;
+
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "noise floor calibrated: %d\n", nf);
+}
+
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
+ */
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u32 phy_sig, phy_agc, phy_sat, beacon;
+ int ret;
+
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+ return 0;
+
+ /*
+ * Disable beacons and RX/TX queues, wait
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5210,
+ AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
+ beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
+ ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
+
+ usleep_range(2000, 2500);
+
+ /*
+ * Set the channel (with AGC turned off)
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
+ udelay(10);
+ ret = ath5k_hw_channel(ah, channel);
+
+ /*
+ * Activate PHY and wait
+ */
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ usleep_range(1000, 1500);
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Calibrate the radio chip
+ */
+
+ /* Remember normal state */
+ phy_sig = ath5k_hw_reg_read(ah, AR5K_PHY_SIG);
+ phy_agc = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCOARSE);
+ phy_sat = ath5k_hw_reg_read(ah, AR5K_PHY_ADCSAT);
+
+ /* Update radio registers */
+ ath5k_hw_reg_write(ah, (phy_sig & ~(AR5K_PHY_SIG_FIRPWR)) |
+ AR5K_REG_SM(-1, AR5K_PHY_SIG_FIRPWR), AR5K_PHY_SIG);
+
+ ath5k_hw_reg_write(ah, (phy_agc & ~(AR5K_PHY_AGCCOARSE_HI |
+ AR5K_PHY_AGCCOARSE_LO)) |
+ AR5K_REG_SM(-1, AR5K_PHY_AGCCOARSE_HI) |
+ AR5K_REG_SM(-127, AR5K_PHY_AGCCOARSE_LO), AR5K_PHY_AGCCOARSE);
+
+ ath5k_hw_reg_write(ah, (phy_sat & ~(AR5K_PHY_ADCSAT_ICNT |
+ AR5K_PHY_ADCSAT_THR)) |
+ AR5K_REG_SM(2, AR5K_PHY_ADCSAT_ICNT) |
+ AR5K_REG_SM(12, AR5K_PHY_ADCSAT_THR), AR5K_PHY_ADCSAT);
+
+ udelay(20);
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
+ udelay(10);
+ ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
+
+ usleep_range(1000, 1500);
+
+ /*
+ * Enable calibration and wait until completion
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_CAL);
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL, 0, false);
+
+ /* Reset to normal state */
+ ath5k_hw_reg_write(ah, phy_sig, AR5K_PHY_SIG);
+ ath5k_hw_reg_write(ah, phy_agc, AR5K_PHY_AGCCOARSE);
+ ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT);
+
+ if (ret) {
+ ATH5K_ERR(ah, "calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ return ret;
+ }
+
+ /*
+ * Re-enable RX/TX and beacons
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5210,
+ AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
+ ath5k_hw_reg_write(ah, beacon, AR5K_BEACON_5210);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
+ */
+static int
+ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
+{
+ u32 i_pwr, q_pwr;
+ s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
+ int i;
+
+ /* Skip if I/Q calibration is not needed or if it's still running */
+ if (!ah->ah_iq_cal_needed)
+ return -EINVAL;
+ else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "I/Q calibration still running");
+ return -EBUSY;
+ }
+
+ /* Calibration has finished, get the results and re-run */
+
+ /* Workaround for empty results which can apparently happen on 5212:
+ * read the registers up to 10 times until we get both i_pwr and q_pwr */
+ for (i = 0; i <= 10; i++) {
+ iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
+ i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
+ q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
+ if (i_pwr && q_pwr)
+ break;
+ }
+
+ i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coffd = q_pwr >> 6;
+ else
+ q_coffd = q_pwr >> 7;
+
+ /* In case i_coffd is zero, cancel the calibration:
+ * not only is it too small, it would also result in a
+ * divide by zero later on. */
+ if (i_coffd == 0 || q_coffd < 2)
+ return -ECANCELED;
+
+ /* Protect against loss of sign bits */
+
+ i_coff = (-iq_corr) / i_coffd;
+ i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coff = (i_pwr / q_coffd) - 64;
+ else
+ q_coff = (i_pwr / q_coffd) - 128;
+ q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
+
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
+ i_coff, q_coff, i_coffd, q_coffd);
+
+ /* Commit new I/Q values (set enable bit last to match HAL sources) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF, i_coff);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF, q_coff);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
+
+ /* Re-enable calibration -if we don't, we'll keep
+ * committing the same values again and again */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
+
+ return 0;
+}
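+
+/*
+ * Standalone sketch of the AR5212-path coefficient math above, with
+ * made-up register values (illustrative only, not driver code):
+ */
+static void __maybe_unused
+example_iq_coefficients(void)
+{
+ s32 iq_corr = 512, i_pwr = 32768, q_pwr = 33792;
+ s32 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; /* 260 */
+ s32 q_coffd = q_pwr >> 7; /* 264 */
+ s32 i_coff = (-iq_corr) / i_coffd; /* -512 / 260 = -1 */
+ s32 q_coff = (i_pwr / q_coffd) - 128; /* 124 - 128 = -4 */
+
+ /* Both values fit the signed 6-bit and 5-bit correction fields */
+ i_coff = clamp(i_coff, -32, 31);
+ q_coff = clamp(q_coff, -16, 15);
+}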
+
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
+ */
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+
+ if (ah->ah_radio == AR5K_RF5110)
+ return ath5k_hw_rf5110_calibrate(ah, channel);
+
+ ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ if (ret) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "No I/Q correction performed (%uMHz)\n",
+ channel->center_freq);
+
+ /* Happens all the time if there is not much
+ * traffic, consider it normal behaviour. */
+ ret = 0;
+ }
+
+ /* On full calibration request a PAPD probe for
+ * gainf calibration if needed */
+ if ((ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ (ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112) &&
+ channel->hw_value != AR5K_MODE_11B)
+ ath5k_hw_request_rfgain_probe(ah);
+
+ /* Update noise floor */
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+ ath5k_hw_update_noise_floor(ah);
+
+ return ret;
+}
+
+
+/***************************\
+* Spur mitigation functions *
+\***************************/
+
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. A spur is noise
+ * generated due to "reflection" effects; for more information on this
+ * method check out patent US7643810.
+ */
+static void
+ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 mag_mask[4] = {0, 0, 0, 0};
+ u32 pilot_mask[2] = {0, 0};
+ /* Note: fbin values are scaled up by 2 */
+ u16 spur_chan_fbin, chan_fbin, symbol_width, spur_detection_window;
+ s32 spur_delta_phase, spur_freq_sigma_delta;
+ s32 spur_offset, num_symbols_x16;
+ u8 num_symbol_offsets, i, freq_band;
+
+ /* Convert current frequency to fbin value (the same way channels
+ * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
+ * up by 2 so we can compare it later */
+ if (channel->band == IEEE80211_BAND_2GHZ) {
+ chan_fbin = (channel->center_freq - 2300) * 10;
+ freq_band = AR5K_EEPROM_BAND_2GHZ;
+ } else {
+ chan_fbin = (channel->center_freq - 4900) * 10;
+ freq_band = AR5K_EEPROM_BAND_5GHZ;
+ }
+
+ /* Check if any spur_chan_fbin from EEPROM is
+ * within our current channel's spur detection range */
+ spur_chan_fbin = AR5K_EEPROM_NO_SPUR;
+ spur_detection_window = AR5K_SPUR_CHAN_WIDTH;
+ /* XXX: Half/Quarter channels ?*/
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ spur_detection_window *= 2;
+
+ for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
+ spur_chan_fbin = ee->ee_spur_chans[i][freq_band];
+
+ /* Note: mask cleans AR5K_EEPROM_NO_SPUR flag
+ * so it's zero if we got nothing from EEPROM */
+ if (spur_chan_fbin == AR5K_EEPROM_NO_SPUR) {
+ spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
+ break;
+ }
+
+ if ((chan_fbin - spur_detection_window <=
+ (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK)) &&
+ (chan_fbin + spur_detection_window >=
+ (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK))) {
+ spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
+ break;
+ }
+ }
+
+ /* We need to enable spur filter for this channel */
+ if (spur_chan_fbin) {
+ spur_offset = spur_chan_fbin - chan_fbin;
+ /*
+ * Calculate deltas:
+ * spur_freq_sigma_delta -> spur_offset / sample_freq << 21
+ * spur_delta_phase -> spur_offset / chip_freq << 11
+ * Note: Both values have 100Hz resolution
+ */
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ /* Both sample_freq and chip_freq are 80MHz */
+ spur_delta_phase = (spur_offset << 16) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz * 2;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ /* Both sample_freq and chip_freq are 20MHz (?) */
+ spur_delta_phase = (spur_offset << 18) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ /* Both sample_freq and chip_freq are 10MHz (?) */
+ spur_delta_phase = (spur_offset << 19) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
+ break;
+ default:
+ if (channel->band == IEEE80211_BAND_5GHZ) {
+ /* Both sample_freq and chip_freq are 40MHz */
+ spur_delta_phase = (spur_offset << 17) / 25;
+ spur_freq_sigma_delta =
+ (spur_delta_phase >> 10);
+ symbol_width =
+ AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ } else {
+ /* sample_freq -> 40MHz chip_freq -> 44MHz
+ * (for b compatibility) */
+ spur_delta_phase = (spur_offset << 17) / 25;
+ spur_freq_sigma_delta =
+ (spur_offset << 8) / 55;
+ symbol_width =
+ AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ }
+ break;
+ }
+
+ /* Calculate pilot and magnitude masks */
+
+ /* Scale up spur_offset by 1000 to switch to 100Hz resolution
+ * and divide by symbol_width to find how many symbols we have
+ * Note: number of symbols is scaled up by 16 */
+ num_symbols_x16 = ((spur_offset * 1000) << 4) / symbol_width;
+
+ /* Spur is on a symbol if num_symbols_x16 % 16 is zero */
+ if (!(num_symbols_x16 & 0xF))
+ /* _X_ */
+ num_symbol_offsets = 3;
+ else
+ /* _xx_ */
+ num_symbol_offsets = 4;
+
+ for (i = 0; i < num_symbol_offsets; i++) {
+
+ /* Calculate pilot mask */
+ s32 curr_sym_off =
+ (num_symbols_x16 / 16) + i + 25;
+
+ /* The pilot magnitude mask seems to be a way to
+ * declare the boundaries of our detection
+ * window: it's 2 for the middle value(s) where
+ * the symbol is expected to be and 1 on the
+ * boundary values */
+ u8 plt_mag_map =
+ (i == 0 || i == (num_symbol_offsets - 1))
+ ? 1 : 2;
+
+ if (curr_sym_off >= 0 && curr_sym_off <= 32) {
+ if (curr_sym_off <= 25)
+ pilot_mask[0] |= 1 << curr_sym_off;
+ else if (curr_sym_off >= 27)
+ pilot_mask[0] |= 1 << (curr_sym_off - 1);
+ } else if (curr_sym_off >= 33 && curr_sym_off <= 52)
+ pilot_mask[1] |= 1 << (curr_sym_off - 33);
+
+ /* Calculate magnitude mask (for viterbi decoder) */
+ if (curr_sym_off >= -1 && curr_sym_off <= 14)
+ mag_mask[0] |=
+ plt_mag_map << (curr_sym_off + 1) * 2;
+ else if (curr_sym_off >= 15 && curr_sym_off <= 30)
+ mag_mask[1] |=
+ plt_mag_map << (curr_sym_off - 15) * 2;
+ else if (curr_sym_off >= 31 && curr_sym_off <= 46)
+ mag_mask[2] |=
+ plt_mag_map << (curr_sym_off - 31) * 2;
+ else if (curr_sym_off >= 47 && curr_sym_off <= 53)
+ mag_mask[3] |=
+ plt_mag_map << (curr_sym_off - 47) * 2;
+
+ }
+
+ /* Write settings on hw to enable spur filter */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_RATE, 0xff);
+ /* XXX: Self correlator also ? */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_PILOT_MASK_EN |
+ AR5K_PHY_IQ_CHAN_MASK_EN |
+ AR5K_PHY_IQ_SPUR_FILT_EN);
+
+ /* Set delta phase and freq sigma delta */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(spur_delta_phase,
+ AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE) |
+ AR5K_REG_SM(spur_freq_sigma_delta,
+ AR5K_PHY_TIMING_11_SPUR_FREQ_SD) |
+ AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC,
+ AR5K_PHY_TIMING_11);
+
+ /* Write pilot masks */
+ ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_7);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
+ AR5K_PHY_TIMING_8_PILOT_MASK_2,
+ pilot_mask[1]);
+
+ ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_9);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
+ AR5K_PHY_TIMING_10_PILOT_MASK_2,
+ pilot_mask[1]);
+
+ /* Write magnitude masks */
+ ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK_1);
+ ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK_2);
+ ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_MASK_4,
+ mag_mask[3]);
+
+ ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK2_1);
+ ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK2_2);
+ ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK2_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
+ AR5K_PHY_BIN_MASK2_4_MASK_4,
+ mag_mask[3]);
+
+ } else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) &
+ AR5K_PHY_IQ_SPUR_FILT_EN) {
+ /* Clean up spur mitigation settings and disable filter */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_RATE, 0);
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_PILOT_MASK_EN |
+ AR5K_PHY_IQ_CHAN_MASK_EN |
+ AR5K_PHY_IQ_SPUR_FILT_EN);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_11);
+
+ /* Clear pilot masks */
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_7);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
+ AR5K_PHY_TIMING_8_PILOT_MASK_2,
+ 0);
+
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_9);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
+ AR5K_PHY_TIMING_10_PILOT_MASK_2,
+ 0);
+
+ /* Clear magnitude masks */
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
+ AR5K_PHY_BIN_MASK_CTL_MASK_4,
+ 0);
+
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_3);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
+ AR5K_PHY_BIN_MASK2_4_MASK_4,
+ 0);
+ }
+}
+
+
+/*****************\
+* Antenna control *
+\*****************/
+
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that many; the maximum number of antennas I've seen is 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode (A -> Rx, B -> Tx)
+ *
+ * Also note that when the antenna is set to F on the tx descriptor, the card
+ * inverts the current tx antenna.
+ */
+
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
+ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
+{
+ if (ah->ah_version != AR5K_AR5210)
+ ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
+}
+
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
+ */
+static void
+ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
+{
+ switch (ee_mode) {
+ case AR5K_EEPROM_MODE_11G:
+ /* XXX: This is set to
+ * disabled on initvals !!! */
+ case AR5K_EEPROM_MODE_11A:
+ if (enable)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
+ break;
+ default:
+ return;
+ }
+
+ if (enable) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
+ AR5K_PHY_RESTART_DIV_GC, 4);
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
+ AR5K_PHY_FAST_ANT_DIV_EN);
+ } else {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
+ AR5K_PHY_RESTART_DIV_GC, 0);
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
+ AR5K_PHY_FAST_ANT_DIV_EN);
+ }
+}
+
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
+void
+ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
+{
+ u8 ant0, ant1;
+
+ /*
+ * In case a fixed antenna was set as default
+ * use the same switch table twice.
+ */
+ if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
+ ant0 = ant1 = AR5K_ANT_SWTABLE_A;
+ else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
+ ant0 = ant1 = AR5K_ANT_SWTABLE_B;
+ else {
+ ant0 = AR5K_ANT_SWTABLE_A;
+ ant1 = AR5K_ANT_SWTABLE_B;
+ }
+
+ /* Set antenna idle switch table */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
+ AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
+ (ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] |
+ AR5K_PHY_ANT_CTL_TXRX_EN));
+
+ /* Set antenna switch tables */
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0],
+ AR5K_PHY_ANT_SWITCH_TABLE_0);
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1],
+ AR5K_PHY_ANT_SWITCH_TABLE_1);
+}
+
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
+ */
+void
+ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div;
+ bool use_def_for_sg;
+ int ee_mode;
+ u8 def_ant, tx_ant;
+ u32 sta_id1 = 0;
+
+ /* If the channel is not initialized yet we can't set the antennas,
+ * so just store the mode; it will be applied on the next reset */
+ if (channel == NULL) {
+ ah->ah_ant_mode = ant_mode;
+ return;
+ }
+
+ def_ant = ah->ah_def_ant;
+
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
+
+ switch (ant_mode) {
+ case AR5K_ANTMODE_DEFAULT:
+ tx_ant = 0;
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = false;
+ use_def_for_sg = false;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_FIXED_A:
+ def_ant = 1;
+ tx_ant = 1;
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_FIXED_B:
+ def_ant = 2;
+ tx_ant = 2;
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_SINGLE_AP:
+ def_ant = 1; /* updated on tx */
+ tx_ant = 0;
+ use_def_for_tx = true;
+ update_def_on_tx = true;
+ use_def_for_rts = true;
+ use_def_for_sg = true;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_SECTOR_AP:
+ tx_ant = 1; /* variable */
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = false;
+ fast_div = false;
+ break;
+ case AR5K_ANTMODE_SECTOR_STA:
+ tx_ant = 1; /* variable */
+ use_def_for_tx = true;
+ update_def_on_tx = false;
+ use_def_for_rts = true;
+ use_def_for_sg = false;
+ fast_div = true;
+ break;
+ case AR5K_ANTMODE_DEBUG:
+ def_ant = 1;
+ tx_ant = 2;
+ use_def_for_tx = false;
+ update_def_on_tx = false;
+ use_def_for_rts = false;
+ use_def_for_sg = false;
+ fast_div = false;
+ break;
+ default:
+ return;
+ }
+
+ ah->ah_tx_ant = tx_ant;
+ ah->ah_ant_mode = ant_mode;
+ ah->ah_def_ant = def_ant;
+
+ sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
+ sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
+ sta_id1 |= use_def_for_rts ? AR5K_STA_ID1_RTS_DEF_ANTENNA : 0;
+ sta_id1 |= use_def_for_sg ? AR5K_STA_ID1_SELFGEN_DEF_ANT : 0;
+
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_ANTENNA_SETTINGS);
+
+ if (sta_id1)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
+
+ ath5k_hw_set_antenna_switch(ah, ee_mode);
+ /* Note: set diversity before the default antenna,
+ * otherwise it won't work correctly */
+ ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
+ ath5k_hw_set_def_antenna(ah, def_ant);
+}
+
+
+/****************\
+* TX power setup *
+\****************/
+
+/*
+ * Helper functions
+ */
+
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
+ */
+static s16
+ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
+ s16 y_left, s16 y_right)
+{
+ s16 ratio, result;
+
+ /* Avoid divide by zero and skip interpolation
+ * if we have the same point */
+ if ((x_left == x_right) || (y_left == y_right))
+ return y_left;
+
+ /*
+ * Since we use ints and not floats, we need to scale up in
+ * order to get a sane ratio value (or else we'll e.g. always
+ * get 1 instead of 1.25, 1.75 etc). We scale up by 100
+ * to have some accuracy both for 0.5 and 0.25 steps.
+ */
+ ratio = ((100 * y_right - 100 * y_left) / (x_right - x_left));
+
+ /* Now scale down to be in range */
+ result = y_left + (ratio * (target - x_left) / 100);
+
+ return result;
+}
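+
+/*
+ * Usage sketch (illustrative): interpolating y at x = 10 between the
+ * points (8, 20) and (12, 30) gives ratio = (3000 - 2000) / 4 = 250 and
+ * result = 20 + 250 * (10 - 8) / 100 = 25, i.e. a sane in-between value
+ * despite the integer-only math.
+ */
+static void __maybe_unused
+example_interpolation(void)
+{
+ s16 y = ath5k_get_interpolated_value(10, 8, 12, 20, 30);
+
+ WARN_ON(y != 25);
+}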
+
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
+ *
+ * Since we have the top of the curve and we draw the line downwards
+ * until we reach 1 (1 pcdac step), we need to know at which point
+ * (x value) that happens, so that we don't go below the x axis and get
+ * negative pcdac values when creating the curve, or fill the table with zeros.
+ */
+static s16
+ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
+ const s16 *pwrL, const s16 *pwrR)
+{
+ s8 tmp;
+ s16 min_pwrL, min_pwrR;
+ s16 pwr_i;
+
+ /* Some vendors write the same pcdac value twice !!! */
+ if (stepL[0] == stepL[1] || stepR[0] == stepR[1])
+ return max(pwrL[0], pwrR[0]);
+
+ if (pwrL[0] == pwrL[1])
+ min_pwrL = pwrL[0];
+ else {
+ pwr_i = pwrL[0];
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrL[0], pwrL[1],
+ stepL[0], stepL[1]);
+ } while (tmp > 1);
+
+ min_pwrL = pwr_i;
+ }
+
+ if (pwrR[0] == pwrR[1])
+ min_pwrR = pwrR[0];
+ else {
+ pwr_i = pwrR[0];
+ do {
+ pwr_i--;
+ tmp = (s8) ath5k_get_interpolated_value(pwr_i,
+ pwrR[0], pwrR[1],
+ stepR[0], stepR[1]);
+ } while (tmp > 1);
+
+ min_pwrR = pwr_i;
+ }
+
+ /* Keep the right boundary so that it works for both curves */
+ return max(min_pwrL, min_pwrR);
+}
+
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
+ * Interpolate (pwr,vpd) points to create a Power to PDADC or a
+ * Power to PCDAC curve.
+ *
+ * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC
+ * steps (offsets) on y axis. Power can go up to 31.5dB and max
+ * PCDAC/PDADC step for each curve is 64, but we can write more than
+ * one curve on hw so we can go up to 128 (which is the max step we
+ * can write on the final table).
+ *
+ * We write y values (PCDAC/PDADC steps) on hw.
+ */
+static void
+ath5k_create_power_curve(s16 pmin, s16 pmax,
+ const s16 *pwr, const u8 *vpd,
+ u8 num_points,
+ u8 *vpd_table, u8 type)
+{
+ u8 idx[2] = { 0, 1 };
+ s16 pwr_i = 2 * pmin;
+ int i;
+
+ if (num_points < 2)
+ return;
+
+ /* We want the whole line, so adjust boundaries
+ * to cover the entire power range. Note that
+ * power values are already in 0.25dB units so there
+ * is no need to multiply pwr_i by 2 */
+ if (type == AR5K_PWRTABLE_LINEAR_PCDAC) {
+ pwr_i = pmin;
+ pmin = 0;
+ pmax = 63;
+ }
+
+ /* Find surrounding turning points (TPs)
+ * and interpolate between them */
+ for (i = 0; (i <= (u16) (pmax - pmin)) &&
+ (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
+
+ /* We passed the right TP, so move to the next set of TPs;
+ * if we pass the last TP, extrapolate above it using the
+ * last two TPs for the ratio */
+ if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) {
+ idx[0]++;
+ idx[1]++;
+ }
+
+ vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i,
+ pwr[idx[0]], pwr[idx[1]],
+ vpd[idx[0]], vpd[idx[1]]);
+
+ /* Increase by 0.5dB
+ * (0.25 dB units) */
+ pwr_i += 2;
+ }
+}
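+
+/*
+ * Usage sketch (illustrative, not driver code): three turning points at
+ * 0.25dB powers {0, 4, 8} with PDADC steps {0, 20, 40} expand, in 0.5dB
+ * steps, into the table entries {0, 10, 20, 30, 40} for pmin = 0, pmax = 4.
+ */
+static void __maybe_unused
+example_power_curve(void)
+{
+ static const s16 pwr[] = { 0, 4, 8 };
+ static const u8 vpd[] = { 0, 20, 40 };
+ u8 table[AR5K_EEPROM_POWER_TABLE_SIZE] = { 0 };
+
+ ath5k_create_power_curve(0, 4, pwr, vpd, 3, table,
+ AR5K_PWRTABLE_PWR_TO_PDADC);
+}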
+
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
+ * Get the surrounding per-channel power calibration piers
+ * for a given frequency so that we can interpolate between
+ * them and come up with an appropriate dataset for our current
+ * channel.
+ */
+static void
+ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ struct ath5k_chan_pcal_info **pcinfo_l,
+ struct ath5k_chan_pcal_info **pcinfo_r)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_chan_pcal_info *pcinfo;
+ u8 idx_l, idx_r;
+ u8 mode, max, i;
+ u32 target = channel->center_freq;
+
+ idx_l = 0;
+ idx_r = 0;
+
+ switch (channel->hw_value) {
+ case AR5K_EEPROM_MODE_11A:
+ pcinfo = ee->ee_pwr_cal_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case AR5K_EEPROM_MODE_11B:
+ pcinfo = ee->ee_pwr_cal_b;
+ mode = AR5K_EEPROM_MODE_11B;
+ break;
+ case AR5K_EEPROM_MODE_11G:
+ default:
+ pcinfo = ee->ee_pwr_cal_g;
+ mode = AR5K_EEPROM_MODE_11G;
+ break;
+ }
+ max = ee->ee_n_piers[mode] - 1;
+
+ /* Frequency is below our calibrated
+ * range. Use the lowest power curve
+ * we have */
+ if (target < pcinfo[0].freq) {
+ idx_l = idx_r = 0;
+ goto done;
+ }
+
+ /* Frequency is above our calibrated
+ * range. Use the highest power curve
+ * we have */
+ if (target > pcinfo[max].freq) {
+ idx_l = idx_r = max;
+ goto done;
+ }
+
+ /* Frequency is inside our calibrated
+ * channel range. Pick the surrounding
+ * calibration piers so that we can
+ * interpolate */
+ for (i = 0; i <= max; i++) {
+
+ /* Frequency matches one of our calibration
+ * piers, no need to interpolate, just use
+ * that calibration pier */
+ if (pcinfo[i].freq == target) {
+ idx_l = idx_r = i;
+ goto done;
+ }
+
+ /* We found a calibration pier that's above
+ * frequency, use this pier and the previous
+ * one to interpolate */
+ if (target < pcinfo[i].freq) {
+ idx_r = i;
+ idx_l = idx_r - 1;
+ goto done;
+ }
+ }
+
+done:
+ *pcinfo_l = &pcinfo[idx_l];
+ *pcinfo_r = &pcinfo[idx_r];
+}
+
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw *ah,
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
+ * Get the surrounding per-rate power calibration data
+ * for a given frequency and interpolate between power
+ * values to set max target power supported by hw for
+ * each rate on this frequency.
+ */
+static void
+ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ struct ath5k_rate_pcal_info *rates)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_rate_pcal_info *rpinfo;
+ u8 idx_l, idx_r;
+ u8 mode, max, i;
+ u32 target = channel->center_freq;
+
+ idx_l = 0;
+ idx_r = 0;
+
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ rpinfo = ee->ee_rate_tpwr_a;
+ mode = AR5K_EEPROM_MODE_11A;
+ break;
+ case AR5K_MODE_11B:
+ rpinfo = ee->ee_rate_tpwr_b;
+ mode = AR5K_EEPROM_MODE_11B;
+ break;
+ case AR5K_MODE_11G:
+ default:
+ rpinfo = ee->ee_rate_tpwr_g;
+ mode = AR5K_EEPROM_MODE_11G;
+ break;
+ }
+ max = ee->ee_rate_target_pwr_num[mode] - 1;
+
+ /* Get the surrounding calibration
+ * piers - same as above */
+ if (target < rpinfo[0].freq) {
+ idx_l = idx_r = 0;
+ goto done;
+ }
+
+ if (target > rpinfo[max].freq) {
+ idx_l = idx_r = max;
+ goto done;
+ }
+
+ for (i = 0; i <= max; i++) {
+
+ if (rpinfo[i].freq == target) {
+ idx_l = idx_r = i;
+ goto done;
+ }
+
+ if (target < rpinfo[i].freq) {
+ idx_r = i;
+ idx_l = idx_r - 1;
+ goto done;
+ }
+ }
+
+done:
+ /* Now interpolate power value, based on the frequency */
+ rates->freq = target;
+
+ rates->target_power_6to24 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_6to24,
+ rpinfo[idx_r].target_power_6to24);
+
+ rates->target_power_36 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_36,
+ rpinfo[idx_r].target_power_36);
+
+ rates->target_power_48 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_48,
+ rpinfo[idx_r].target_power_48);
+
+ rates->target_power_54 =
+ ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
+ rpinfo[idx_r].freq,
+ rpinfo[idx_l].target_power_54,
+ rpinfo[idx_r].target_power_54);
+}
+
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Get the max edge power for this channel if
+ * we have such data from EEPROM's Conformance Test
+ * Limits (CTL), and limit max power if needed.
+ */
+static void
+ath5k_get_max_ctl_power(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ struct ath5k_edge_power *rep = ee->ee_ctl_pwr;
+ u8 *ctl_val = ee->ee_ctl;
+ s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4;
+ s16 edge_pwr = 0;
+ u8 rep_idx;
+ u8 i, ctl_mode;
+ u8 ctl_idx = 0xFF;
+ u32 target = channel->center_freq;
+
+ ctl_mode = ath_regd_get_band_ctl(regulatory, channel->band);
+
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ ctl_mode |= AR5K_CTL_TURBO;
+ else
+ ctl_mode |= AR5K_CTL_11A;
+ break;
+ case AR5K_MODE_11G:
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ ctl_mode |= AR5K_CTL_TURBOG;
+ else
+ ctl_mode |= AR5K_CTL_11G;
+ break;
+ case AR5K_MODE_11B:
+ ctl_mode |= AR5K_CTL_11B;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ee->ee_ctls; i++) {
+ if (ctl_val[i] == ctl_mode) {
+ ctl_idx = i;
+ break;
+ }
+ }
+
+ /* If we don't have a CTL dataset available for this mode,
+ * bail out; otherwise find the edge power for our frequency */
+ if (ctl_idx == 0xFF)
+ return;
+
+ /* Edge powers are sorted by frequency from lower
+ * to higher. Each CTL corresponds to 8 edge power
+ * measurements. */
+ rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
+
+ /* Don't do a boundary check because we
+ * might have more than one band defined
+ * for this mode */
+
+ /* Get the edge power that's closer to our
+ * frequency */
+ for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
+ rep_idx += i;
+ if (target <= rep[rep_idx].freq)
+ edge_pwr = (s16) rep[rep_idx].edge;
+ }
+
+ if (edge_pwr)
+ ah->ah_txpower.txp_max_pwr = 4 * min(edge_pwr, max_chan_pwr);
+}
+
+
+/*
+ * Power to PCDAC table functions
+ */
+
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we either use its own calibrated points (piers) or
+ * -if we don't have calibration data for this specific channel- derive them
+ * from the surrounding channels we do have calibration data for, by doing a
+ * linear interpolation between them. Then, with the calibrated points for
+ * this channel at hand, we do another linear interpolation between them to
+ * get the whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ *
+ * No further processing is needed for RF5111, the only thing we have to
+ * do is fill the values below and above calibration range since eeprom data
+ * may not cover the entire PCDAC table.
+ */
+static void
+ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
+ s16 *table_max)
+{
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
+ u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
+ s16 min_pwr, max_pwr;
+
+ /* Get table boundaries */
+ min_pwr = table_min[0];
+ pcdac_0 = pcdac_tmp[0];
+
+ max_pwr = table_max[0];
+ pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
+
+ /* Extrapolate below minimum using pcdac_0 */
+ pcdac_i = 0;
+ for (i = 0; i < min_pwr; i++)
+ pcdac_out[pcdac_i++] = pcdac_0;
+
+ /* Copy values from pcdac_tmp */
+ pwr_idx = min_pwr;
+ for (i = 0; pwr_idx <= max_pwr &&
+ pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
+ pcdac_out[pcdac_i++] = pcdac_tmp[i];
+ pwr_idx++;
+ }
+
+ /* Extrapolate above maximum */
+ while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
+ pcdac_out[pcdac_i++] = pcdac_n;
+
+}
+
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
+ *
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
+ * RFX112 can have up to 2 curves (one for low txpower range and one for
+ * higher txpower range). We need to put them both on pcdac_out and place
+ * them in the correct location. In case we only have one curve available
+ * just fit it on pcdac_out (it's supposed to cover the entire range of
+ * available pwr levels since it's always the higher power curve). Extrapolate
+ * below and above final table if needed.
+ */
+static void
+ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
+ s16 *table_max, u8 pdcurves)
+{
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_low_pwr;
+ u8 *pcdac_high_pwr;
+ u8 *pcdac_tmp;
+ u8 pwr;
+ s16 max_pwr_idx;
+ s16 min_pwr_idx;
+ s16 mid_pwr_idx = 0;
+ /* The edge flag turns on the 7th bit of the PCDAC
+ * to declare the higher power curve (it forces values
+ * to be greater than 64). If we only have one curve
+ * we don't need to set this; if we have 2 curves and
+ * fill the table backwards it can also be used to
+ * switch from the higher power curve to the lower one */
+ u8 edge_flag;
+ int i;
+
+ /* When we have only one curve available
+ * that's the higher power curve. If we have
+ * two curves the first is the high power curve
+ * and the next is the low power curve. */
+ if (pdcurves > 1) {
+ pcdac_low_pwr = ah->ah_txpower.tmpL[1];
+ pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+ mid_pwr_idx = table_max[1] - table_min[1] - 1;
+ max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+
+ /* If table size goes beyond 31.5dB, keep the
+ * upper 31.5dB range when setting tx power.
+ * Note: 126 = 31.5 dB in quarter dB steps */
+ if (table_max[0] - table_min[1] > 126)
+ min_pwr_idx = table_max[0] - 126;
+ else
+ min_pwr_idx = table_min[1];
+
+ /* Since we fill table backwards
+ * start from high power curve */
+ pcdac_tmp = pcdac_high_pwr;
+
+ edge_flag = 0x40;
+ } else {
+ pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
+ pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+ min_pwr_idx = table_min[0];
+ max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+ pcdac_tmp = pcdac_high_pwr;
+ edge_flag = 0;
+ }
+
+ /* This is used when setting tx power */
+ ah->ah_txpower.txp_min_idx = min_pwr_idx / 2;
+
+ /* Fill Power to PCDAC table backwards */
+ pwr = max_pwr_idx;
+ for (i = 63; i >= 0; i--) {
+ /* Entering lower power range, reset
+ * edge flag and set pcdac_tmp to lower
+ * power curve.*/
+ if (edge_flag == 0x40 &&
+ (2 * pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
+ edge_flag = 0x00;
+ pcdac_tmp = pcdac_low_pwr;
+ pwr = mid_pwr_idx / 2;
+ }
+
+ /* Don't go below 1, extrapolate below if we have
+ * already switched to the lower power curve -or
+ * we only have one curve and edge_flag is zero
+ * anyway */
+ if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
+ while (i >= 0) {
+ pcdac_out[i] = pcdac_out[i + 1];
+ i--;
+ }
+ break;
+ }
+
+ pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
+
+ /* Cap the value at 126 -going above it can happen
+ * because we OR the pcdac_out value with edge_flag
+ * on the high power curve */
+ if (pcdac_out[i] > 126)
+ pcdac_out[i] = 126;
+
+ /* Decrease by a 0.5dB step */
+ pwr--;
+ }
+}
+
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
+static void
+ath5k_write_pcdac_table(struct ath5k_hw *ah)
+{
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ int i;
+
+ /*
+ * Write TX power values
+ */
+ for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
+ ath5k_hw_reg_write(ah,
+ (((pcdac_out[2 * i + 0] << 8 | 0xff) & 0xffff) << 0) |
+ (((pcdac_out[2 * i + 1] << 8 | 0xff) & 0xffff) << 16),
+ AR5K_PHY_PCDAC_TXPOWER(i));
+ }
+}
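+
+/*
+ * Packing example (illustrative): two consecutive PCDAC entries, say 0x12
+ * and 0x34, end up in a single register word as 0x34ff12ff - each entry
+ * sits in bits 8-15 of its half-word with the low byte forced to 0xff.
+ */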
+
+
+/*
+ * Power to PDADC table functions
+ */
+
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5 db steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
+ *
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
+ * We can have up to 4 pd curves, we need to do a similar process
+ * as we do for RF5112. This time we don't have an edge_flag but we
+ * set the gain boundaries on a separate register.
+ */
+static void
+ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
+ s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
+{
+ u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
+ u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+ u8 *pdadc_tmp;
+ s16 pdadc_0;
+ u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
+ u8 pd_gain_overlap;
+
+ /* Note: the register value is initialized from initvals;
+ * there is no feedback from hw.
+ * XXX: What about pd_gain_overlap from EEPROM ? */
+ pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) &
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP;
+
+ /* Create final PDADC table */
+ for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) {
+ pdadc_tmp = ah->ah_txpower.tmpL[pdg];
+
+ if (pdg == pdcurves - 1)
+ /* 2 dB boundary stretch for last
+ * (higher power) curve */
+ gain_boundaries[pdg] = pwr_max[pdg] + 4;
+ else
+ /* Set gain boundary in the middle
+ * between this curve and the next one */
+ gain_boundaries[pdg] =
+ (pwr_max[pdg] + pwr_min[pdg + 1]) / 2;
+
+ /* Sanity check in case our 2 db stretch got out of
+ * range. */
+ if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER)
+ gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER;
+
+ /* For the first curve (lower power)
+ * start from 0 dB */
+ if (pdg == 0)
+ pdadc_0 = 0;
+ else
+ /* For the other curves use the gain overlap */
+ pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) -
+ pd_gain_overlap;
+
+ /* Force each power step to be at least 0.5 dB */
+ if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
+ pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
+ else
+ pwr_step = 1;
+
+ /* If pdadc_0 is negative, we need to extrapolate
+ * below this pdgain by a number of pwr_steps */
+ while ((pdadc_0 < 0) && (pdadc_i < 128)) {
+ s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step;
+ pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp;
+ pdadc_0++;
+ }
+
+ /* Set last pwr level, using gain boundaries */
+ pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
+ /* Limit it to be inside pwr range */
+ table_size = pwr_max[pdg] - pwr_min[pdg];
+ max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
+
+ /* Fill pdadc_out table */
+ while (pdadc_0 < max_idx && pdadc_i < 128)
+ pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
+
+ /* Need to extrapolate above this pdgain? */
+ if (pdadc_n <= max_idx)
+ continue;
+
+ /* Force each power step to be at least 0.5 dB */
+ if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
+ pwr_step = pdadc_tmp[table_size - 1] -
+ pdadc_tmp[table_size - 2];
+ else
+ pwr_step = 1;
+
+ /* Extrapolate above */
+ while ((pdadc_0 < (s16) pdadc_n) &&
+ (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) {
+ s16 tmp = pdadc_tmp[table_size - 1] +
+ (pdadc_0 - max_idx) * pwr_step;
+ pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp;
+ pdadc_0++;
+ }
+ }
+
+ while (pdg < AR5K_EEPROM_N_PD_GAINS) {
+ gain_boundaries[pdg] = gain_boundaries[pdg - 1];
+ pdg++;
+ }
+
+ while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) {
+ pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1];
+ pdadc_i++;
+ }
+
+ /* Set gain boundaries */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(pd_gain_overlap,
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) |
+ AR5K_REG_SM(gain_boundaries[0],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) |
+ AR5K_REG_SM(gain_boundaries[1],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) |
+ AR5K_REG_SM(gain_boundaries[2],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) |
+ AR5K_REG_SM(gain_boundaries[3],
+ AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4),
+ AR5K_PHY_TPC_RG5);
+
+ /* Used for setting rate power table */
+ ah->ah_txpower.txp_min_idx = pwr_min[0];
+
+}
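+
+/*
+ * Worked example for the boundary placement above (illustrative): with two
+ * curves spanning pwr_min/pwr_max of 0/24 and 20/44 (0.5dB steps), the
+ * boundary between them lands at (24 + 20) / 2 = 22 and the last curve gets
+ * the 2dB stretch, 44 + 4 = 48.
+ */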
+
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+ u8 *pdg_to_idx = ee->ee_pdc_to_idx[ee_mode];
+ u8 pdcurves = ee->ee_pd_gains[ee_mode];
+ u32 reg;
+ u8 i;
+
+ /* Select the right pdgain curves */
+
+ /* Clear current settings */
+ reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1);
+ reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 |
+ AR5K_PHY_TPC_RG1_PDGAIN_2 |
+ AR5K_PHY_TPC_RG1_PDGAIN_3 |
+ AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
+
+ /*
+ * Use pd_gains curve from eeprom
+ *
+ * This overrides the default setting from initvals
+ * in case some vendors (e.g. Zcomax) don't use the default
+ * curves. If we don't honor their settings we'll get a
+ * 5dB (1 * gain overlap ?) drop.
+ */
+ reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
+
+ switch (pdcurves) {
+ case 3:
+ reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
+ /* Fall through */
+ case 2:
+ reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
+ /* Fall through */
+ case 1:
+ reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
+ break;
+ }
+ ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1);
+
+ /*
+ * Write TX power values
+ */
+ for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
+ u32 val = get_unaligned_le32(&pdadc_out[4 * i]);
+ ath5k_hw_reg_write(ah, val, AR5K_PHY_PDADC_TXPOWER(i));
+ }
+}
+
+
+/*
+ * Common code for PCDAC/PDADC tables
+ */
+
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
+ * This is the main function that uses all of the above
+ * to set the PCDAC/PDADC table on hw for the current channel.
+ * This table is used for tx power calibration on the baseband;
+ * without it we get weird tx power levels and, in some cases,
+ * a distorted spectral mask.
+ */
+static int
+ath5k_setup_channel_powertable(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ u8 ee_mode, u8 type)
+{
+ struct ath5k_pdgain_info *pdg_L, *pdg_R;
+ struct ath5k_chan_pcal_info *pcinfo_L;
+ struct ath5k_chan_pcal_info *pcinfo_R;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
+ s16 table_min[AR5K_EEPROM_N_PD_GAINS];
+ s16 table_max[AR5K_EEPROM_N_PD_GAINS];
+ u8 *tmpL;
+ u8 *tmpR;
+ u32 target = channel->center_freq;
+ int pdg, i;
+
+ /* Get surrounding freq piers for this channel */
+ ath5k_get_chan_pcal_surrounding_piers(ah, channel,
+ &pcinfo_L,
+ &pcinfo_R);
+
+ /* Loop over pd gain curves on
+ * surrounding freq piers by index */
+ for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {
+
+ /* Fill curves in reverse order
+ * from lower power (max gain)
+ * to higher power. Use curve -> idx
+ * backmapping we did on eeprom init */
+ u8 idx = pdg_curve_to_idx[pdg];
+
+ /* Grab the needed curves by index */
+ pdg_L = &pcinfo_L->pd_curves[idx];
+ pdg_R = &pcinfo_R->pd_curves[idx];
+
+ /* Initialize the temp tables */
+ tmpL = ah->ah_txpower.tmpL[pdg];
+ tmpR = ah->ah_txpower.tmpR[pdg];
+
+ /* Set curve's x boundaries and create
+ * curves so that they cover the same
+		 * range (if we don't, one table
+		 * will have values over some range, the
+		 * other one won't have any and interpolation
+		 * will fail) */
+ table_min[pdg] = min(pdg_L->pd_pwr[0],
+ pdg_R->pd_pwr[0]) / 2;
+
+ table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
+ pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2;
+
+ /* Now create the curves on surrounding channels
+ * and interpolate if needed to get the final
+ * curve for this gain on this channel */
+ switch (type) {
+ case AR5K_PWRTABLE_LINEAR_PCDAC:
+			/* Override min/max so that we don't lose
+			 * accuracy (don't divide by 2) */
+ table_min[pdg] = min(pdg_L->pd_pwr[0],
+ pdg_R->pd_pwr[0]);
+
+ table_max[pdg] =
+ max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
+ pdg_R->pd_pwr[pdg_R->pd_points - 1]);
+
+ /* Override minimum so that we don't get
+ * out of bounds while extrapolating
+ * below. Don't do this when we have 2
+ * curves and we are on the high power curve
+ * because table_min is ok in this case */
+ if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) {
+
+ table_min[pdg] =
+ ath5k_get_linear_pcdac_min(pdg_L->pd_step,
+ pdg_R->pd_step,
+ pdg_L->pd_pwr,
+ pdg_R->pd_pwr);
+
+ /* Don't go too low because we will
+ * miss the upper part of the curve.
+ * Note: 126 = 31.5dB (max power supported)
+ * in 0.25dB units */
+ if (table_max[pdg] - table_min[pdg] > 126)
+ table_min[pdg] = table_max[pdg] - 126;
+ }
+
+ /* Fall through */
+ case AR5K_PWRTABLE_PWR_TO_PCDAC:
+ case AR5K_PWRTABLE_PWR_TO_PDADC:
+
+ ath5k_create_power_curve(table_min[pdg],
+ table_max[pdg],
+ pdg_L->pd_pwr,
+ pdg_L->pd_step,
+ pdg_L->pd_points, tmpL, type);
+
+			/* We are on a calibration
+			 * pier, no need to interpolate
+			 * between freq piers */
+ if (pcinfo_L == pcinfo_R)
+ continue;
+
+ ath5k_create_power_curve(table_min[pdg],
+ table_max[pdg],
+ pdg_R->pd_pwr,
+ pdg_R->pd_step,
+ pdg_R->pd_points, tmpR, type);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Interpolate between curves
+ * of surrounding freq piers to
+ * get the final curve for this
+ * pd gain. Re-use tmpL for interpolation
+ * output */
+ for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) &&
+ (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
+ tmpL[i] = (u8) ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ (s16) tmpL[i],
+ (s16) tmpR[i]);
+ }
+ }
+
+ /* Now we have a set of curves for this
+ * channel on tmpL (x range is table_max - table_min
+ * and y values are tmpL[pdg][]) sorted in the same
+ * order as EEPROM (because we've used the backmapping).
+ * So for RF5112 it's from higher power to lower power
+ * and for RF2413 it's from lower power to higher power.
+ * For RF5111 we only have one curve. */
+
+ /* Fill min and max power levels for this
+ * channel by interpolating the values on
+ * surrounding channels to complete the dataset */
+ ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ pcinfo_L->min_pwr, pcinfo_R->min_pwr);
+
+ ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target,
+ (s16) pcinfo_L->freq,
+ (s16) pcinfo_R->freq,
+ pcinfo_L->max_pwr, pcinfo_R->max_pwr);
+
+ /* Fill PCDAC/PDADC table */
+ switch (type) {
+ case AR5K_PWRTABLE_LINEAR_PCDAC:
+		/* For RF5112 we can have one or two curves
+		 * and each curve covers a certain power level
+		 * range so we need to do some more processing */
+ ath5k_combine_linear_pcdac_curves(ah, table_min, table_max,
+ ee->ee_pd_gains[ee_mode]);
+
+ /* Set txp.offset so that we can
+ * match max power value with max
+ * table index */
+ ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
+ break;
+ case AR5K_PWRTABLE_PWR_TO_PCDAC:
+ /* We are done for RF5111 since it has only
+ * one curve, just fit the curve on the table */
+ ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max);
+
+ /* No rate powertable adjustment for RF5111 */
+ ah->ah_txpower.txp_min_idx = 0;
+ ah->ah_txpower.txp_offset = 0;
+ break;
+ case AR5K_PWRTABLE_PWR_TO_PDADC:
+ /* Set PDADC boundaries and fill
+ * final PDADC table */
+ ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
+ ee->ee_pd_gains[ee_mode]);
+
+ /* Set txp.offset, note that table_min
+ * can be negative */
+ ah->ah_txpower.txp_offset = table_min[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ah->ah_txpower.txp_setup = true;
+
+ return 0;
+}
+
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
+static void
+ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
+{
+ if (type == AR5K_PWRTABLE_PWR_TO_PDADC)
+ ath5k_write_pwr_to_pdadc_table(ah, ee_mode);
+ else
+ ath5k_write_pcdac_table(ah);
+}
+
+
+/**
+ * DOC: Per-rate tx power setting
+ *
+ * This is the code that sets the desired tx power limit (below
+ * maximum) on hw for each rate (we also have TPC that sets
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
+ *
+ * For now we only limit txpower based on maximum tx power
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc., to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on the PA etc).
+ *
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
+ * rates[0] - rates[7] -> OFDM rates
+ * rates[8] - rates[14] -> CCK rates
+ * rates[15] -> XR rates (they all have the same power)
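+ *
+ * As used below, rates[0] corresponds to the lowest OFDM rate (6Mbit)
+ * and rates[7] to 54Mbit, while rates[8] - rates[14] run from 1Mbit
+ * long preamble up to 11Mbit short preamble.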
+ */
+
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
+ struct ath5k_rate_pcal_info *rate_info,
+ u8 ee_mode)
+{
+ unsigned int i;
+ u16 *rates;
+ s16 rate_idx_scaled = 0;
+
+	/* max_pwr is the power level we got from the driver/user in
+	 * 0.5dB units; switch to 0.25dB units so we can compare */
+ max_pwr *= 2;
+ max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2;
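+	/* (e.g. a request of 32 -16dB in 0.5dB steps- becomes 64 in
+	 * 0.25dB steps; if txp_max_pwr is 56 (14dB) we clamp to 56 and
+	 * end up with 28, i.e. 14dB back in 0.5dB steps) */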
+
+ /* apply rate limits */
+ rates = ah->ah_txpower.txp_rates_power_table;
+
+ /* OFDM rates 6 to 24Mb/s */
+ for (i = 0; i < 5; i++)
+ rates[i] = min(max_pwr, rate_info->target_power_6to24);
+
+	/* Remaining OFDM rates */
+ rates[5] = min(rates[0], rate_info->target_power_36);
+ rates[6] = min(rates[0], rate_info->target_power_48);
+ rates[7] = min(rates[0], rate_info->target_power_54);
+
+ /* CCK rates */
+ /* 1L */
+ rates[8] = min(rates[0], rate_info->target_power_6to24);
+ /* 2L */
+ rates[9] = min(rates[0], rate_info->target_power_36);
+ /* 2S */
+ rates[10] = min(rates[0], rate_info->target_power_36);
+ /* 5L */
+ rates[11] = min(rates[0], rate_info->target_power_48);
+ /* 5S */
+ rates[12] = min(rates[0], rate_info->target_power_48);
+ /* 11L */
+ rates[13] = min(rates[0], rate_info->target_power_54);
+ /* 11S */
+ rates[14] = min(rates[0], rate_info->target_power_54);
+
+ /* XR rates */
+ rates[15] = min(rates[0], rate_info->target_power_6to24);
+
+	/* CCK rates have a different peak to average ratio
+	 * so we have to tweak their power so that gainf
+	 * correction works OK. For this we use the OFDM to
+	 * CCK delta from eeprom */
+ if ((ee_mode == AR5K_EEPROM_MODE_11G) &&
+ (ah->ah_phy_revision < AR5K_SREV_PHY_5212A))
+ for (i = 8; i <= 15; i++)
+ rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
+
+ /* Save min/max and current tx power for this channel
+ * in 0.25dB units.
+ *
+ * Note: We use rates[0] for current tx power because
+ * it covers most of the rates, in most cases. It's our
+ * tx power limit and what the user expects to see. */
+ ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+ ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+	/* Set max txpower for correct OFDM operation on all rates,
+	 * that is the txpower for 54Mbit; it's used for the PAPD
+	 * gain probe and it's in 0.5dB units */
+ ah->ah_txpower.txp_ofdm = rates[7];
+
+ /* Now that we have all rates setup use table offset to
+ * match the power range set by user with the power indices
+ * on PCDAC/PDADC table */
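+	/* (e.g. with a txp_offset of 10, a rate power of 40 -20dB in
+	 * 0.5dB steps- ends up as PCDAC/PDADC index 50, clamped to the
+	 * 0 - 63 range below) */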
+ for (i = 0; i < 16; i++) {
+ rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
+ /* Don't get out of bounds */
+ if (rate_idx_scaled > 63)
+ rate_idx_scaled = 63;
+ if (rate_idx_scaled < 0)
+ rate_idx_scaled = 0;
+ rates[i] = rate_idx_scaled;
+ }
+}
+
+
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
+ */
+static int
+ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 txpower)
+{
+ struct ath5k_rate_pcal_info rate_info;
+ struct ieee80211_channel *curr_channel = ah->ah_current_channel;
+ int ee_mode;
+ u8 type;
+ int ret;
+
+ if (txpower > AR5K_TUNE_MAX_TXPOWER) {
+ ATH5K_ERR(ah, "invalid tx power: %u\n", txpower);
+ return -EINVAL;
+ }
+
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
+
+ /* Initialize TX power table */
+ switch (ah->ah_radio) {
+ case AR5K_RF5110:
+ /* TODO */
+ return 0;
+ case AR5K_RF5111:
+ type = AR5K_PWRTABLE_PWR_TO_PCDAC;
+ break;
+ case AR5K_RF5112:
+ type = AR5K_PWRTABLE_LINEAR_PCDAC;
+ break;
+ case AR5K_RF2413:
+ case AR5K_RF5413:
+ case AR5K_RF2316:
+ case AR5K_RF2317:
+ case AR5K_RF2425:
+ type = AR5K_PWRTABLE_PWR_TO_PDADC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * If we don't change channel/mode skip tx powertable calculation
+ * and use the cached one.
+ */
+ if (!ah->ah_txpower.txp_setup ||
+ (channel->hw_value != curr_channel->hw_value) ||
+ (channel->center_freq != curr_channel->center_freq)) {
+ /* Reset TX power values but preserve requested
+ * tx power from above */
+ int requested_txpower = ah->ah_txpower.txp_requested;
+
+ memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+ /* Restore TPC setting and requested tx power */
+ ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+
+ ah->ah_txpower.txp_requested = requested_txpower;
+
+ /* Calculate the powertable */
+ ret = ath5k_setup_channel_powertable(ah, channel,
+ ee_mode, type);
+ if (ret)
+ return ret;
+ }
+
+ /* Write table on hw */
+ ath5k_write_channel_powertable(ah, ee_mode, type);
+
+ /* Limit max power if we have a CTL available */
+ ath5k_get_max_ctl_power(ah, channel);
+
+ /* FIXME: Antenna reduction stuff */
+
+ /* FIXME: Limit power on turbo modes */
+
+ /* FIXME: TPC scale reduction */
+
+ /* Get surrounding channels for per-rate power table
+ * calibration */
+ ath5k_get_rate_pcal_data(ah, channel, &rate_info);
+
+ /* Setup rate power table */
+ ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode);
+
+ /* Write rate power table on hw */
+ ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) |
+ AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) |
+ AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1);
+
+ ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(7, 24) |
+ AR5K_TXPOWER_OFDM(6, 16) | AR5K_TXPOWER_OFDM(5, 8) |
+ AR5K_TXPOWER_OFDM(4, 0), AR5K_PHY_TXPOWER_RATE2);
+
+ ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(10, 24) |
+ AR5K_TXPOWER_CCK(9, 16) | AR5K_TXPOWER_CCK(15, 8) |
+ AR5K_TXPOWER_CCK(8, 0), AR5K_PHY_TXPOWER_RATE3);
+
+ ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(14, 24) |
+ AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
+ AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
+
+ /* FIXME: TPC support */
+ if (ah->ah_txpower.txp_tpc) {
+ ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
+ AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) |
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) |
+ AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
+ AR5K_TPC);
+ } else {
+ ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+ AR5K_PHY_TXPOWER_RATE_MAX);
+ }
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case the user or an application changes it while the PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+{
+ ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
+ "changing txpower to %d\n", txpower);
+
+ return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
+}
+
+
+/*************\
+ Init function
+\*************/
+
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver, it assumes PHY is in a
+ * warm reset state !
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 mode, bool fast)
+{
+ struct ieee80211_channel *curr_channel;
+ int ret, i;
+ u32 phy_tst1;
+ ret = 0;
+
+ /*
+ * Sanity check for fast flag
+ * Don't try fast channel change when changing modulation
+ * mode/band. We check for chip compatibility on
+ * ath5k_hw_reset.
+ */
+ curr_channel = ah->ah_current_channel;
+ if (fast && (channel->hw_value != curr_channel->hw_value))
+ return -EINVAL;
+
+ /*
+ * On fast channel change we only set the synth parameters
+ * while PHY is running, enable calibration and skip the rest.
+ */
+ if (fast) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+ for (i = 0; i < 100; i++) {
+ if (ath5k_hw_reg_read(ah, AR5K_PHY_RFBUS_GRANT))
+ break;
+ udelay(5);
+ }
+ /* Failed */
+ if (i >= 100)
+ return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
+ }
+
+ /*
+ * Set TX power
+ *
+ * Note: We need to do that before we set
+ * RF buffer settings on 5211/5212+ so that we
+ * properly set curve indices.
+ */
+ ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+ ah->ah_txpower.txp_requested * 2 :
+ AR5K_TUNE_MAX_TXPOWER);
+ if (ret)
+ return ret;
+
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value != AR5K_MODE_11B) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
+ /*
+ * For 5210 we do all initialization using
+ * initvals, so we don't have to modify
+ * any settings (5210 also only supports
+ * a/aturbo modes)
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+
+ /*
+ * Write initial RF gain settings
+ * This should work for both 5111/5112
+ */
+ ret = ath5k_hw_rfgain_init(ah, channel->band);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1500);
+
+ /*
+ * Write RF buffer
+ */
+ ret = ath5k_hw_rfregs_init(ah, channel, mode);
+ if (ret)
+ return ret;
+
+		/* Enable/disable 802.11b mode on 5111
+		 * (enable 2111 frequency converter + CCK) */
+ if (ah->ah_radio == AR5K_RF5111) {
+ if (mode == AR5K_MODE_11B)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_B_MODE);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_B_MODE);
+ }
+
+ } else if (ah->ah_version == AR5K_AR5210) {
+ usleep_range(1000, 1500);
+ /* Disable phy and wait */
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
+ usleep_range(1000, 1500);
+ }
+
+ /* Set channel on PHY */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable the PHY and wait until completion
+ * This includes BaseBand and Synthesizer
+ * activation.
+ */
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+
+ ath5k_hw_wait_for_synth(ah, channel);
+
+ /*
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
+ */
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ usleep_range(200, 250);
+ }
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+
+ /*
+ * Start automatic gain control calibration
+ *
+ * During AGC calibration RX path is re-routed to
+ * a power detector so we don't receive anything.
+ *
+ * This method is used to calibrate some static offsets
+	 * used together with on-the-fly I/Q calibration (the
+ * one performed via ath5k_hw_phy_calibrate), which doesn't
+ * interrupt rx path.
+ *
+ * While rx path is re-routed to the power detector we also
+ * start a noise floor calibration to measure the
+ * card's noise floor (the noise we measure when we are not
+ * transmitting or receiving anything).
+ *
+ * If we are in a noisy environment, AGC calibration may time
+	 * out and/or noise floor calibration might time out.
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
+
+ /* At the same time start I/Q calibration for QAM constellation
+ * -no need for CCK- */
+ ah->ah_iq_cal_needed = false;
+ if (!(mode == AR5K_MODE_11B)) {
+ ah->ah_iq_cal_needed = true;
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_RUN);
+ }
+
+ /* Wait for gain calibration to finish (we check for I/Q calibration
+ * during ath5k_phy_calibrate) */
+ if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL, 0, false)) {
+ ATH5K_ERR(ah, "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ /* Restore antenna mode */
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
new file mode 100644
index 000000000..ddaad712c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/********************************************\
+Queue Control Unit, DCF Control Unit Functions
+\********************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we setup parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DFS parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DFS settings for each queue.
+ *
+ * When a frame goes into a TX queue, QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
+
+
+/******************\
+* Helper functions *
+\******************/
+
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ */
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+{
+ u32 pending;
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /* Return if queue is declared inactive */
+ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return false;
+
+ /* XXX: How about AR5K_CFG_TXCNT ? */
+ if (ah->ah_version == AR5K_AR5210)
+ return false;
+
+ pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
+ pending &= AR5K_QCU_STS_FRMPENDCNT;
+
+	/* It's possible to have no frames pending even if TXE
+	 * is set. To indicate that the queue has not stopped,
+	 * return true */
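+	/* (note: this means a return value of 1 can mean either one
+	 * pending frame or an empty but still active queue) */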
+ if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ return true;
+
+ return pending;
+}
+
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ */
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+ if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
+ return;
+
+ /* This queue will be skipped in further operations */
+ ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
+ /*For SIMR setup*/
+ AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+}
+
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
+ * Make sure cw is a power of 2 minus 1 and smaller than 1024
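+ * (e.g. a request of 100 is rounded up to 127, 64 becomes 63 and
+ * 15 is returned as-is)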
+ */
+static u16
+ath5k_cw_validate(u16 cw_req)
+{
+ cw_req = min(cw_req, (u16)1023);
+
+	/* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
+
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If none of the above is correct
+ * find the closest power of 2 */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
+}
+
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
+ */
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info)
+{
+ memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
+ */
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *qinfo)
+{
+ struct ath5k_txq_info *qi;
+
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ qi = &ah->ah_txq[queue];
+
+ if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return -EIO;
+
+ /* copy and validate values */
+ qi->tqi_type = qinfo->tqi_type;
+ qi->tqi_subtype = qinfo->tqi_subtype;
+ qi->tqi_flags = qinfo->tqi_flags;
+ /*
+ * According to the docs: Although the AIFS field is 8 bit wide,
+ * the maximum supported value is 0xFC. Setting it higher than that
+ * will cause the DCU to hang.
+ */
+ qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
+ qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
+ qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
+ qi->tqi_cbr_period = qinfo->tqi_cbr_period;
+ qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
+ qi->tqi_burst_time = qinfo->tqi_burst_time;
+ qi->tqi_ready_time = qinfo->tqi_ready_time;
+
+ /*XXX: Is this supported on 5210 ?*/
+ /*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
+ if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
+ ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
+ (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
+ qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
+ qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns the queue number on success or a negative error code
+ * (e.g. -EINVAL on invalid arguments) on failure
+ */
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info)
+{
+ unsigned int queue;
+ int ret;
+
+ /*
+ * Get queue by type
+ */
+ /* 5210 only has 2 queues */
+ if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
+ switch (queue_type) {
+ case AR5K_TX_QUEUE_DATA:
+ queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ case AR5K_TX_QUEUE_CAB:
+ queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (queue_type) {
+ case AR5K_TX_QUEUE_DATA:
+ queue = queue_info->tqi_subtype;
+ break;
+ case AR5K_TX_QUEUE_UAPSD:
+ queue = AR5K_TX_QUEUE_ID_UAPSD;
+ break;
+ case AR5K_TX_QUEUE_BEACON:
+ queue = AR5K_TX_QUEUE_ID_BEACON;
+ break;
+ case AR5K_TX_QUEUE_CAB:
+ queue = AR5K_TX_QUEUE_ID_CAB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Setup internal queue structure
+ */
+ memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
+ ah->ah_txq[queue].tqi_type = queue_type;
+
+ if (queue_info != NULL) {
+ queue_info->tqi_type = queue_type;
+ ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We use ah_txq_status to hold a temp value for
+	 * the Secondary interrupt mask registers on 5211+;
+	 * see ath5k_hw_reset_tx_queue
+ */
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
+
+ return queue;
+}
+
+
+/*******************************\
+* Single QCU/DCU initialization *
+\*******************************/
+
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
+ */
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue)
+{
+ /* Single data queue on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ struct ath5k_txq_info *tq = &ah->ah_txq[queue];
+
+ if (queue > 0)
+ return;
+
+ ath5k_hw_reg_write(ah,
+ (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_LG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SH_RETRY),
+ AR5K_NODCU_RETRY_LMT);
+ /* DCU on AR5211+ */
+ } else {
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_RTS)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_STA_RTS)
+ | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+ AR5K_DCU_RETRY_LMT_STA_DATA),
+ AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+ }
+}
+
+/**
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * Set DCF properties for the given transmit queue on DCU
+ * and configures all queue-specific parameters.
+ */
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+	struct ath5k_txq_info *tq;
+
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	tq = &ah->ah_txq[queue];
+
+ /* Skip if queue inactive or if we are on AR5210
+ * that doesn't have QCU/DCU */
+ if ((ah->ah_version == AR5K_AR5210) ||
+ (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
+ return 0;
+
+ /*
+ * Set contention window (cw_min/cw_max)
+ * and arbitrated interframe space (aifs)...
+ */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
+ AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
+ AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
+ AR5K_QUEUE_DFS_LOCAL_IFS(queue));
+
+ /*
+ * Set tx retry limits for this queue
+ */
+ ath5k_hw_set_tx_retry_limits(ah, queue);
+
+
+ /*
+ * Set misc registers
+ */
+
+ /* Enable DCU to wait for next fragment from QCU */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_FRAG_WAIT);
+
+ /* On Maui and Spirit use the global seqnum on DCU */
+ if (ah->ah_mac_version < AR5K_SREV_AR5211)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_SEQNUM_CTL);
+
+ /* Constant bit rate period */
+ if (tq->tqi_cbr_period) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
+ AR5K_QCU_CBRCFG_INTVAL) |
+ AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
+ AR5K_QCU_CBRCFG_ORN_THRES),
+ AR5K_QUEUE_CBRCFG(queue));
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_FRSHED_CBR);
+
+ if (tq->tqi_cbr_overflow_limit)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_CBR_THRES_ENABLE);
+ }
+
+ /* Ready time interval */
+ if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
+ AR5K_QCU_RDYTIMECFG_INTVAL) |
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
+
+ if (tq->tqi_burst_time) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
+ AR5K_DCU_CHAN_TIME_DUR) |
+ AR5K_DCU_CHAN_TIME_ENABLE,
+ AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_RDY_VEOL_POLICY);
+ }
+
+ /* Enable/disable Post frame backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
+ AR5K_QUEUE_DFS_MISC(queue));
+
+ /* Enable/disable fragmentation burst backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
+ AR5K_QUEUE_DFS_MISC(queue));
+
+ /*
+ * Set registers by queue type
+ */
+ switch (tq->tqi_type) {
+ case AR5K_TX_QUEUE_BEACON:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
+ AR5K_QCU_MISC_CBREXP_BCN_DIS |
+ AR5K_QCU_MISC_BCN_ENABLE);
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+ AR5K_DCU_MISC_ARBLOCK_CTL_S) |
+ AR5K_DCU_MISC_ARBLOCK_IGNORE |
+ AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
+ AR5K_DCU_MISC_BCN_ENABLE);
+ break;
+
+ case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
+ AR5K_QCU_MISC_CBREXP_DIS |
+ AR5K_QCU_MISC_CBREXP_BCN_DIS);
+
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
+ (AR5K_TUNE_SW_BEACON_RESP -
+ AR5K_TUNE_DMA_BEACON_RESP) -
+ AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
+
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+ AR5K_DCU_MISC_ARBLOCK_CTL_S));
+ break;
+
+ case AR5K_TX_QUEUE_UAPSD:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_CBREXP_DIS);
+ break;
+
+ case AR5K_TX_QUEUE_DATA:
+ default:
+ break;
+ }
+
+ /* TODO: Handle frame compression */
+
+ /*
+ * Enable interrupts for this tx queue
+ * in the secondary interrupt mask registers
+ */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
+
+ /* Update secondary interrupt mask registers */
+
+ /* Filter out inactive queues */
+ ah->ah_txq_imr_txok &= ah->ah_txq_status;
+ ah->ah_txq_imr_txerr &= ah->ah_txq_status;
+ ah->ah_txq_imr_txurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
+ ah->ah_txq_imr_txeol &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
+ ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
+ AR5K_SIMR0_QCU_TXOK) |
+ AR5K_REG_SM(ah->ah_txq_imr_txdesc,
+ AR5K_SIMR0_QCU_TXDESC),
+ AR5K_SIMR0);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
+ AR5K_SIMR1_QCU_TXERR) |
+ AR5K_REG_SM(ah->ah_txq_imr_txeol,
+ AR5K_SIMR1_QCU_TXEOL),
+ AR5K_SIMR1);
+
+	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
+ AR5K_REG_SM(ah->ah_txq_imr_txurn,
+ AR5K_SIMR2_QCU_TXURN));
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
+ AR5K_SIMR3_QCBRORN) |
+ AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
+ AR5K_SIMR3_QCBRURN),
+ AR5K_SIMR3);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
+ AR5K_SIMR4_QTRIG), AR5K_SIMR4);
+
+ /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
+ AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
+
+	/* If no queue has TXNOFRM enabled, disable the interrupt
+	 * by setting AR5K_TXNOFRM to zero */
+ if (ah->ah_txq_imr_nofrm == 0)
+ ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
+
+ /* Set QCU mask for this DCU to save power */
+ AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
+
+ return 0;
+}
+
+
+/**************************\
+* Global QCU/DCU functions *
+\**************************/
+
+/**
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
+ *
+ * Sets the global IFS intervals on DCU (also works on AR5210) for
+ * the given slot time and the current bwmode.
+ */
+int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rate *rate;
+ u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+ u32 rate_flags, i;
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
+ return -EINVAL;
+
+ sifs = ath5k_hw_get_default_sifs(ah);
+ sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
+
+ /* EIFS
+ * Txtime of ack at lowest rate + SIFS + DIFS
+ * (DIFS = SIFS + 2 * Slot time)
+ *
+ * Note: HAL has some predefined values for EIFS
+ * Turbo: (37 + 2 * 6)
+ * Default: (74 + 2 * 9)
+ * Half: (149 + 2 * 13)
+ * Quarter: (298 + 2 * 21)
+ *
+ * (74 + 2 * 6) for AR5210 default and turbo !
+ *
+ * According to the formula we have
+ * ack_tx_time = 25 for turbo and
+ * ack_tx_time = 42.5 * clock multiplier
+ * for default/half/quarter.
+ *
+ * This can't be right, 42 is what we would get
+ * from ath5k_hw_get_frame_dur_for_bwmode or
+ * ieee80211_generic_frame_duration for zero frame
+ * length and without SIFS !
+ *
+ * Also we have different lowest rate for 802.11a
+ */
+ if (channel->band == IEEE80211_BAND_5GHZ)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
+ break;
+ default:
+ rate_flags = 0;
+ break;
+ }
+ sband = &ah->sbands[band];
+ rate = NULL;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rate = &sband->bitrates[i];
+ break;
+ }
+ if (WARN_ON(!rate))
+ return -EINVAL;
+
+ ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
+
+ /* ack_tx_time includes an SIFS already */
+ eifs = ack_tx_time + sifs + 2 * slot_time;
+ eifs_clock = ath5k_hw_htoclock(ah, eifs);
+
+ /* Set IFS settings on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ u32 pifs, pifs_clock, difs, difs_clock;
+
+ /* Set slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
+
+ /* Set EIFS */
+ eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
+
+ /* PIFS = Slot time + SIFS */
+ pifs = slot_time + sifs;
+ pifs_clock = ath5k_hw_htoclock(ah, pifs);
+ pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
+
+ /* DIFS = SIFS + 2 * Slot time */
+ difs = sifs + 2 * slot_time;
+ difs_clock = ath5k_hw_htoclock(ah, difs);
+
+ /* Set SIFS/DIFS */
+ ath5k_hw_reg_write(ah, (difs_clock <<
+ AR5K_IFS0_DIFS_S) | sifs_clock,
+ AR5K_IFS0);
+
+ /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
+ ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
+ (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
+ AR5K_IFS1);
+
+ return 0;
+ }
+
+ /* Set IFS slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
+
+ /* Set EIFS interval */
+ ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
+
+ /* Set SIFS interval in usecs */
+ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
+ sifs);
+
+ /* Set SIFS interval in clock cycles */
+ ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
+
+ return 0;
+}
+
+
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on information on
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
+{
+ int i, ret;
+
+ /* TODO: HW Compression support for data queues */
+ /* TODO: Burst prefetch for data queues */
+
+ /*
+ * Reset queues and start beacon timers at the end of the reset routine
+ * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
+ * Note: If we want we can assign multiple qcus on one dcu.
+ */
+ if (ah->ah_version != AR5K_AR5210)
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
+ ret = ath5k_hw_reset_tx_queue(ah, i);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "failed to reset TX queue #%d\n", i);
+ return ret;
+ }
+ }
+ else
+ /* No QCU/DCU on AR5210, just set tx
+ * retry limits. We set IFS parameters
+ * on ath5k_hw_set_ifs_intervals */
+ ath5k_hw_set_tx_retry_limits(ah, 0);
+
+ /* Set the turbo flag when operating on 40MHz */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
+
+ /* If we didn't set IFS timings through
+ * ath5k_hw_set_coverage_class make sure
+ * we set them here */
+ if (!ah->ah_coverage_class) {
+ unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
new file mode 100644
index 000000000..0ea1608b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -0,0 +1,2604 @@
+/*
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2007-2008 Michael Taylor <mike.taylor@apprion.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*
+ * Register values for Atheros 5210/5211/5212 cards from OpenBSD's ar5k
+ * maintained by Reyk Floeter
+ *
+ * I tried to document those registers by looking at ar5k code, some
+ * 802.11 (802.11e mostly) papers and by reading various publicly available
+ * Atheros presentations and papers like these:
+ *
+ * 5210 - http://nova.stanford.edu/~bbaas/ps/isscc2002_slides.pdf
+ *
+ * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf
+ *
+ * This file also contains register values found on a memory dump of
+ * Atheros's ART program (Atheros Radio Test), on ath9k, on legacy-hal
+ * released by Atheros and on various debug messages found on the net.
+ */
+
+#include "../reg.h"
+
+/*====MAC DMA REGISTERS====*/
+
+/*
+ * AR5210-Specific TXDP registers
+ * 5210 has only 2 transmit queues so no DCU/QCU, just
+ * 2 transmit descriptor pointers...
+ */
+#define AR5K_NOQCU_TXDP0 0x0000 /* Queue 0 - data */
+#define AR5K_NOQCU_TXDP1 0x0004 /* Queue 1 - beacons */
+
+/*
+ * Mac Control Register
+ */
+#define AR5K_CR 0x0008 /* Register Address */
+#define AR5K_CR_TXE0 0x00000001 /* TX Enable for queue 0 on 5210 */
+#define AR5K_CR_TXE1 0x00000002 /* TX Enable for queue 1 on 5210 */
+#define AR5K_CR_RXE 0x00000004 /* RX Enable */
+#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */
+#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */
+#define AR5K_CR_RXD 0x00000020 /* RX Disable */
+#define AR5K_CR_SWI 0x00000040 /* Software Interrupt */
+
+/*
+ * RX Descriptor Pointer register
+ */
+#define AR5K_RXDP 0x000c
+
+/*
+ * Configuration and status register
+ */
+#define AR5K_CFG 0x0014 /* Register Address */
+#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */
+#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer */
+#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
+#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
+#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
+#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */
+#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
+#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
+#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
+#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */
+#define AR5K_CFG_TXCNT_S 11
+#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */
+#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */
+#define AR5K_CFG_PCI_THRES 0x00060000 /* PCI Master req q threshold [5211+] */
+#define AR5K_CFG_PCI_THRES_S 17
+
+/*
+ * Interrupt enable register
+ */
+#define AR5K_IER 0x0024 /* Register Address */
+#define AR5K_IER_DISABLE 0x00000000 /* Disable card interrupts */
+#define AR5K_IER_ENABLE 0x00000001 /* Enable card interrupts */
+
+
+/*
+ * 0x0028 is Beacon Control Register on 5210
+ * and first RTS duration register on 5211
+ */
+
+/*
+ * Beacon control register [5210]
+ */
+#define AR5K_BCR 0x0028 /* Register Address */
+#define AR5K_BCR_AP 0x00000000 /* AP mode */
+#define AR5K_BCR_ADHOC 0x00000001 /* Ad-Hoc mode */
+#define AR5K_BCR_BDMAE 0x00000002 /* DMA enable */
+#define AR5K_BCR_TQ1FV 0x00000004 /* Use Queue1 for CAB traffic */
+#define AR5K_BCR_TQ1V 0x00000008 /* Use Queue1 for Beacon traffic */
+#define AR5K_BCR_BCGET 0x00000010
+
+/*
+ * First RTS duration register [5211]
+ */
+#define AR5K_RTSD0 0x0028 /* Register Address */
+#define AR5K_RTSD0_6 0x000000ff /* 6Mb RTS duration mask (?) */
+#define AR5K_RTSD0_6_S 0 /* 6Mb RTS duration shift (?) */
+#define AR5K_RTSD0_9 0x0000ff00 /* 9Mb*/
+#define AR5K_RTSD0_9_S 8
+#define AR5K_RTSD0_12 0x00ff0000 /* 12Mb*/
+#define AR5K_RTSD0_12_S 16
+#define AR5K_RTSD0_18	0xff000000	/* 18Mb*/
+#define AR5K_RTSD0_18_S 24
+
+
+/*
+ * 0x002c is Beacon Status Register on 5210
+ * and second RTS duration register on 5211
+ */
+
+/*
+ * Beacon status register [5210]
+ *
+ * As I can see in ar5k_ar5210_tx_start, Reyk uses some of the values of BCR
+ * for this register, so I guess TQ1V, TQ1FV and BDMAE have the same meaning
+ * here and SNP/SNAP means "snapshot" (so this register gets synced with BCR).
+ * So SNAPPEDBCRVALID should also stand for "snapped BCR -values- valid", so I
+ * renamed it to SNAPSHOTSVALID to make more sense. I really have no idea what
+ * else it can be. I also renamed SNPBCMD to SNPADHOC to match BCR.
+ */
+#define AR5K_BSR 0x002c /* Register Address */
+#define AR5K_BSR_BDLYSW 0x00000001 /* SW Beacon delay (?) */
+#define AR5K_BSR_BDLYDMA 0x00000002 /* DMA Beacon delay (?) */
+#define AR5K_BSR_TXQ1F 0x00000004 /* Beacon queue (1) finished */
+#define AR5K_BSR_ATIMDLY 0x00000008 /* ATIM delay (?) */
+#define AR5K_BSR_SNPADHOC 0x00000100 /* Ad-hoc mode set (?) */
+#define AR5K_BSR_SNPBDMAE 0x00000200 /* Beacon DMA enabled (?) */
+#define AR5K_BSR_SNPTQ1FV 0x00000400 /* Queue1 is used for CAB traffic (?) */
+#define AR5K_BSR_SNPTQ1V 0x00000800 /* Queue1 is used for Beacon traffic (?) */
+#define AR5K_BSR_SNAPSHOTSVALID 0x00001000 /* BCR snapshots are valid (?) */
+#define AR5K_BSR_SWBA_CNT 0x00ff0000
+
+/*
+ * Second RTS duration register [5211]
+ */
+#define AR5K_RTSD1 0x002c /* Register Address */
+#define AR5K_RTSD1_24 0x000000ff /* 24Mb */
+#define AR5K_RTSD1_24_S 0
+#define AR5K_RTSD1_36 0x0000ff00 /* 36Mb */
+#define AR5K_RTSD1_36_S 8
+#define AR5K_RTSD1_48 0x00ff0000 /* 48Mb */
+#define AR5K_RTSD1_48_S 16
+#define AR5K_RTSD1_54 0xff000000 /* 54Mb */
+#define AR5K_RTSD1_54_S 24
+
+
+/*
+ * Transmit configuration register
+ */
+#define AR5K_TXCFG 0x0030 /* Register Address */
+#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size (read) */
+#define AR5K_TXCFG_SDMAMR_S 0
+#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
+#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
+#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Trigger level mask */
+#define AR5K_TXCFG_TXFULL_S 4
+#define AR5K_TXCFG_TXFULL_0B 0x00000000
+#define AR5K_TXCFG_TXFULL_64B 0x00000010
+#define AR5K_TXCFG_TXFULL_128B 0x00000020
+#define AR5K_TXCFG_TXFULL_192B 0x00000030
+#define AR5K_TXCFG_TXFULL_256B 0x00000040
+#define AR5K_TXCFG_TXCONT_EN 0x00000080
+#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */
+#define AR5K_TXCFG_JUMBO_DESC_EN 0x00000400 /* Enable jumbo tx descriptors [5211+] */
+#define AR5K_TXCFG_ADHOC_BCN_ATIM 0x00000800 /* Adhoc Beacon ATIM Policy */
+#define AR5K_TXCFG_ATIM_WINDOW_DEF_DIS 0x00001000 /* Disable ATIM window defer [5211+] */
+#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */
+#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
+#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */
+#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */
+#define AR5K_TXCFG_DCU_DBL_BUF_DIS 0x00008000 /* Disable double buffering on DCU */
+#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */
+
+/*
+ * Receive configuration register
+ */
+#define AR5K_RXCFG 0x0034 /* Register Address */
+#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size (write) */
+#define AR5K_RXCFG_SDMAMW_S 0
+#define AR5K_RXCFG_ZLFDMA 0x00000008 /* Enable Zero-length frame DMA */
+#define AR5K_RXCFG_DEF_ANTENNA 0x00000010 /* Default antenna (?) */
+#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo rx descriptors [5211+] */
+#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames [5211+] */
+#define AR5K_RXCFG_SLE_ENTRY 0x00000080 /* Sleep entry policy */
+
+/*
+ * Receive jumbo descriptor last address register
+ * Only found in 5211 (?)
+ */
+#define AR5K_RXJLA 0x0038
+
+/*
+ * MIB control register
+ */
+#define AR5K_MIBC 0x0040 /* Register Address */
+#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
+#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
+#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
+
+/*
+ * Timeout prescale register
+ */
+#define AR5K_TOPS 0x0044
+#define AR5K_TOPS_M 0x0000ffff
+
+/*
+ * Receive timeout register (no frame received)
+ */
+#define AR5K_RXNOFRM 0x0048
+#define AR5K_RXNOFRM_M 0x000003ff
+
+/*
+ * Transmit timeout register (no frame sent)
+ */
+#define AR5K_TXNOFRM 0x004c
+#define AR5K_TXNOFRM_M 0x000003ff
+#define AR5K_TXNOFRM_QCU 0x000ffc00
+#define AR5K_TXNOFRM_QCU_S 10
+
+/*
+ * Receive frame gap timeout register
+ */
+#define AR5K_RPGTO 0x0050
+#define AR5K_RPGTO_M 0x000003ff
+
+/*
+ * Receive frame count limit register
+ */
+#define AR5K_RFCNT 0x0054
+#define AR5K_RFCNT_M 0x0000001f /* [5211+] (?) */
+#define AR5K_RFCNT_RFCL 0x0000000f /* [5210] */
+
+/*
+ * Misc settings register
+ * (reserved0-3)
+ */
+#define AR5K_MISC 0x0058 /* Register Address */
+#define AR5K_MISC_DMA_OBS_M 0x000001e0
+#define AR5K_MISC_DMA_OBS_S 5
+#define AR5K_MISC_MISC_OBS_M 0x00000e00
+#define AR5K_MISC_MISC_OBS_S 9
+#define AR5K_MISC_MAC_OBS_LSB_M 0x00007000
+#define AR5K_MISC_MAC_OBS_LSB_S 12
+#define AR5K_MISC_MAC_OBS_MSB_M 0x00038000
+#define AR5K_MISC_MAC_OBS_MSB_S 15
+#define AR5K_MISC_LED_DECAY 0x001c0000 /* [5210] */
+#define AR5K_MISC_LED_BLINK 0x00e00000 /* [5210] */
+
+/*
+ * QCU/DCU clock gating register (5311)
+ * (reserved4-5)
+ */
+#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */
+#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */
+#define AR5K_QCUDCU_CLKGT_DCU 0x07ff0000 /* Mask for DCU clock */
+
+/*
+ * Interrupt Status Registers
+ *
+ * For 5210 there is only one status register but for
+ * 5211/5212 we have one primary and 4 secondary registers.
+ * So we have AR5K_ISR for 5210 and AR5K_PISR/SISRx for 5211/5212.
+ * Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
+ */
+#define AR5K_ISR 0x001c /* Register Address [5210] */
+#define AR5K_PISR 0x0080 /* Register Address [5211+] */
+#define AR5K_ISR_RXOK 0x00000001 /* Frame successfully received */
+#define AR5K_ISR_RXDESC 0x00000002 /* RX descriptor request */
+#define AR5K_ISR_RXERR 0x00000004 /* Receive error */
+#define AR5K_ISR_RXNOFRM 0x00000008 /* No frame received (receive timeout) */
+#define AR5K_ISR_RXEOL 0x00000010 /* Empty RX descriptor */
+#define AR5K_ISR_RXORN 0x00000020 /* Receive FIFO overrun */
+#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
+#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
+#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
+ * NOTE: We don't have per-queue info for this
+ * one, but we can enable it per-queue through
+ * TXNOFRM_QCU field on TXNOFRM register */
+#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
+#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
+#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
+#define AR5K_ISR_SWI 0x00002000 /* Software interrupt */
+#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */
+#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */
+#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
+#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
+#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
+#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
+ * 'or' of MCABT, SSERR, DPERR from SISR2 */
+#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
+#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
+#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
+#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
+#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
+#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
+#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
+ * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
+#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
+#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
+#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+
+#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+ AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+ AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+ AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+ AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
+/*
+ * Secondary status registers [5211+] (0 - 4)
+ *
+ * These give the status for each QCU, only QCUs 0-9 are
+ * represented.
+ */
+#define AR5K_SISR0 0x0084 /* Register Address [5211+] */
+#define AR5K_SISR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */
+#define AR5K_SISR0_QCU_TXOK_S 0
+#define AR5K_SISR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */
+#define AR5K_SISR0_QCU_TXDESC_S 16
+
+#define AR5K_SISR1 0x0088 /* Register Address [5211+] */
+#define AR5K_SISR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */
+#define AR5K_SISR1_QCU_TXERR_S 0
+#define AR5K_SISR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */
+#define AR5K_SISR1_QCU_TXEOL_S 16
+
+#define AR5K_SISR2 0x008c /* Register Address [5211+] */
+#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
+#define AR5K_SISR2_QCU_TXURN_S 0
+#define AR5K_SISR2_MCABT 0x00010000 /* Master Cycle Abort */
+#define AR5K_SISR2_SSERR 0x00020000 /* Signaled System Error */
+#define AR5K_SISR2_DPERR 0x00040000 /* Bus parity error */
+#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */
+#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */
+#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */
+#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
+#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
+#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
+#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
+
+#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
+#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
+#define AR5K_SISR3_QCBRORN_S 0
+#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */
+#define AR5K_SISR3_QCBRURN_S 16
+
+#define AR5K_SISR4 0x0094 /* Register Address [5211+] */
+#define AR5K_SISR4_QTRIG 0x000003ff /* Mask for QTRIG */
+#define AR5K_SISR4_QTRIG_S 0
+
+/*
+ * Shadow read-and-clear interrupt status registers [5211+]
+ */
+#define AR5K_RAC_PISR 0x00c0 /* Read and clear PISR */
+#define AR5K_RAC_SISR0 0x00c4 /* Read and clear SISR0 */
+#define AR5K_RAC_SISR1 0x00c8 /* Read and clear SISR1 */
+#define AR5K_RAC_SISR2 0x00cc /* Read and clear SISR2 */
+#define AR5K_RAC_SISR3 0x00d0 /* Read and clear SISR3 */
+#define AR5K_RAC_SISR4 0x00d4 /* Read and clear SISR4 */
+
+/*
+ * Interrupt Mask Registers
+ *
+ * As with the ISRs, 5210 has one IMR (AR5K_IMR) while 5211/5212 have one primary
+ * (AR5K_PIMR) and 4 secondary IMRs (AR5K_SIMRx). Note that ISR/IMR flags match.
+ */
+#define AR5K_IMR 0x0020 /* Register Address [5210] */
+#define AR5K_PIMR 0x00a0 /* Register Address [5211+] */
+#define AR5K_IMR_RXOK 0x00000001 /* Frame successfully received*/
+#define AR5K_IMR_RXDESC 0x00000002 /* RX descriptor request*/
+#define AR5K_IMR_RXERR 0x00000004 /* Receive error*/
+#define AR5K_IMR_RXNOFRM 0x00000008 /* No frame received (receive timeout)*/
+#define AR5K_IMR_RXEOL 0x00000010 /* Empty RX descriptor*/
+#define AR5K_IMR_RXORN 0x00000020 /* Receive FIFO overrun*/
+#define AR5K_IMR_TXOK 0x00000040 /* Frame successfully transmitted*/
+#define AR5K_IMR_TXDESC 0x00000080 /* TX descriptor request*/
+#define AR5K_IMR_TXERR 0x00000100 /* Transmit error*/
+#define AR5K_IMR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)*/
+#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/
+#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/
+#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/
+#define AR5K_IMR_SWI 0x00002000 /* Software interrupt */
+#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/
+#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */
+#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/
+#define AR5K_IMR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
+#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
+#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
+#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
+#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
+#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
+#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
+#define AR5K_IMR_DPERR 0x00400000 /* Bus parity error [5210] */
+#define AR5K_IMR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
+#define AR5K_IMR_TIM 0x00800000 /* [5211+] */
+#define AR5K_IMR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/
+#define AR5K_IMR_QCBRORN 0x02000000 /* QCU CBR overrun (?) [5211+] */
+#define AR5K_IMR_QCBRURN 0x04000000 /* QCU CBR underrun (?) [5211+] */
+#define AR5K_IMR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+
+/*
+ * Secondary interrupt mask registers [5211+] (0 - 4)
+ */
+#define AR5K_SIMR0 0x00a4 /* Register Address [5211+] */
+#define AR5K_SIMR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */
+#define AR5K_SIMR0_QCU_TXOK_S 0
+#define AR5K_SIMR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */
+#define AR5K_SIMR0_QCU_TXDESC_S 16
+
+#define AR5K_SIMR1 0x00a8 /* Register Address [5211+] */
+#define AR5K_SIMR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */
+#define AR5K_SIMR1_QCU_TXERR_S 0
+#define AR5K_SIMR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */
+#define AR5K_SIMR1_QCU_TXEOL_S 16
+
+#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */
+#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
+#define AR5K_SIMR2_QCU_TXURN_S 0
+#define AR5K_SIMR2_MCABT 0x00010000 /* Master Cycle Abort */
+#define AR5K_SIMR2_SSERR 0x00020000 /* Signaled System Error */
+#define AR5K_SIMR2_DPERR 0x00040000 /* Bus parity error */
+#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */
+#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */
+#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */
+#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
+#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
+#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */
+#define AR5K_SIMR2_TSFOOR 0x80000000 /* TSF OOR (?) */
+
+#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */
+#define AR5K_SIMR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
+#define AR5K_SIMR3_QCBRORN_S 0
+#define AR5K_SIMR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */
+#define AR5K_SIMR3_QCBRURN_S 16
+
+#define AR5K_SIMR4 0x00b4 /* Register Address [5211+] */
+#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */
+#define AR5K_SIMR4_QTRIG_S 0
+
+/*
+ * DMA Debug registers 0-7
+ * 0xe0 - 0xfc
+ */
+
+/*
+ * Decompression mask registers [5212+]
+ */
+#define AR5K_DCM_ADDR 0x0400 /*Decompression mask address (index) */
+#define AR5K_DCM_DATA 0x0404 /*Decompression mask data */
+
+/*
+ * Wake On Wireless pattern control register [5212+]
+ */
+#define AR5K_WOW_PCFG 0x0410 /* Register Address */
+#define AR5K_WOW_PCFG_PAT_MATCH_EN 0x00000001 /* Pattern match enable */
+#define AR5K_WOW_PCFG_LONG_FRAME_POL 0x00000002 /* Long frame policy */
+#define AR5K_WOW_PCFG_WOBMISS 0x00000004 /* Wake on beacon miss (?) */
+#define AR5K_WOW_PCFG_PAT_0_EN 0x00000100 /* Enable pattern 0 */
+#define AR5K_WOW_PCFG_PAT_1_EN 0x00000200 /* Enable pattern 1 */
+#define AR5K_WOW_PCFG_PAT_2_EN 0x00000400 /* Enable pattern 2 */
+#define AR5K_WOW_PCFG_PAT_3_EN 0x00000800 /* Enable pattern 3 */
+#define AR5K_WOW_PCFG_PAT_4_EN 0x00001000 /* Enable pattern 4 */
+#define AR5K_WOW_PCFG_PAT_5_EN 0x00002000 /* Enable pattern 5 */
+
+/*
+ * Wake On Wireless pattern index register (?) [5212+]
+ */
+#define AR5K_WOW_PAT_IDX 0x0414
+
+/*
+ * Wake On Wireless pattern data register [5212+]
+ */
+#define AR5K_WOW_PAT_DATA 0x0418 /* Register Address */
+#define AR5K_WOW_PAT_DATA_0_3_V 0x00000001 /* Pattern 0, 3 value */
+#define AR5K_WOW_PAT_DATA_1_4_V 0x00000100 /* Pattern 1, 4 value */
+#define AR5K_WOW_PAT_DATA_2_5_V 0x00010000 /* Pattern 2, 5 value */
+#define AR5K_WOW_PAT_DATA_0_3_M 0x01000000 /* Pattern 0, 3 mask */
+#define AR5K_WOW_PAT_DATA_1_4_M 0x04000000 /* Pattern 1, 4 mask */
+#define AR5K_WOW_PAT_DATA_2_5_M 0x10000000 /* Pattern 2, 5 mask */
+
+/*
+ * Decompression configuration registers [5212+]
+ */
+#define AR5K_DCCFG 0x0420 /* Register Address */
+#define AR5K_DCCFG_GLOBAL_EN 0x00000001 /* Enable decompression on all queues */
+#define AR5K_DCCFG_BYPASS_EN 0x00000002 /* Bypass decompression */
+#define AR5K_DCCFG_BCAST_EN 0x00000004 /* Enable decompression for bcast frames */
+#define AR5K_DCCFG_MCAST_EN 0x00000008 /* Enable decompression for mcast frames */
+
+/*
+ * Compression configuration registers [5212+]
+ */
+#define AR5K_CCFG 0x0600 /* Register Address */
+#define AR5K_CCFG_WINDOW_SIZE 0x00000007 /* Compression window size */
+#define AR5K_CCFG_CPC_EN 0x00000008 /* Enable performance counters */
+
+#define AR5K_CCFG_CCU 0x0604 /* Register Address */
+#define AR5K_CCFG_CCU_CUP_EN 0x00000001 /* CCU Catchup enable */
+#define AR5K_CCFG_CCU_CREDIT 0x00000002 /* CCU Credit (field) */
+#define AR5K_CCFG_CCU_CD_THRES 0x00000080 /* CCU Cyc(lic?) debt threshold (field) */
+#define AR5K_CCFG_CCU_CUP_LCNT 0x00010000 /* CCU Catchup lit(?) count */
+#define AR5K_CCFG_CCU_INIT 0x00100200 /* Initial value during reset */
+
+/*
+ * Compression performance counter registers [5212+]
+ */
+#define AR5K_CPC0 0x0610 /* Compression performance counter 0 */
+#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/
+#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */
+#define AR5K_CPC3 0x061c /* Compression performance counter 3 */
+#define AR5K_CPCOVF 0x0620 /* Compression performance overflow */
+
+
+/*
+ * Queue control unit (QCU) registers [5211+]
+ *
+ * The card has 12 TX queues, but only 0-9 appear to be used (?)
+ * both in the binary HAL (see ah.h) and in ar5k. Each queue has its own
+ * TXDP at addresses 0x0800 - 0x082c, a CBR (Constant Bit Rate)
+ * configuration register (0x08c0 - 0x08ec), a ready time configuration
+ * register (0x0900 - 0x092c), a misc configuration register (0x09c0 -
+ * 0x09ec) and a status register (0x0a00 - 0x0a2c). We also have some
+ * global registers, QCU transmit enable/disable and "one shot arm (?)"
+ * set/clear, which hold one bit per queue (we shift by 1 for each
+ * queue). To access these registers easily we define some macros here
+ * that are used inside the HAL (see the usage sketch after the TX
+ * enable register below). For more info check out the *_tx_queue
+ * functions.
+ */
+
+/*
+ * Generic QCU Register access macros
+ */
+#define AR5K_QUEUE_REG(_r, _q) (((_q) << 2) + _r)
+#define AR5K_QCU_GLOBAL_READ(_r, _q) (AR5K_REG_READ(_r) & (1 << _q))
+#define AR5K_QCU_GLOBAL_WRITE(_r, _q) AR5K_REG_WRITE(_r, (1 << _q))
+
+/*
+ * QCU Transmit descriptor pointer registers
+ */
+#define AR5K_QCU_TXDP_BASE 0x0800 /* Register Address - Queue0 TXDP */
+#define AR5K_QUEUE_TXDP(_q) AR5K_QUEUE_REG(AR5K_QCU_TXDP_BASE, _q)
+
+/*
+ * QCU Transmit enable register
+ */
+#define AR5K_QCU_TXE 0x0840
+#define AR5K_ENABLE_QUEUE(_q) AR5K_QCU_GLOBAL_WRITE(AR5K_QCU_TXE, _q)
+#define AR5K_QUEUE_ENABLED(_q) AR5K_QCU_GLOBAL_READ(AR5K_QCU_TXE, _q)
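+
+/*
+ * Usage sketch (illustrative only, not part of the register map):
+ * each per-queue register block allots 4 bytes per queue, so
+ * AR5K_QUEUE_REG() simply adds (queue << 2) to the block's base
+ * address. For queue 1 the TXDP macro expands to
+ *
+ *	AR5K_QUEUE_TXDP(1)
+ *		= AR5K_QUEUE_REG(AR5K_QCU_TXDP_BASE, 1)
+ *		= 0x0800 + (1 << 2) = 0x0804
+ *
+ * The global enable/disable registers hold one bit per queue, so
+ * AR5K_ENABLE_QUEUE(1) writes (1 << 1) to AR5K_QCU_TXE through the
+ * AR5K_REG_WRITE() accessor referenced by the macros above.
+ */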
+
+/*
+ * QCU Transmit disable register
+ */
+#define AR5K_QCU_TXD 0x0880
+#define AR5K_DISABLE_QUEUE(_q) AR5K_QCU_GLOBAL_WRITE(AR5K_QCU_TXD, _q)
+#define AR5K_QUEUE_DISABLED(_q) AR5K_QCU_GLOBAL_READ(AR5K_QCU_TXD, _q)
+
+/*
+ * QCU Constant Bit Rate configuration registers
+ */
+#define AR5K_QCU_CBRCFG_BASE 0x08c0 /* Register Address - Queue0 CBRCFG */
+#define AR5K_QCU_CBRCFG_INTVAL 0x00ffffff /* CBR Interval mask */
+#define AR5K_QCU_CBRCFG_INTVAL_S 0
+#define AR5K_QCU_CBRCFG_ORN_THRES 0xff000000 /* CBR overrun threshold mask */
+#define AR5K_QCU_CBRCFG_ORN_THRES_S 24
+#define AR5K_QUEUE_CBRCFG(_q) AR5K_QUEUE_REG(AR5K_QCU_CBRCFG_BASE, _q)
+
+/*
+ * QCU Ready time configuration registers
+ */
+#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */
+#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */
+#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0
+#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */
+#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q)
+
+/*
+ * QCU one shot arm set registers
+ */
+#define AR5K_QCU_ONESHOTARM_SET 0x0940 /* Register Address -QCU "one shot arm set (?)" */
+#define AR5K_QCU_ONESHOTARM_SET_M 0x0000ffff
+
+/*
+ * QCU one shot arm clear registers
+ */
+#define AR5K_QCU_ONESHOTARM_CLEAR 0x0980 /* Register Address -QCU "one shot arm clear (?)" */
+#define AR5K_QCU_ONESHOTARM_CLEAR_M 0x0000ffff
+
+/*
+ * QCU misc registers
+ */
+#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */
+#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame scheduling mask */
+#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
+#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
+#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated */
+#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* TIM gated */
+#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated */
+#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */
+#define AR5K_QCU_MISC_CBREXP_DIS 0x00000020 /* Disable CBR expired counter (normal queue) */
+#define AR5K_QCU_MISC_CBREXP_BCN_DIS 0x00000040 /* Disable CBR expired counter (beacon queue) */
+#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */
+#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR expired threshold enabled */
+#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME expired or VEOL */
+#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */
+#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */
+#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */
+#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q)
+
+
+/*
+ * QCU status registers
+ */
+#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */
+#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */
+#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter */
+#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q)
+
+/*
+ * QCU ready time shutdown register
+ */
+#define AR5K_QCU_RDYTIMESHDN 0x0a40
+#define AR5K_QCU_RDYTIMESHDN_M 0x000003ff
+
+/*
+ * QCU compression buffer base registers [5212+]
+ */
+#define AR5K_QCU_CBB_SELECT 0x0b00
+#define AR5K_QCU_CBB_ADDR 0x0b04
+#define AR5K_QCU_CBB_ADDR_S 9
+
+/*
+ * QCU compression buffer configuration register [5212+]
+ * (buffer size)
+ */
+#define AR5K_QCU_CBCFG 0x0b08
+
+
+
+/*
+ * Distributed Coordination Function (DCF) control unit (DCU)
+ * registers [5211+]
+ *
+ * These registers control the various characteristics of each queue
+ * for 802.11e (WME) compatibility, so they pair up with the
+ * QCU registers above. For each queue we have a QCU mask register
+ * (0x1000 - 0x102c), a local-IFS settings register (0x1040 - 0x106c),
+ * a retry limit register (0x1080 - 0x10ac), a channel time register
+ * (0x10c0 - 0x10ec), a misc-settings register (0x1100 - 0x112c) and
+ * a sequence number register (0x1140 - 0x116c). It seems that "global"
+ * registers here affect all queues (see the use of DCU_GBL_IFS_SLOT in ar5k).
+ * We use the same macros here for easier register access.
+ *
+ */
+
+/*
+ * DCU QCU mask registers
+ */
+#define AR5K_DCU_QCUMASK_BASE 0x1000 /* Register Address -Queue0 DCU_QCUMASK */
+#define AR5K_DCU_QCUMASK_M 0x000003ff
+#define AR5K_QUEUE_QCUMASK(_q) AR5K_QUEUE_REG(AR5K_DCU_QCUMASK_BASE, _q)
+
+/*
+ * DCU local Inter Frame Space settings register
+ */
+#define AR5K_DCU_LCL_IFS_BASE 0x1040 /* Register Address -Queue0 DCU_LCL_IFS */
+#define AR5K_DCU_LCL_IFS_CW_MIN 0x000003ff /* Minimum Contention Window */
+#define AR5K_DCU_LCL_IFS_CW_MIN_S 0
+#define AR5K_DCU_LCL_IFS_CW_MAX 0x000ffc00 /* Maximum Contention Window */
+#define AR5K_DCU_LCL_IFS_CW_MAX_S 10
+#define AR5K_DCU_LCL_IFS_AIFS 0x0ff00000 /* Arbitrated Interframe Space */
+#define AR5K_DCU_LCL_IFS_AIFS_S 20
+#define AR5K_DCU_LCL_IFS_AIFS_MAX 0xfc /* Anything above that can cause DCU to hang */
+#define AR5K_QUEUE_DFS_LOCAL_IFS(_q) AR5K_QUEUE_REG(AR5K_DCU_LCL_IFS_BASE, _q)
+
+/*
+ * DCU retry limit registers
+ * all these fields don't allow zero values
+ */
+#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
+#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */
+#define AR5K_DCU_RETRY_LMT_RTS_S 0
+#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
+#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
+#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
+
+/*
+ * DCU channel time registers
+ */
+#define AR5K_DCU_CHAN_TIME_BASE 0x10c0 /* Register Address -Queue0 DCU_CHAN_TIME */
+#define AR5K_DCU_CHAN_TIME_DUR 0x000fffff /* Channel time duration */
+#define AR5K_DCU_CHAN_TIME_DUR_S 0
+#define AR5K_DCU_CHAN_TIME_ENABLE 0x00100000 /* Enable channel time */
+#define AR5K_QUEUE_DFS_CHANNEL_TIME(_q) AR5K_QUEUE_REG(AR5K_DCU_CHAN_TIME_BASE, _q)
+
+/*
+ * DCU misc registers [5211+]
+ *
+ * Note: Arbiter lockout control determines the
+ * behaviour of low priority queues when multiple queues
+ * have pending frames. Intra-frame lockout means we wait until
+ * the queue's current frame transmits (with post-frame backoff and bursting)
+ * before we transmit anything else, while global lockout means we
+ * wait for the whole queue to finish before higher priority queues
+ * can transmit (this is used on the beacon and CAB queues).
+ * No lockout means there is no special handling. A field-construction
+ * example follows the definitions below.
+ */
+#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */
+#define AR5K_DCU_MISC_BACKOFF 0x0000003f /* Mask for backoff threshold */
+#define AR5K_DCU_MISC_ETS_RTS_POL 0x00000040 /* End of transmission series
+ station RTS/data failure count
+ reset policy (?) */
+#define AR5K_DCU_MISC_ETS_CW_POL 0x00000080 /* End of transmission series
+ CW reset policy */
+#define AR5K_DCU_MISC_FRAG_WAIT 0x00000100 /* Wait for next fragment */
+#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */
+#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */
+#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */
+#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */
+#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */
+#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
+#define AR5K_DCU_MISC_VIRTCOL_IGNORE 1
+#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */
+#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */
+#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17
+#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */
+#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */
+#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */
+#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 /* Ignore Arbiter lockout */
+#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment */
+#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff */
+#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision cw policy */
+#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 /* Blown IFS policy (?) */
+#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */
+#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q)
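+
+/*
+ * Field-construction sketch (illustrative only): to request global
+ * arbiter lockout on a queue, the two-bit ARBLOCK_CTL field is built
+ * by shifting the lockout mode into place:
+ *
+ *	AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << AR5K_DCU_MISC_ARBLOCK_CTL_S
+ *		= 2 << 17 = 0x00040000
+ *
+ * which lies inside the AR5K_DCU_MISC_ARBLOCK_CTL (0x00060000) mask and
+ * would be OR'ed into the queue's AR5K_QUEUE_DFS_MISC(_q) register.
+ */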
+
+/*
+ * DCU frame sequence number registers
+ */
+#define AR5K_DCU_SEQNUM_BASE 0x1140
+#define AR5K_DCU_SEQNUM_M 0x00000fff
+#define AR5K_QUEUE_DCU_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q)
+
+/*
+ * DCU global IFS SIFS register
+ */
+#define AR5K_DCU_GBL_IFS_SIFS 0x1030
+#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff
+
+/*
+ * DCU global IFS slot interval register
+ */
+#define AR5K_DCU_GBL_IFS_SLOT 0x1070
+#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff
+
+/*
+ * DCU global IFS EIFS register
+ */
+#define AR5K_DCU_GBL_IFS_EIFS 0x10b0
+#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff
+
+/*
+ * DCU global IFS misc register
+ *
+ * LFSR stands for Linear Feedback Shift Register
+ * and it's used for generating pseudo-random
+ * number sequences.
+ *
+ * (If I understand correctly, the random numbers are
+ * used for idle sensing, e.g. multiplied with cwmin/cwmax etc.)
+ */
+#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */
+#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
+#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC_S 4
+#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
+#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10
+#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFS cnt reset policy (?) */
+#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */
+#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */
+
+/*
+ * DCU frame prefetch control register
+ */
+#define AR5K_DCU_FP 0x1230 /* Register Address */
+#define AR5K_DCU_FP_NOBURST_DCU_EN 0x00000001 /* Enable non-burst prefetch on DCU (?) */
+#define AR5K_DCU_FP_NOBURST_EN 0x00000010 /* Enable non-burst prefetch (?) */
+#define AR5K_DCU_FP_BURST_DCU_EN 0x00000020 /* Enable burst prefetch on DCU (?) */
+
+/*
+ * DCU transmit pause control/status register
+ */
+#define AR5K_DCU_TXP 0x1270 /* Register Address */
+#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask */
+#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status */
+
+/*
+ * DCU transmit filter table 0 (32 entries)
+ * each entry contains a 32bit slice of the
+ * 128bit tx filter for each DCU (4 slices per DCU)
+ */
+#define AR5K_DCU_TX_FILTER_0_BASE 0x1038
+#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64))
+
+/*
+ * DCU transmit filter table 1 (16 entries)
+ */
+#define AR5K_DCU_TX_FILTER_1_BASE 0x103c
+#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + (_n * 64))
+
+/*
+ * DCU clear transmit filter register
+ */
+#define AR5K_DCU_TX_FILTER_CLR 0x143c
+
+/*
+ * DCU set transmit filter register
+ */
+#define AR5K_DCU_TX_FILTER_SET 0x147c
+
+/*
+ * Reset control register
+ */
+#define AR5K_RESET_CTL 0x4000 /* Register Address */
+#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */
+#define AR5K_RESET_CTL_DMA 0x00000002 /* DMA (Rx/Tx) reset [5210] */
+#define AR5K_RESET_CTL_BASEBAND 0x00000002 /* Baseband reset [5211+] */
+#define AR5K_RESET_CTL_MAC 0x00000004 /* MAC reset (PCU+Baseband ?) [5210] */
+#define AR5K_RESET_CTL_PHY 0x00000008 /* PHY reset [5210] */
+#define AR5K_RESET_CTL_PCI 0x00000010 /* PCI Core reset (interrupts etc) */
+
+/*
+ * Sleep control register
+ */
+#define AR5K_SLEEP_CTL 0x4004 /* Register Address */
+#define AR5K_SLEEP_CTL_SLDUR 0x0000ffff /* Sleep duration mask */
+#define AR5K_SLEEP_CTL_SLDUR_S 0
+#define AR5K_SLEEP_CTL_SLE 0x00030000 /* Sleep enable mask */
+#define AR5K_SLEEP_CTL_SLE_S 16
+#define AR5K_SLEEP_CTL_SLE_WAKE 0x00000000 /* Force chip awake */
+#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */
+#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 /* Normal sleep policy */
+#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */
+#define AR5K_SLEEP_CTL_DUR_TIM_POL 0x00040000 /* Sleep duration timing policy */
+#define AR5K_SLEEP_CTL_DUR_WRITE_POL 0x00080000 /* Sleep duration write policy */
+#define AR5K_SLEEP_CTL_SLE_POL 0x00100000 /* Sleep policy mode */
+
+/*
+ * Interrupt pending register
+ */
+#define AR5K_INTPEND 0x4008
+#define AR5K_INTPEND_M 0x00000001
+
+/*
+ * Sleep force register
+ */
+#define AR5K_SFR 0x400c
+#define AR5K_SFR_EN 0x00000001
+
+/*
+ * PCI configuration register
+ * TODO: Fix LED stuff
+ */
+#define AR5K_PCICFG 0x4010 /* Register Address */
+#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */
+#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock */
+#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */
+#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */
+#define AR5K_PCICFG_EESIZE_S 3
+#define AR5K_PCICFG_EESIZE_4K 0 /* 4K */
+#define AR5K_PCICFG_EESIZE_8K 1 /* 8K */
+#define AR5K_PCICFG_EESIZE_16K 2 /* 16K */
+#define AR5K_PCICFG_EESIZE_FAIL 3 /* Failed to get size [5211+] */
+#define AR5K_PCICFG_LED 0x00000060 /* Led status [5211+] */
+#define AR5K_PCICFG_LED_NONE 0x00000000 /* Default [5211+] */
+#define AR5K_PCICFG_LED_PEND 0x00000020 /* Scan / Auth pending */
+#define AR5K_PCICFG_LED_ASSOC 0x00000040 /* Associated */
+#define AR5K_PCICFG_BUS_SEL 0x00000380 /* Mask for "bus select" [5211+] (?) */
+#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix */
+#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep */
+#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
+#define AR5K_PCICFG_RETRY_FIX 0x00001000 /* Enable pci core retry fix */
+#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even with pending interrupts*/
+#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
+#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
+#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
+#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */
+#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */
+#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */
+#define AR5K_PCICFG_LEDBLINK 0x00700000 /* Led blink rate */
+#define AR5K_PCICFG_LEDBLINK_S 20
+#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slowest led blink rate [5211+] */
+#define AR5K_PCICFG_LEDSTATE \
+ (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \
+ AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
+#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate */
+#define AR5K_PCICFG_SLEEP_CLOCK_RATE_S 24
+
+/*
+ * "General Purpose Input/Output" (GPIO) control register
+ *
+ * I'm not sure about this, but after looking at the code
+ * for all chipsets here is what I got:
+ *
+ * We have 6 GPIOs (pins), each GPIO has 4 modes (2 bits)
+ * Mode 0 -> always input
+ * Mode 1 -> output when GPIODO for this GPIO is set to 0
+ * Mode 2 -> output when GPIODO for this GPIO is set to 1
+ * Mode 3 -> always output
+ *
+ * For more info check out the get_gpio/set_gpio and
+ * set_gpio_input/set_gpio_output functions, and see the usage sketch
+ * after the GPIOCR bit definitions below.
+ * For more info on the GPIO interrupt check out set_gpio_intr.
+ */
+#define AR5K_NUM_GPIO 6
+
+#define AR5K_GPIOCR 0x4014 /* Register Address */
+#define AR5K_GPIOCR_INT_ENA 0x00008000 /* Enable GPIO interrupt */
+#define AR5K_GPIOCR_INT_SELL 0x00000000 /* Generate interrupt when pin is low */
+#define AR5K_GPIOCR_INT_SELH 0x00010000 /* Generate interrupt when pin is high */
+#define AR5K_GPIOCR_IN(n) (0 << ((n) * 2)) /* Mode 0 for pin n */
+#define AR5K_GPIOCR_OUT0(n) (1 << ((n) * 2)) /* Mode 1 for pin n */
+#define AR5K_GPIOCR_OUT1(n) (2 << ((n) * 2)) /* Mode 2 for pin n */
+#define AR5K_GPIOCR_OUT(n) (3 << ((n) * 2)) /* Mode 3 for pin n */
+#define AR5K_GPIOCR_INT_SEL(n) ((n) << 12) /* Interrupt for GPIO pin n */
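+
+/*
+ * Usage sketch (illustrative only): assuming the AR5K_REG_WRITE()
+ * accessor referenced by the QCU macros above, setting pin 0 to
+ * "always output" (mode 3) while generating an interrupt when pin 1
+ * goes high could be written as
+ *
+ *	AR5K_REG_WRITE(AR5K_GPIOCR, AR5K_GPIOCR_OUT(0) |
+ *		AR5K_GPIOCR_INT_SEL(1) | AR5K_GPIOCR_INT_SELH |
+ *		AR5K_GPIOCR_INT_ENA);
+ *
+ * The pin is then driven from the matching bit in the GPIO data output
+ * register (AR5K_GPIODO) and read back through the data input register
+ * (AR5K_GPIODI) defined below.
+ */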
+
+/*
+ * "General Purpose Input/Output" (GPIO) data output register
+ */
+#define AR5K_GPIODO 0x4018
+
+/*
+ * "General Purpose Input/Output" (GPIO) data input register
+ */
+#define AR5K_GPIODI 0x401c
+#define AR5K_GPIODI_M 0x0000002f
+
+/*
+ * Silicon revision register
+ */
+#define AR5K_SREV 0x4020 /* Register Address */
+#define AR5K_SREV_REV 0x0000000f /* Mask for revision */
+#define AR5K_SREV_REV_S 0
+#define AR5K_SREV_VER 0x000000ff /* Mask for version */
+#define AR5K_SREV_VER_S 4
+
+/*
+ * TXE write posting register
+ */
+#define AR5K_TXEPOST 0x4028
+
+/*
+ * QCU sleep mask
+ */
+#define AR5K_QCU_SLEEP_MASK 0x402c
+
+/* 0x4068 is compression buffer configuration
+ * register on 5414 and pm configuration register
+ * on 5424 and newer pci-e chips. */
+
+/*
+ * Compression buffer configuration
+ * register (enable/disable) [5414]
+ */
+#define AR5K_5414_CBCFG 0x4068
+#define AR5K_5414_CBCFG_BUF_DIS 0x10 /* Disable buffer */
+
+/*
+ * PCI-E Power management configuration
+ * and status register [5424+]
+ */
+#define AR5K_PCIE_PM_CTL 0x4068 /* Register address */
+/* Only 5424 */
+#define AR5K_PCIE_PM_CTL_L1_WHEN_D2 0x00000001 /* enable PCIe core to enter L1
+ when d2_sleep_en is asserted */
+#define AR5K_PCIE_PM_CTL_L0_L0S_CLEAR 0x00000002 /* Clear L0 and L0S counters */
+#define AR5K_PCIE_PM_CTL_L0_L0S_EN 0x00000004 /* Start L0 and L0S counters */
+#define AR5K_PCIE_PM_CTL_LDRESET_EN 0x00000008 /* Enable reset when link goes
+ down */
+/* Wake On Wireless */
+#define AR5K_PCIE_PM_CTL_PME_EN 0x00000010 /* PME Enable */
+#define AR5K_PCIE_PM_CTL_AUX_PWR_DET 0x00000020 /* Aux power detect */
+#define AR5K_PCIE_PM_CTL_PME_CLEAR 0x00000040 /* Clear PME */
+#define AR5K_PCIE_PM_CTL_PSM_D0 0x00000080
+#define AR5K_PCIE_PM_CTL_PSM_D1 0x00000100
+#define AR5K_PCIE_PM_CTL_PSM_D2 0x00000200
+#define AR5K_PCIE_PM_CTL_PSM_D3 0x00000400
+
+/*
+ * PCI-E Workaround enable register
+ */
+#define AR5K_PCIE_WAEN 0x407c
+
+/*
+ * PCI-E Serializer/Deserializer
+ * registers
+ */
+#define AR5K_PCIE_SERDES 0x4080
+#define AR5K_PCIE_SERDES_RESET 0x4084
+
+/*====EEPROM REGISTERS====*/
+
+/*
+ * EEPROM access registers
+ *
+ * Here we have a difference between 5210 and 5211/5212:
+ * the read data register for 5210 is at 0x6800 and the
+ * status register is at 0x6c00. There is also
+ * no EEPROM command register on 5210 and the
+ * offsets are different.
+ *
+ * To read EEPROM data for a specific offset:
+ * 5210      - enable EEPROM access (AR5K_PCICFG_EEAE)
+ *             read AR5K_EEPROM_BASE + (4 * offset)
+ *             check the EEPROM status register
+ *             and read the EEPROM data register.
+ *
+ * 5211/5212 - write the offset to AR5K_EEPROM_BASE
+ *             write AR5K_EEPROM_CMD_READ to AR5K_EEPROM_CMD
+ *             check the EEPROM status register
+ *             and read the EEPROM data register
+ *             (a short sketch of this sequence follows the
+ *             status register definitions below).
+ *
+ * To write EEPROM data for a specific offset:
+ * 5210      - enable EEPROM access (AR5K_PCICFG_EEAE)
+ *             write data to AR5K_EEPROM_BASE + (4 * offset)
+ *             check the EEPROM status register
+ * 5211/5212 - write AR5K_EEPROM_CMD_RESET to AR5K_EEPROM_CMD
+ *             write the offset to AR5K_EEPROM_BASE
+ *             write data to the data register
+ *             write AR5K_EEPROM_CMD_WRITE to AR5K_EEPROM_CMD
+ *             check the EEPROM status register
+ *
+ * For more info check the eeprom_* functions and the ar5k.c
+ * file posted to the madwifi-devel mailing list:
+ * http://sourceforge.net/mailarchive/message.php?msg_id=8966525
+ *
+ */
+#define AR5K_EEPROM_BASE 0x6000
+
+/*
+ * EEPROM data register
+ */
+#define AR5K_EEPROM_DATA_5211 0x6004
+#define AR5K_EEPROM_DATA_5210 0x6800
+#define AR5K_EEPROM_DATA (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_EEPROM_DATA_5210 : AR5K_EEPROM_DATA_5211)
+
+/*
+ * EEPROM command register
+ */
+#define AR5K_EEPROM_CMD 0x6008 /* Register Address */
+#define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */
+#define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */
+#define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */
+
+/*
+ * EEPROM status register
+ */
+#define AR5K_EEPROM_STAT_5210 0x6c00 /* Register Address [5210] */
+#define AR5K_EEPROM_STAT_5211 0x600c /* Register Address [5211+] */
+#define AR5K_EEPROM_STATUS (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_EEPROM_STAT_5210 : AR5K_EEPROM_STAT_5211)
+#define AR5K_EEPROM_STAT_RDERR 0x00000001 /* EEPROM read failed */
+#define AR5K_EEPROM_STAT_RDDONE 0x00000002 /* EEPROM read successful */
+#define AR5K_EEPROM_STAT_WRERR 0x00000004 /* EEPROM write failed */
+#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */
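+
+/*
+ * Read sketch for [5211+] (illustrative only, following the sequence
+ * described above; AR5K_REG_READ()/AR5K_REG_WRITE() are the accessors
+ * referenced by the QCU macros):
+ *
+ *	AR5K_REG_WRITE(AR5K_EEPROM_BASE, offset);
+ *	AR5K_REG_WRITE(AR5K_EEPROM_CMD, AR5K_EEPROM_CMD_READ);
+ *	... poll AR5K_EEPROM_STATUS until AR5K_EEPROM_STAT_RDDONE is set,
+ *	    bailing out on AR5K_EEPROM_STAT_RDERR or a timeout ...
+ *	data = AR5K_REG_READ(AR5K_EEPROM_DATA);
+ */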
+
+/*
+ * EEPROM config register
+ */
+#define AR5K_EEPROM_CFG 0x6010 /* Register Address */
+#define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */
+#define AR5K_EEPROM_CFG_SIZE_AUTO 0
+#define AR5K_EEPROM_CFG_SIZE_4KBIT 1
+#define AR5K_EEPROM_CFG_SIZE_8KBIT 2
+#define AR5K_EEPROM_CFG_SIZE_16KBIT 3
+#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */
+#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */
+#define AR5K_EEPROM_CFG_CLK_RATE_S 3
+#define AR5K_EEPROM_CFG_CLK_RATE_156KHZ 0
+#define AR5K_EEPROM_CFG_CLK_RATE_312KHZ 1
+#define AR5K_EEPROM_CFG_CLK_RATE_625KHZ 2
+#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protection key */
+#define AR5K_EEPROM_CFG_PROT_KEY_S 8
+#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */
+
+
+/*
+ * TODO: Wake On Wireless registers
+ * Range 0x7000 - 0x7ce0
+ */
+
+/*
+ * Protocol Control Unit (PCU) registers
+ */
+/*
+ * Used for checking initial register writes
+ * during channel reset (see reset func)
+ */
+#define AR5K_PCU_MIN 0x8000
+#define AR5K_PCU_MAX 0x8fff
+
+/*
+ * First station id register (Lower 32 bits of MAC address)
+ */
+#define AR5K_STA_ID0 0x8000
+#define AR5K_STA_ID0_ARRD_L32 0xffffffff
+
+/*
+ * Second station id register (Upper 16 bits of MAC address + PCU settings)
+ */
+#define AR5K_STA_ID1 0x8004 /* Register Address */
+#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC address */
+#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
+#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
+#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
+#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */
+#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */
+#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */
+#define AR5K_STA_ID1_PCF_5210 0x00200000 /* Enable PCF on [5210]*/
+#define AR5K_STA_ID1_PCF (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211)
+#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
+#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
+#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
+#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
+#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
+#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
+#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
+#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */
+#define AR5K_STA_ID1_CBCIV_ENDIAN 0x40000000 /* ??? */
+#define AR5K_STA_ID1_KEYSRCH_MCAST 0x80000000 /* Do key cache search for mcast frames */
+
+#define AR5K_STA_ID1_ANTENNA_SETTINGS (AR5K_STA_ID1_DEFAULT_ANTENNA | \
+ AR5K_STA_ID1_DESC_ANTENNA | \
+ AR5K_STA_ID1_RTS_DEF_ANTENNA | \
+ AR5K_STA_ID1_SELFGEN_DEF_ANT)
+
+/*
+ * First BSSID register (MAC address, lower 32bits)
+ */
+#define AR5K_BSS_ID0 0x8008
+
+/*
+ * Second BSSID register (MAC address in upper 16 bits)
+ *
+ * AID: Association ID
+ */
+#define AR5K_BSS_ID1 0x800c
+#define AR5K_BSS_ID1_AID 0xffff0000
+#define AR5K_BSS_ID1_AID_S 16
+
+/*
+ * Backoff slot time register
+ */
+#define AR5K_SLOT_TIME 0x8010
+
+/*
+ * ACK/CTS timeout register
+ */
+#define AR5K_TIME_OUT 0x8014 /* Register Address */
+#define AR5K_TIME_OUT_ACK 0x00001fff /* ACK timeout mask */
+#define AR5K_TIME_OUT_ACK_S 0
+#define AR5K_TIME_OUT_CTS 0x1fff0000 /* CTS timeout mask */
+#define AR5K_TIME_OUT_CTS_S 16
+
+/*
+ * RSSI threshold register
+ */
+#define AR5K_RSSI_THR 0x8018 /* Register Address */
+#define AR5K_RSSI_THR_M 0x000000ff /* Mask for RSSI threshold [5211+] */
+#define AR5K_RSSI_THR_BMISS_5210 0x00000700 /* Mask for Beacon Missed threshold [5210] */
+#define AR5K_RSSI_THR_BMISS_5210_S 8
+#define AR5K_RSSI_THR_BMISS_5211 0x0000ff00 /* Mask for Beacon Missed threshold [5211+] */
+#define AR5K_RSSI_THR_BMISS_5211_S 8
+#define AR5K_RSSI_THR_BMISS (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_RSSI_THR_BMISS_5210 : AR5K_RSSI_THR_BMISS_5211)
+#define AR5K_RSSI_THR_BMISS_S 8
+
+/*
+ * 5210 has more PCU registers because there is no QCU/DCU,
+ * so queue parameters are set here; as a result many common
+ * registers have a different address on 5210. To make things
+ * easier we define a macro based on ah->ah_version for common
+ * registers with different addresses and common flags
+ * (see the example after AR5K_USEC below).
+ */
+
+/*
+ * Retry limit register
+ *
+ * Retry limit register for 5210 (no QCU/DCU so it's done in PCU)
+ */
+#define AR5K_NODCU_RETRY_LMT 0x801c /* Register Address */
+#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
+#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0
+#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */
+#define AR5K_NODCU_RETRY_LMT_LG_RETRY_S 4
+#define AR5K_NODCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask */
+#define AR5K_NODCU_RETRY_LMT_SSH_RETRY_S 8
+#define AR5K_NODCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask */
+#define AR5K_NODCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_NODCU_RETRY_LMT_CW_MIN 0x3ff00000 /* Minimum contention window mask */
+#define AR5K_NODCU_RETRY_LMT_CW_MIN_S 20
+
+/*
+ * Transmit latency register
+ */
+#define AR5K_USEC_5210 0x8020 /* Register Address [5210] */
+#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */
+#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_USEC_5210 : AR5K_USEC_5211)
+#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */
+#define AR5K_USEC_1_S 0
+#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32MHz clock */
+#define AR5K_USEC_32_S 7
+#define AR5K_USEC_TX_LATENCY_5211 0x007fc000
+#define AR5K_USEC_TX_LATENCY_5211_S 14
+#define AR5K_USEC_RX_LATENCY_5211 0x1f800000
+#define AR5K_USEC_RX_LATENCY_5211_S 23
+#define AR5K_USEC_TX_LATENCY_5210 0x000fc000 /* also for 5311 */
+#define AR5K_USEC_TX_LATENCY_5210_S 14
+#define AR5K_USEC_RX_LATENCY_5210 0x03f00000 /* also for 5311 */
+#define AR5K_USEC_RX_LATENCY_5210_S 20
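+
+/*
+ * Example (illustrative only): with the macro pattern above, AR5K_USEC
+ * expands to AR5K_USEC_5210 (0x8020) when ah->ah_version == AR5K_AR5210
+ * and to AR5K_USEC_5211 (0x801c) otherwise, so callers use a single name
+ * for both register layouts.
+ */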
+
+/*
+ * PCU beacon control register
+ */
+#define AR5K_BEACON_5210 0x8024 /*Register Address [5210] */
+#define AR5K_BEACON_5211 0x8020 /*Register Address [5211+] */
+#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_BEACON_5210 : AR5K_BEACON_5211)
+#define AR5K_BEACON_PERIOD 0x0000ffff /* Mask for beacon period */
+#define AR5K_BEACON_PERIOD_S 0
+#define AR5K_BEACON_TIM 0x007f0000 /* Mask for TIM offset */
+#define AR5K_BEACON_TIM_S 16
+#define AR5K_BEACON_ENABLE 0x00800000 /* Enable beacons */
+#define AR5K_BEACON_RESET_TSF 0x01000000 /* Force TSF reset */
+
+/*
+ * CFP period register
+ */
+#define AR5K_CFP_PERIOD_5210 0x8028
+#define AR5K_CFP_PERIOD_5211 0x8024
+#define AR5K_CFP_PERIOD (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_CFP_PERIOD_5210 : AR5K_CFP_PERIOD_5211)
+
+/*
+ * Next beacon time register
+ */
+#define AR5K_TIMER0_5210 0x802c
+#define AR5K_TIMER0_5211 0x8028
+#define AR5K_TIMER0 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TIMER0_5210 : AR5K_TIMER0_5211)
+
+/*
+ * Next DMA beacon alert register
+ */
+#define AR5K_TIMER1_5210 0x8030
+#define AR5K_TIMER1_5211 0x802c
+#define AR5K_TIMER1 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TIMER1_5210 : AR5K_TIMER1_5211)
+
+/*
+ * Next software beacon alert register
+ */
+#define AR5K_TIMER2_5210 0x8034
+#define AR5K_TIMER2_5211 0x8030
+#define AR5K_TIMER2 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TIMER2_5210 : AR5K_TIMER2_5211)
+
+/*
+ * Next ATIM window time register
+ */
+#define AR5K_TIMER3_5210 0x8038
+#define AR5K_TIMER3_5211 0x8034
+#define AR5K_TIMER3 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TIMER3_5210 : AR5K_TIMER3_5211)
+
+
+/*
+ * 5210 First inter frame spacing register (IFS)
+ */
+#define AR5K_IFS0 0x8040
+#define AR5K_IFS0_SIFS 0x000007ff
+#define AR5K_IFS0_SIFS_S 0
+#define AR5K_IFS0_DIFS 0x007ff800
+#define AR5K_IFS0_DIFS_S 11
+
+/*
+ * 5210 Second inter frame spacing register (IFS)
+ */
+#define AR5K_IFS1 0x8044
+#define AR5K_IFS1_PIFS 0x00000fff
+#define AR5K_IFS1_PIFS_S 0
+#define AR5K_IFS1_EIFS 0x03fff000
+#define AR5K_IFS1_EIFS_S 12
+#define AR5K_IFS1_CS_EN 0x04000000
+#define AR5K_IFS1_CS_EN_S 26
+
+/*
+ * CFP duration register
+ */
+#define AR5K_CFP_DUR_5210 0x8048
+#define AR5K_CFP_DUR_5211 0x8038
+#define AR5K_CFP_DUR (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_CFP_DUR_5210 : AR5K_CFP_DUR_5211)
+
+/*
+ * Receive filter register
+ */
+#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */
+#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */
+#define AR5K_RX_FILTER (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_RX_FILTER_5210 : AR5K_RX_FILTER_5211)
+#define AR5K_RX_FILTER_UCAST 0x00000001 /* Don't filter unicast frames */
+#define AR5K_RX_FILTER_MCAST 0x00000002 /* Don't filter multicast frames */
+#define AR5K_RX_FILTER_BCAST 0x00000004 /* Don't filter broadcast frames */
+#define AR5K_RX_FILTER_CONTROL 0x00000008 /* Don't filter control frames */
+#define AR5K_RX_FILTER_BEACON 0x00000010 /* Don't filter beacon frames */
+#define AR5K_RX_FILTER_PROM 0x00000020 /* Set promiscuous mode */
+#define AR5K_RX_FILTER_XRPOLL 0x00000040 /* Don't filter XR poll frame [5212+] */
+#define AR5K_RX_FILTER_PROBEREQ 0x00000080 /* Don't filter probe requests [5212+] */
+#define AR5K_RX_FILTER_PHYERR_5212 0x00000100 /* Don't filter phy errors [5212+] */
+#define AR5K_RX_FILTER_RADARERR_5212 0x00000200 /* Don't filter phy radar errors [5212+] */
+#define AR5K_RX_FILTER_PHYERR_5211 0x00000040 /* [5211] */
+#define AR5K_RX_FILTER_RADARERR_5211 0x00000080 /* [5211] */
+#define AR5K_RX_FILTER_PHYERR \
+ ((ah->ah_version == AR5K_AR5211 ? \
+ AR5K_RX_FILTER_PHYERR_5211 : AR5K_RX_FILTER_PHYERR_5212))
+#define AR5K_RX_FILTER_RADARERR \
+ ((ah->ah_version == AR5K_AR5211 ? \
+ AR5K_RX_FILTER_RADARERR_5211 : AR5K_RX_FILTER_RADARERR_5212))
+
+/*
+ * Multicast filter register (lower 32 bits)
+ */
+#define AR5K_MCAST_FILTER0_5210 0x8050
+#define AR5K_MCAST_FILTER0_5211 0x8040
+#define AR5K_MCAST_FILTER0 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_MCAST_FILTER0_5210 : AR5K_MCAST_FILTER0_5211)
+
+/*
+ * Multicast filter register (higher 16 bits)
+ */
+#define AR5K_MCAST_FILTER1_5210 0x8054
+#define AR5K_MCAST_FILTER1_5211 0x8044
+#define AR5K_MCAST_FILTER1 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_MCAST_FILTER1_5210 : AR5K_MCAST_FILTER1_5211)
+
+
+/*
+ * Transmit mask register (lower 32 bits) [5210]
+ */
+#define AR5K_TX_MASK0 0x8058
+
+/*
+ * Transmit mask register (higher 16 bits) [5210]
+ */
+#define AR5K_TX_MASK1 0x805c
+
+/*
+ * Clear transmit mask [5210]
+ */
+#define AR5K_CLR_TMASK 0x8060
+
+/*
+ * Trigger level register (before transmission) [5210]
+ */
+#define AR5K_TRIG_LVL 0x8064
+
+
+/*
+ * PCU Diagnostic register
+ *
+ * Used for tweaking/diagnostics.
+ */
+#define AR5K_DIAG_SW_5210 0x8068 /* Register Address [5210] */
+#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
+#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211)
+#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
+#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
+#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
+#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable HW encryption */
+#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable HW decryption */
+#define AR5K_DIAG_SW_DIS_TX_5210 0x00000020 /* Disable transmit [5210] */
+#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable receive */
+#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
+#define AR5K_DIAG_SW_DIS_RX (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_DIS_RX_5210 : AR5K_DIAG_SW_DIS_RX_5211)
+#define AR5K_DIAG_SW_LOOP_BACK_5210 0x00000080 /* TX Data Loopback (I guess it goes with DIS_TX) [5210] */
+#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040
+#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211)
+#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 /* Generate invalid TX FCS */
+#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080
+#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211)
+#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 /* Add 56 bytes of channel info before the frame data in the RX buffer */
+#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
+#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 /* Enable fixed scrambler seed */
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200
+#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
+#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
+#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
+#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
+#define AR5K_DIAG_SW_SCRAM_SEED_S 10
+#define AR5K_DIAG_SW_DIS_SEQ_INC_5210 0x00040000 /* Disable seqnum increment (?)[5210] */
+#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
+#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 /* Accept frames of non-zero protocol number */
+#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
+#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 /* Observation point select (?) */
+#define AR5K_DIAG_SW_OBSPT_S 18
+#define AR5K_DIAG_SW_RX_CLEAR_HIGH 0x00100000 /* Ignore carrier sense */
+#define AR5K_DIAG_SW_IGNORE_CARR_SENSE 0x00200000 /* Ignore virtual carrier sense */
+#define AR5K_DIAG_SW_CHANNEL_IDLE_HIGH 0x00400000 /* Force channel idle high */
+#define AR5K_DIAG_SW_PHEAR_ME 0x00800000 /* ??? */
+
+/*
+ * TSF (clock) register (lower 32 bits)
+ */
+#define AR5K_TSF_L32_5210 0x806c
+#define AR5K_TSF_L32_5211 0x804c
+#define AR5K_TSF_L32 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TSF_L32_5210 : AR5K_TSF_L32_5211)
+
+/*
+ * TSF (clock) register (higher 32 bits)
+ */
+#define AR5K_TSF_U32_5210 0x8070
+#define AR5K_TSF_U32_5211 0x8050
+#define AR5K_TSF_U32 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TSF_U32_5210 : AR5K_TSF_U32_5211)
+
+/*
+ * Last beacon timestamp register (Read Only)
+ */
+#define AR5K_LAST_TSTP 0x8080
+
+/*
+ * ADDAC test register [5211+]
+ */
+#define AR5K_ADDAC_TEST 0x8054 /* Register Address */
+#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
+#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */
+#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */
+#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */
+#define AR5K_ADDAC_TEST_USE_U8 0x00004000 /* Use upper 8 bits */
+#define AR5K_ADDAC_TEST_MSB 0x00008000 /* State of MSB */
+#define AR5K_ADDAC_TEST_TRIG_SEL 0x00010000 /* Trigger select */
+#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */
+#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */
+#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */
+#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* ARM rx buffer for capture */
+
+/*
+ * Default antenna register [5211+]
+ */
+#define AR5K_DEFAULT_ANTENNA 0x8058
+
+/*
+ * Frame control QoS mask register (?) [5211+]
+ * (FC_QOS_MASK)
+ */
+#define AR5K_FRAME_CTL_QOSM 0x805c
+
+/*
+ * Seq mask register (?) [5211+]
+ */
+#define AR5K_SEQ_MASK 0x8060
+
+/*
+ * Retry count register [5210]
+ */
+#define AR5K_RETRY_CNT 0x8084 /* Register Address [5210] */
+#define AR5K_RETRY_CNT_SSH 0x0000003f /* Station short retry count (?) */
+#define AR5K_RETRY_CNT_SLG 0x00000fc0 /* Station long retry count (?) */
+
+/*
+ * Back-off status register [5210]
+ */
+#define AR5K_BACKOFF 0x8088 /* Register Address [5210] */
+#define AR5K_BACKOFF_CW 0x000003ff /* Backoff Contention Window (?) */
+#define AR5K_BACKOFF_CNT 0x03ff0000 /* Backoff count (?) */
+
+
+
+/*
+ * NAV register (current)
+ */
+#define AR5K_NAV_5210 0x808c
+#define AR5K_NAV_5211 0x8084
+#define AR5K_NAV (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_NAV_5210 : AR5K_NAV_5211)
+
+/*
+ * MIB counters:
+ *
+ * The max value is 0xc000; if this is reached we get a MIB interrupt.
+ * They can be controlled via AR5K_MIBC and are cleared on read
+ * (see the usage note after the beacon count register below).
+ */
+
+/*
+ * RTS success (MIB counter)
+ */
+#define AR5K_RTS_OK_5210 0x8090
+#define AR5K_RTS_OK_5211 0x8088
+#define AR5K_RTS_OK (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
+
+/*
+ * RTS failure (MIB counter)
+ */
+#define AR5K_RTS_FAIL_5210 0x8094
+#define AR5K_RTS_FAIL_5211 0x808c
+#define AR5K_RTS_FAIL (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
+
+/*
+ * ACK failure (MIB counter)
+ */
+#define AR5K_ACK_FAIL_5210 0x8098
+#define AR5K_ACK_FAIL_5211 0x8090
+#define AR5K_ACK_FAIL (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
+
+/*
+ * FCS failure (MIB counter)
+ */
+#define AR5K_FCS_FAIL_5210 0x809c
+#define AR5K_FCS_FAIL_5211 0x8094
+#define AR5K_FCS_FAIL (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_FCS_FAIL_5210 : AR5K_FCS_FAIL_5211)
+
+/*
+ * Beacon count register
+ */
+#define AR5K_BEACON_CNT_5210 0x80a0
+#define AR5K_BEACON_CNT_5211 0x8098
+#define AR5K_BEACON_CNT (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_BEACON_CNT_5210 : AR5K_BEACON_CNT_5211)
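+
+/*
+ * Usage note (illustrative only): reading one of the MIB counters
+ * through the AR5K_REG_READ() accessor referenced by the QCU macros,
+ * e.g.
+ *
+ *	ack_fail += AR5K_REG_READ(AR5K_ACK_FAIL);
+ *
+ * returns the current count and clears the register, so callers have
+ * to accumulate the values themselves.
+ */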
+
+
+/*===5212 Specific PCU registers===*/
+
+/*
+ * Transmit power control register
+ */
+#define AR5K_TPC 0x80e8
+#define AR5K_TPC_ACK 0x0000003f /* ack frames */
+#define AR5K_TPC_ACK_S 0
+#define AR5K_TPC_CTS 0x00003f00 /* cts frames */
+#define AR5K_TPC_CTS_S 8
+#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */
+#define AR5K_TPC_CHIRP_S 16
+#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */
+#define AR5K_TPC_DOPPLER_S 24
+
+/*
+ * XR (eXtended Range) mode register
+ */
+#define AR5K_XRMODE 0x80c0 /* Register Address */
+#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f /* Mask for Poll type (?) */
+#define AR5K_XRMODE_POLL_TYPE_S 0
+#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c /* Mask for Poll subtype (?) */
+#define AR5K_XRMODE_POLL_SUBTYPE_S 2
+#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 /* Wait for poll */
+#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 /* Mask for SIFS delay */
+#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 /* Mask for frame hold (?) */
+#define AR5K_XRMODE_FRAME_HOLD_S 20
+
+/*
+ * XR delay register
+ */
+#define AR5K_XRDELAY 0x80c4 /* Register Address */
+#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff /* Mask for slot delay */
+#define AR5K_XRDELAY_SLOT_DELAY_S 0
+#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 /* Mask for CHIRP data delay */
+#define AR5K_XRDELAY_CHIRP_DELAY_S 16
+
+/*
+ * XR timeout register
+ */
+#define AR5K_XRTIMEOUT 0x80c8 /* Register Address */
+#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff /* Mask for CHIRP timeout */
+#define AR5K_XRTIMEOUT_CHIRP_S 0
+#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 /* Mask for Poll timeout */
+#define AR5K_XRTIMEOUT_POLL_S 16
+
+/*
+ * XR chirp register
+ */
+#define AR5K_XRCHIRP 0x80cc /* Register Address */
+#define AR5K_XRCHIRP_SEND 0x00000001 /* Send CHIRP */
+#define AR5K_XRCHIRP_GAP 0xffff0000 /* Mask for CHIRP gap (?) */
+
+/*
+ * XR stomp register
+ */
+#define AR5K_XRSTOMP 0x80d0 /* Register Address */
+#define AR5K_XRSTOMP_TX 0x00000001 /* Stomp Tx (?) */
+#define AR5K_XRSTOMP_RX 0x00000002 /* Stomp Rx (?) */
+#define AR5K_XRSTOMP_TX_RSSI 0x00000004 /* Stomp Tx RSSI (?) */
+#define AR5K_XRSTOMP_TX_BSSID 0x00000008 /* Stomp Tx BSSID (?) */
+#define AR5K_XRSTOMP_DATA 0x00000010 /* Stomp data (?)*/
+#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 /* Mask for XR RSSI threshold */
+
+/*
+ * First enhanced sleep register
+ */
+#define AR5K_SLEEP0 0x80d4 /* Register Address */
+#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */
+#define AR5K_SLEEP0_NEXT_DTIM_S 0
+#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */
+#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enhanced sleep control */
+#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */
+#define AR5K_SLEEP0_CABTO_S 24
+
+/*
+ * Second enhanced sleep register
+ */
+#define AR5K_SLEEP1 0x80d8 /* Register Address */
+#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff /* Mask for next TIM (?) */
+#define AR5K_SLEEP1_NEXT_TIM_S 0
+#define AR5K_SLEEP1_BEACON_TO 0xff000000 /* Mask for Beacon Time Out */
+#define AR5K_SLEEP1_BEACON_TO_S 24
+
+/*
+ * Third enhanced sleep register
+ */
+#define AR5K_SLEEP2 0x80dc /* Register Address */
+#define AR5K_SLEEP2_TIM_PER 0x0000ffff /* Mask for TIM period (?) */
+#define AR5K_SLEEP2_TIM_PER_S 0
+#define AR5K_SLEEP2_DTIM_PER 0xffff0000 /* Mask for DTIM period (?) */
+#define AR5K_SLEEP2_DTIM_PER_S 16
+
+/*
+ * TX power control (TPC) register
+ *
+ * XXX: PCDAC steps (0.5dBm) or dBm ?
+ *
+ */
+#define AR5K_TXPC 0x80e8 /* Register Address */
+#define AR5K_TXPC_ACK_M 0x0000003f /* ACK tx power */
+#define AR5K_TXPC_ACK_S 0
+#define AR5K_TXPC_CTS_M 0x00003f00 /* CTS tx power */
+#define AR5K_TXPC_CTS_S 8
+#define AR5K_TXPC_CHIRP_M 0x003f0000 /* CHIRP tx power */
+#define AR5K_TXPC_CHIRP_S 16
+#define AR5K_TXPC_DOPPLER 0x0f000000 /* Doppler chirp span (?) */
+#define AR5K_TXPC_DOPPLER_S 24
+
+/*
+ * Profile count registers
+ *
+ * These registers can be cleared and frozen with AR5K_MIBC, but they do not
+ * generate a MIB interrupt.
+ * Instead of overflowing, they shift right by one bit; all registers
+ * shift together when one of them reaches the maximum, so the values
+ * stay consistent with each other (see the usage note below).
+ */
+#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
+#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
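+
+/*
+ * Usage note (an assumption, not from the register spec): since all four
+ * counters shift together, ratios between them remain meaningful, e.g. an
+ * approximate channel-busy percentage can be derived as
+ *
+ *	busy % ~= (100 * AR5K_PROFCNT_RXCLR reading) / AR5K_PROFCNT_CYCLE reading
+ */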
+
+/*
+ * Quiet period control registers
+ */
+#define AR5K_QUIET_CTL1 0x80fc /* Register Address */
+#define AR5K_QUIET_CTL1_NEXT_QT_TSF 0x0000ffff /* Next quiet period TSF (TU) */
+#define AR5K_QUIET_CTL1_NEXT_QT_TSF_S 0
+#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet period */
+#define AR5K_QUIET_CTL1_ACK_CTS_EN 0x00020000 /* Send ACK/CTS during quiet period */
+
+#define AR5K_QUIET_CTL2 0x8100 /* Register Address */
+#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period periodicity */
+#define AR5K_QUIET_CTL2_QT_PER_S 0
+#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet period duration */
+#define AR5K_QUIET_CTL2_QT_DUR_S 16
+
+/*
+ * TSF parameter register
+ */
+#define AR5K_TSF_PARM 0x8104 /* Register Address */
+#define AR5K_TSF_PARM_INC 0x000000ff /* Mask for TSF increment */
+#define AR5K_TSF_PARM_INC_S 0
+
+/*
+ * QoS NOACK policy
+ */
+#define AR5K_QOS_NOACK 0x8108 /* Register Address */
+#define AR5K_QOS_NOACK_2BIT_VALUES 0x0000000f /* ??? */
+#define AR5K_QOS_NOACK_2BIT_VALUES_S 0
+#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000070 /* ??? */
+#define AR5K_QOS_NOACK_BIT_OFFSET_S 4
+#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000180 /* ??? */
+#define AR5K_QOS_NOACK_BYTE_OFFSET_S 7
+
+/*
+ * PHY error filter register
+ */
+#define AR5K_PHY_ERR_FIL 0x810c
+#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 /* Radar signal */
+#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 /* OFDM false detect (ANI) */
+#define AR5K_PHY_ERR_FIL_CCK 0x02000000 /* CCK false detect (ANI) */
+
+/*
+ * XR latency register
+ */
+#define AR5K_XRLAT_TX 0x8110
+
+/*
+ * ACK SIFS register
+ */
+#define AR5K_ACKSIFS 0x8114 /* Register Address */
+#define AR5K_ACKSIFS_INC 0x00000000 /* ACK SIFS Increment (field) */
+
+/*
+ * MIC QoS control register (?)
+ */
+#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */
+#define AR5K_MIC_QOS_CTL_OFF(_n) (1 << (_n * 2))
+#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
+
+/*
+ * MIC QoS select register (?)
+ */
+#define AR5K_MIC_QOS_SEL 0x811c
+#define AR5K_MIC_QOS_SEL_OFF(_n) (1 << (_n * 4))
+
+/*
+ * Misc mode control register (?)
+ */
+#define AR5K_MISC_MODE 0x8120 /* Register Address */
+#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */
+#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */
+#define AR5K_MISC_MODE_COMBINED_MIC 0x00000004 /* use rx/tx MIC key */
+/* more bits */
+
+/*
+ * OFDM Filter counter
+ */
+#define AR5K_OFDM_FIL_CNT 0x8124
+
+/*
+ * CCK Filter counter
+ */
+#define AR5K_CCK_FIL_CNT 0x8128
+
+/*
+ * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
+ */
+#define AR5K_PHYERR_CNT1 0x812c
+#define AR5K_PHYERR_CNT1_MASK 0x8130
+
+#define AR5K_PHYERR_CNT2 0x8134
+#define AR5K_PHYERR_CNT2_MASK 0x8138
+
+/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
+#define ATH5K_PHYERR_CNT_MAX 0x00c00000
+
+/*
+ * TSF Threshold register (?)
+ */
+#define AR5K_TSF_THRES 0x813c
+
+/*
+ * TODO: Wake On Wireless registers
+ * Range: 0x8147 - 0x818c
+ */
+
+/*
+ * Rate -> ACK SIFS mapping table (32 entries)
+ */
+#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
+#define AR5K_RATE_ACKSIFS(_n) (AR5K_RATE_ACKSIFS_BASE + ((_n) << 2))
+#define AR5K_RATE_ACKSIFS_NORMAL 0x00000001 /* Normal SIFS (field) */
+#define AR5K_RATE_ACKSIFS_TURBO 0x00000400 /* Turbo SIFS (field) */
+
+/*
+ * Rate -> duration mapping table (32 entries)
+ */
+#define AR5K_RATE_DUR_BASE 0x8700
+#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2))
+
+/*
+ * Rate -> db mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_RATE2DB_BASE 0x87c0
+#define AR5K_RATE2DB(_n) (AR5K_RATE2DB_BASE + ((_n) << 2))
+
+/*
+ * db -> Rate mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_DB2RATE_BASE 0x87e0
+#define AR5K_DB2RATE(_n) (AR5K_DB2RATE_BASE + ((_n) << 2))
+
+/*===5212 end===*/
+
+#define AR5K_KEYTABLE_SIZE_5210 64
+#define AR5K_KEYTABLE_SIZE_5211 128
+
+/*===PHY REGISTERS===*/
+
+/*
+ * PHY registers start
+ */
+#define AR5K_PHY_BASE 0x9800
+#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2))
+
+/*
+ * TST_2 (Misc config parameters)
+ */
+#define AR5K_PHY_TST2 0x9800 /* Register Address */
+#define AR5K_PHY_TST2_TRIG_SEL 0x00000007 /* Trigger select (?)*/
+#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) */
+#define AR5K_PHY_TST2_CBUS_MODE 0x00000060 /* Cardbus mode (?) */
+#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32kHz external) */
+#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
+#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
+#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
+#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch ?) */
+#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */
+#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */
+#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */
+#define AR5K_PHY_TST2_AGC_OBS_SEL_3 0x00040000 /* AGC OBS Select 3 (?) */
+#define AR5K_PHY_TST2_BBB_OBS_SEL 0x00080000 /* BB OBS Select (field ?) */
+#define AR5K_PHY_TST2_ADC_OBS_SEL 0x00800000 /* ADC OBS Select (field ?) */
+#define AR5K_PHY_TST2_RX_CLR_SEL 0x08000000 /* RX Clear Select (?) */
+#define AR5K_PHY_TST2_FORCE_AGC_CLR 0x10000000 /* Force AGC clear (?) */
+#define AR5K_PHY_SHIFT_2GHZ 0x00004007 /* Used to access 2GHz radios */
+#define AR5K_PHY_SHIFT_5GHZ 0x00000007 /* Used to access 5GHz radios (default) */
+
+/*
+ * PHY frame control register [5110] /turbo mode register [5111+]
+ *
+ * There is another frame control register for [5111+]
+ * at address 0x9944 (see below), but the first 2 flags
+ * here are common between the 5110 frame control register
+ * and the [5111+] turbo mode register, so this also works as
+ * a "turbo mode register" for 5110. We treat this one as
+ * a frame control register for 5110 below.
+ */
+#define AR5K_PHY_TURBO 0x9804 /* Register Address */
+#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
+#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Set short symbols to turbo mode */
+#define AR5K_PHY_TURBO_MIMO 0x00000004 /* Set turbo for mimo */
+
+/*
+ * PHY agility command register
+ * (aka TST_1)
+ */
+#define AR5K_PHY_AGC 0x9808 /* Register Address */
+#define AR5K_PHY_TST1 0x9808
+#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/
+#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */
+#define AR5K_PHY_TST1_TXSRC_SRC 0x00000002 /* Used with bit 7 (?) */
+#define AR5K_PHY_TST1_TXSRC_SRC_S 1
+#define AR5K_PHY_TST1_TXSRC_ALT 0x00000080 /* Set input to tsdac (?) */
+#define AR5K_PHY_TST1_TXSRC_ALT_S 7
+
+
+/*
+ * PHY timing register 3 [5112+]
+ */
+#define AR5K_PHY_TIMING_3 0x9814
+#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000
+#define AR5K_PHY_TIMING_3_DSC_MAN_S 17
+#define AR5K_PHY_TIMING_3_DSC_EXP 0x0001e000
+#define AR5K_PHY_TIMING_3_DSC_EXP_S 13
+
+/*
+ * PHY chip revision register
+ */
+#define AR5K_PHY_CHIP_ID 0x9818
+
+/*
+ * PHY activation register
+ */
+#define AR5K_PHY_ACT 0x981c /* Register Address */
+#define AR5K_PHY_ACT_ENABLE 0x00000001 /* Activate PHY */
+#define AR5K_PHY_ACT_DISABLE 0x00000002 /* Deactivate PHY */
+
+/*
+ * PHY RF control registers
+ */
+#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */
+#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* TX frame to TX data start */
+#define AR5K_PHY_RF_CTL2_TXF2TXD_START_S 0
+
+#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
+#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000ff00 /* TX end to XLNA on */
+#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S 8
+
+#define AR5K_PHY_ADC_CTL 0x982c
+#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF 0x00000003
+#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF_S 0
+#define AR5K_PHY_ADC_CTL_PWD_DAC_OFF 0x00002000
+#define AR5K_PHY_ADC_CTL_PWD_BAND_GAP_OFF 0x00004000
+#define AR5K_PHY_ADC_CTL_PWD_ADC_OFF 0x00008000
+#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON 0x00030000
+#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON_S 16
+
+#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */
+#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */
+#define AR5K_PHY_RF_CTL4_TXF2XPA_B_ON 0x00000100 /* TX frame to XPA B on (field) */
+#define AR5K_PHY_RF_CTL4_TXE2XPA_A_OFF 0x00010000 /* TX end to XPA A off (field) */
+#define AR5K_PHY_RF_CTL4_TXE2XPA_B_OFF 0x01000000 /* TX end to XPA B off (field) */
+
+/*
+ * Pre-Amplifier control register
+ * (XPA -> external pre-amplifier)
+ */
+#define AR5K_PHY_PA_CTL 0x9838 /* Register Address */
+#define AR5K_PHY_PA_CTL_XPA_A_HI 0x00000001 /* XPA A high (?) */
+#define AR5K_PHY_PA_CTL_XPA_B_HI 0x00000002 /* XPA B high (?) */
+#define AR5K_PHY_PA_CTL_XPA_A_EN 0x00000004 /* Enable XPA A */
+#define AR5K_PHY_PA_CTL_XPA_B_EN 0x00000008 /* Enable XPA B */
+
+/*
+ * PHY settling register
+ */
+#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
+#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
+#define AR5K_PHY_SETTLING_AGC_S 0
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
+#define AR5K_PHY_SETTLING_SWITCH_S 7
+
+/*
+ * PHY Gain registers
+ */
+#define AR5K_PHY_GAIN 0x9848 /* Register Address */
+#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* TX-RX Attenuation */
+#define AR5K_PHY_GAIN_TXRX_ATTEN_S 12
+#define AR5K_PHY_GAIN_TXRX_RF_MAX 0x007c0000
+#define AR5K_PHY_GAIN_TXRX_RF_MAX_S 18
+
+#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */
+#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
+
+/*
+ * Desired ADC/PGA size register
+ * (for more info read the ANI patent)
+ */
+#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
+#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* ADC desired size */
+#define AR5K_PHY_DESIRED_SIZE_ADC_S 0
+#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* PGA desired size */
+#define AR5K_PHY_DESIRED_SIZE_PGA_S 8
+#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Total desired size */
+#define AR5K_PHY_DESIRED_SIZE_TOT_S 20
+
+/*
+ * PHY signal register
+ * (for more info read the ANI patent)
+ */
+#define AR5K_PHY_SIG 0x9858 /* Register Address */
+#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* FIRSTEP */
+#define AR5K_PHY_SIG_FIRSTEP_S 12
+#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* FIRPWR */
+#define AR5K_PHY_SIG_FIRPWR_S 18
+
+/*
+ * PHY coarse agility control register
+ * (for more info read the ANI patent)
+ */
+#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */
+#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* AGC Coarse low */
+#define AR5K_PHY_AGCCOARSE_LO_S 7
+#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* AGC Coarse high */
+#define AR5K_PHY_AGCCOARSE_HI_S 15
+
+/*
+ * PHY agility control register
+ */
+#define AR5K_PHY_AGCCTL 0x9860 /* Register address */
+#define AR5K_PHY_AGCCTL_CAL 0x00000001 /* Enable PHY calibration */
+#define AR5K_PHY_AGCCTL_NF 0x00000002 /* Enable Noise Floor calibration */
+#define AR5K_PHY_AGCCTL_OFDM_DIV_DIS 0x00000008 /* Disable antenna diversity on OFDM modes */
+#define AR5K_PHY_AGCCTL_NF_EN 0x00008000 /* Enable nf calibration to happen (?) */
+#define AR5K_PHY_AGCTL_FLTR_CAL 0x00010000 /* Allow filter calibration (?) */
+#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automatically */
+
+/*
+ * PHY noise floor status register (CCA = Clear Channel Assessment)
+ */
+#define AR5K_PHY_NF 0x9864 /* Register address */
+#define AR5K_PHY_NF_M 0x000001ff /* Noise floor, written to hardware in 1/2 dBm units */
+#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
+#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */
+#define AR5K_PHY_NF_THRESH62_S 12
+#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* Minimum measured noise level, read from hardware in 1 dBm units */
+#define AR5K_PHY_NF_MINCCA_PWR_S 19
+
+/*
+ * PHY ADC saturation register [5110]
+ */
+#define AR5K_PHY_ADCSAT 0x9868
+#define AR5K_PHY_ADCSAT_ICNT 0x0001f800
+#define AR5K_PHY_ADCSAT_ICNT_S 11
+#define AR5K_PHY_ADCSAT_THR 0x000007e0
+#define AR5K_PHY_ADCSAT_THR_S 5
+
+/*
+ * PHY Weak ofdm signal detection threshold registers (ANI) [5212+]
+ */
+
+/* High thresholds */
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR 0x9868
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT 0x0000001f
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT_S 0
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1 0x00fe0000
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1_S 17
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2 0x7f000000
+#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24
+
+/* Low thresholds */
+#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1 0x001fc000
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1_S 14
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2 0x0fe00000
+#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_S 21
+
+
+/*
+ * PHY sleep registers [5112+]
+ */
+#define AR5K_PHY_SCR 0x9870
+
+#define AR5K_PHY_SLMT 0x9874
+#define AR5K_PHY_SLMT_32MHZ 0x0000007f
+
+#define AR5K_PHY_SCAL 0x9878
+#define AR5K_PHY_SCAL_32MHZ 0x0000000e
+#define AR5K_PHY_SCAL_32MHZ_5311 0x00000008
+#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a
+#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032
+
+/*
+ * PHY PLL (Phase Locked Loop) control register
+ */
+#define AR5K_PHY_PLL 0x987c
+#define AR5K_PHY_PLL_20MHZ 0x00000013 /* For half rate (?) */
+/* 40MHz -> 5GHz band */
+#define AR5K_PHY_PLL_40MHZ_5211 0x00000018
+#define AR5K_PHY_PLL_40MHZ_5212 0x000000aa
+#define AR5K_PHY_PLL_40MHZ_5413 0x00000004
+#define AR5K_PHY_PLL_40MHZ (ah->ah_version == AR5K_AR5211 ? \
+ AR5K_PHY_PLL_40MHZ_5211 : AR5K_PHY_PLL_40MHZ_5212)
+/* 44MHz -> 2.4GHz band */
+#define AR5K_PHY_PLL_44MHZ_5211 0x00000019
+#define AR5K_PHY_PLL_44MHZ_5212 0x000000ab
+#define AR5K_PHY_PLL_44MHZ (ah->ah_version == AR5K_AR5211 ? \
+ AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
+
+#define AR5K_PHY_PLL_RF5111 0x00000000
+#define AR5K_PHY_PLL_RF5112 0x00000040
+#define AR5K_PHY_PLL_HALF_RATE 0x00000100
+#define AR5K_PHY_PLL_QUARTER_RATE 0x00000200
+
+/*
+ * RF Buffer register
+ *
+ * It's obvious from the code that 0x989c is the buffer register, but
+ * the purpose of the other special registers we write to after sending
+ * each packet is unknown, so I'll name them BUFFER_CONTROL_X registers
+ * for now. Interestingly, they are also used for some other operations.
+ */
+
+#define AR5K_RF_BUFFER 0x989c
+#define AR5K_RF_BUFFER_CONTROL_0 0x98c0 /* Channel on 5110 */
+#define AR5K_RF_BUFFER_CONTROL_1 0x98c4 /* Bank 7 on 5112 */
+#define AR5K_RF_BUFFER_CONTROL_2 0x98cc /* Bank 7 on 5111 */
+
+#define AR5K_RF_BUFFER_CONTROL_3 0x98d0 /* Bank 2 on 5112 */
+ /* Channel set on 5111 */
+ /* Used to read radio revision*/
+
+#define AR5K_RF_BUFFER_CONTROL_4 0x98d4 /* RF Stage register on 5110 */
+ /* Bank 0,1,2,6 on 5111 */
+ /* Bank 1 on 5112 */
+ /* Used during activation on 5111 */
+
+#define AR5K_RF_BUFFER_CONTROL_5 0x98d8 /* Bank 3 on 5111 */
+ /* Used during activation on 5111 */
+ /* Channel on 5112 */
+ /* Bank 6 on 5112 */
+
+#define AR5K_RF_BUFFER_CONTROL_6 0x98dc /* Bank 3 on 5112 */
+
+/*
+ * PHY RF stage register [5210]
+ */
+#define AR5K_PHY_RFSTG 0x98d4
+#define AR5K_PHY_RFSTG_DISABLE 0x00000021
+
+/*
+ * BIN masks (?)
+ */
+#define AR5K_PHY_BIN_MASK_1 0x9900
+#define AR5K_PHY_BIN_MASK_2 0x9904
+#define AR5K_PHY_BIN_MASK_3 0x9908
+
+#define AR5K_PHY_BIN_MASK_CTL 0x990c
+#define AR5K_PHY_BIN_MASK_CTL_MASK_4 0x00003fff
+#define AR5K_PHY_BIN_MASK_CTL_MASK_4_S 0
+#define AR5K_PHY_BIN_MASK_CTL_RATE 0xff000000
+#define AR5K_PHY_BIN_MASK_CTL_RATE_S 24
+
+/*
+ * PHY Antenna control register
+ */
+#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */
+#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */
+#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */
+#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */
+#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x000003f0 /* Switch table idle (?) */
+#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE_S 4
+
+/*
+ * PHY receiver delay register [5111+]
+ */
+#define AR5K_PHY_RX_DELAY 0x9914 /* Register Address */
+#define AR5K_PHY_RX_DELAY_M 0x00003fff /* Mask for RX activate to receive delay (/100ns) */
+
+/*
+ * PHY max rx length register (?) [5111]
+ */
+#define AR5K_PHY_MAX_RX_LEN 0x991c
+
+/*
+ * PHY timing register 4
+ * In-phase (I) / Quadrature (Q) calibration register [5111+]
+ */
+#define AR5K_PHY_IQ 0x9920 /* Register Address */
+#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
+#define AR5K_PHY_IQ_CORR_Q_Q_COFF_S 0
+#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
+#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
+#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
+#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 /* Mask for max number of samples in log scale */
+#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12
+#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */
+#define AR5K_PHY_IQ_USE_PT_DF 0x00020000 /* Use pilot track df (?) */
+#define AR5K_PHY_IQ_EARLY_TRIG_THR 0x00200000 /* Early trigger threshold (?) (field) */
+#define AR5K_PHY_IQ_PILOT_MASK_EN 0x10000000 /* Enable pilot mask (?) */
+#define AR5K_PHY_IQ_CHAN_MASK_EN 0x20000000 /* Enable channel mask (?) */
+#define AR5K_PHY_IQ_SPUR_FILT_EN 0x40000000 /* Enable spur filter */
+#define AR5K_PHY_IQ_SPUR_RSSI_EN 0x80000000 /* Enable spur rssi */
+
+/*
+ * PHY timing register 5
+ * OFDM Self-correlator Cyclic RSSI threshold params
+ * (Check out bb_cycpwr_thr1 on ANI patent)
+ */
+#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S 1
+#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
+#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
+#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
+#define AR5K_PHY_OFDM_SELFCORR_LSCTHR_HIRSSI 0x00800000 /* Long sc threshold hi rssi (?) */
+
+/*
+ * PHY-only warm reset register
+ */
+#define AR5K_PHY_WARM_RESET 0x9928
+
+/*
+ * PHY-only control register
+ */
+#define AR5K_PHY_CTL 0x992c /* Register Address */
+#define AR5K_PHY_CTL_RX_DRAIN_RATE 0x00000001 /* RX drain rate (?) */
+#define AR5K_PHY_CTL_LATE_TX_SIG_SYM 0x00000002 /* Late tx signal symbol (?) */
+#define AR5K_PHY_CTL_GEN_SCRAMBLER 0x00000004 /* Generate scrambler */
+#define AR5K_PHY_CTL_TX_ANT_SEL 0x00000008 /* TX antenna select */
+#define AR5K_PHY_CTL_TX_ANT_STATIC 0x00000010 /* Static TX antenna */
+#define AR5K_PHY_CTL_RX_ANT_SEL 0x00000020 /* RX antenna select */
+#define AR5K_PHY_CTL_RX_ANT_STATIC 0x00000040 /* Static RX antenna */
+#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */
+
+/*
+ * PHY PAPD probe register [5111+]
+ */
+#define AR5K_PHY_PAPD_PROBE 0x9930
+#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001
+#define AR5K_PHY_PAPD_PROBE_PCDAC_BIAS 0x00000002
+#define AR5K_PHY_PAPD_PROBE_COMP_GAIN 0x00000040
+#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00
+#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9
+#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000
+#define AR5K_PHY_PAPD_PROBE_PREDIST_EN 0x00010000
+#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */
+#define AR5K_PHY_PAPD_PROBE_TYPE_S 23
+#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0
+#define AR5K_PHY_PAPD_PROBE_TYPE_XR 1
+#define AR5K_PHY_PAPD_PROBE_TYPE_CCK 2
+#define AR5K_PHY_PAPD_PROBE_GAINF 0xfe000000
+#define AR5K_PHY_PAPD_PROBE_GAINF_S 25
+#define AR5K_PHY_PAPD_PROBE_INI_5111 0x00004883 /* [5212+] */
+#define AR5K_PHY_PAPD_PROBE_INI_5112 0x00004882 /* [5212+] */
+
+/*
+ * PHY TX rate power registers [5112+]
+ */
+#define AR5K_PHY_TXPOWER_RATE1 0x9934
+#define AR5K_PHY_TXPOWER_RATE2 0x9938
+#define AR5K_PHY_TXPOWER_RATE_MAX 0x993c
+#define AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE 0x00000040
+#define AR5K_PHY_TXPOWER_RATE3 0xa234
+#define AR5K_PHY_TXPOWER_RATE4 0xa238
+
+/*
+ * PHY frame control register [5111+]
+ */
+#define AR5K_PHY_FRAME_CTL_5210 0x9804
+#define AR5K_PHY_FRAME_CTL_5211 0x9944
+#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
+/*---[5111+]---*/
+#define AR5K_PHY_FRAME_CTL_WIN_LEN 0x00000003 /* Force window length (?) */
+#define AR5K_PHY_FRAME_CTL_WIN_LEN_S 0
+#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
+#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
+#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
+#define AR5K_PHY_FRAME_CTL_EMU 0x80000000
+#define AR5K_PHY_FRAME_CTL_EMU_S 31
+/*---[5110/5111]---*/
+#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */
+#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */
+#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* Illegal rate */
+#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */
+#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000
+#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */
+#define AR5K_PHY_FRAME_CTL_INI \
+ (AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
+ AR5K_PHY_FRAME_CTL_TXURN_ERR | \
+ AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
+ AR5K_PHY_FRAME_CTL_ILLRATE_ERR | \
+ AR5K_PHY_FRAME_CTL_PARITY_ERR | \
+ AR5K_PHY_FRAME_CTL_TIMING_ERR)
+
+/*
+ * PHY Tx Power adjustment register [5212A+]
+ */
+#define AR5K_PHY_TX_PWR_ADJ 0x994c
+#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA 0x00000fc0
+#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA_S 6
+#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX 0x00fc0000
+#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX_S 18
+
+/*
+ * PHY radar detection register [5111+]
+ */
+#define AR5K_PHY_RADAR 0x9954
+#define AR5K_PHY_RADAR_ENABLE 0x00000001
+#define AR5K_PHY_RADAR_DISABLE 0x00000000
+#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e /* Inband threshold
+ 5-bits, units unknown {0..31}
+ (? MHz ?) */
+#define AR5K_PHY_RADAR_INBANDTHR_S 1
+
+#define AR5K_PHY_RADAR_PRSSI_THR 0x00000fc0 /* Pulse RSSI/SNR threshold
+ 6 bits, range {0..63}
+ in dBm units. */
+#define AR5K_PHY_RADAR_PRSSI_THR_S 6
+
+#define AR5K_PHY_RADAR_PHEIGHT_THR 0x0003f000 /* Pulse height threshold
+ 6 bits, range {0..63}
+ in dBm units. */
+#define AR5K_PHY_RADAR_PHEIGHT_THR_S 12
+
+#define AR5K_PHY_RADAR_RSSI_THR 0x00fc0000 /* Radar RSSI/SNR threshold.
+ 6 bits, range {0..63}
+ in dBm units. */
+#define AR5K_PHY_RADAR_RSSI_THR_S 18
+
+#define AR5K_PHY_RADAR_FIRPWR_THR 0x7f000000 /* Finite Impulse Response
+ filter power out threshold.
+ 7-bits, standard power range
+ {0..127} in 1/2 dBm units. */
+#define AR5K_PHY_RADAR_FIRPWR_THRS 24
+
+/*
+ * PHY antenna switch table registers
+ */
+#define AR5K_PHY_ANT_SWITCH_TABLE_0 0x9960
+#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964
+
+/*
+ * PHY Noise floor threshold
+ */
+#define AR5K_PHY_NFTHRES 0x9968
+
+/*
+ * Sigma Delta register (?) [5213]
+ */
+#define AR5K_PHY_SIGMA_DELTA 0x996C
+#define AR5K_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR5K_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR5K_PHY_SIGMA_DELTA_FILT2 0x000000f8
+#define AR5K_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR5K_PHY_SIGMA_DELTA_FILT1 0x00001f00
+#define AR5K_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000
+#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+/*
+ * RF restart register [5112+] (?)
+ */
+#define AR5K_PHY_RESTART 0x9970 /* restart */
+#define AR5K_PHY_RESTART_DIV_GC 0x001c0000 /* Fast diversity gc_limit (?) */
+#define AR5K_PHY_RESTART_DIV_GC_S 18
+
+/*
+ * RF Bus access request register (for synth-only channel switching)
+ */
+#define AR5K_PHY_RFBUS_REQ 0x997C
+#define AR5K_PHY_RFBUS_REQ_REQUEST 0x00000001
+
+/*
+ * Spur mitigation masks (?)
+ */
+#define AR5K_PHY_TIMING_7 0x9980
+#define AR5K_PHY_TIMING_8 0x9984
+#define AR5K_PHY_TIMING_8_PILOT_MASK_2 0x000fffff
+#define AR5K_PHY_TIMING_8_PILOT_MASK_2_S 0
+
+#define AR5K_PHY_BIN_MASK2_1 0x9988
+#define AR5K_PHY_BIN_MASK2_2 0x998c
+#define AR5K_PHY_BIN_MASK2_3 0x9990
+
+#define AR5K_PHY_BIN_MASK2_4 0x9994
+#define AR5K_PHY_BIN_MASK2_4_MASK_4 0x00003fff
+#define AR5K_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR5K_PHY_TIMING_9 0x9998
+#define AR5K_PHY_TIMING_10 0x999c
+#define AR5K_PHY_TIMING_10_PILOT_MASK_2 0x000fffff
+#define AR5K_PHY_TIMING_10_PILOT_MASK_2_S 0
+
+/*
+ * Spur mitigation control
+ */
+#define AR5K_PHY_TIMING_11 0x99a0 /* Register address */
+#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE 0x000fffff /* Spur delta phase */
+#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE_S 0
+#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD 0x3ff00000 /* Freq sigma delta */
+#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD_S 20
+#define AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC 0x40000000 /* Spur filter in AGC detector */
+#define AR5K_PHY_TIMING_11_USE_SPUR_IN_SELFCOR 0x80000000 /* Spur filter in OFDM self correlator */
+
+/*
+ * Gain tables
+ */
+#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
+#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
+#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplifier Gain table base address */
+#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2))
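+
+/*
+ * Usage sketch (rfg_values[] / rfg_size are hypothetical names): gain table
+ * initialization simply walks an initval array and writes each entry to its
+ * table slot, e.g. for the RF gain table:
+ *
+ * for (i = 0; i < rfg_size; i++)
+ *	ath5k_hw_reg_write(ah, rfg_values[i], AR5K_RF_GAIN(i));
+ */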
+
+/*
+ * PHY timing IQ calibration result register [5111+]
+ */
+#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */
+#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */
+#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */
+
+/*
+ * PHY current RSSI register [5111+]
+ */
+#define AR5K_PHY_CURRENT_RSSI 0x9c1c
+
+/*
+ * PHY RF Bus grant register
+ */
+#define AR5K_PHY_RFBUS_GRANT 0x9c20
+#define AR5K_PHY_RFBUS_GRANT_OK 0x00000001
+
+/*
+ * PHY ADC test register
+ */
+#define AR5K_PHY_ADC_TEST 0x9c24
+#define AR5K_PHY_ADC_TEST_I 0x00000001
+#define AR5K_PHY_ADC_TEST_Q 0x00000200
+
+/*
+ * PHY DAC test register
+ */
+#define AR5K_PHY_DAC_TEST 0x9c28
+#define AR5K_PHY_DAC_TEST_I 0x00000001
+#define AR5K_PHY_DAC_TEST_Q 0x00000200
+
+/*
+ * PHY PTAT register (?)
+ */
+#define AR5K_PHY_PTAT 0x9c2c
+
+/*
+ * PHY Illegal TX rate register [5112+]
+ */
+#define AR5K_PHY_BAD_TX_RATE 0x9c30
+
+/*
+ * PHY SPUR Power register [5112+]
+ */
+#define AR5K_PHY_SPUR_PWR 0x9c34 /* Register Address */
+#define AR5K_PHY_SPUR_PWR_I 0x00000001 /* SPUR Power estimate for I (field) */
+#define AR5K_PHY_SPUR_PWR_Q 0x00000100 /* SPUR Power estimate for Q (field) */
+#define AR5K_PHY_SPUR_PWR_FILT 0x00010000 /* Power with SPUR removed (field) */
+
+/*
+ * PHY Channel status register [5112+] (?)
+ */
+#define AR5K_PHY_CHAN_STATUS 0x9c38
+#define AR5K_PHY_CHAN_STATUS_BT_ACT 0x00000001
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_RAW 0x00000002
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_MAC 0x00000004
+#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008
+
+/*
+ * Heavy clip enable register
+ */
+#define AR5K_PHY_HEAVY_CLIP_ENABLE 0x99e0
+
+/*
+ * PHY clock sleep registers [5112+]
+ */
+#define AR5K_PHY_SCLOCK 0x99f0
+#define AR5K_PHY_SCLOCK_32MHZ 0x0000000c
+#define AR5K_PHY_SDELAY 0x99f4
+#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
+#define AR5K_PHY_SPENDING 0x99f8
+
+
+/*
+ * PHY PAPD I (power?) table (?)
+ * (92! entries)
+ */
+#define AR5K_PHY_PAPD_I_BASE 0xa000
+#define AR5K_PHY_PAPD_I(_n) (AR5K_PHY_PAPD_I_BASE + ((_n) << 2))
+
+/*
+ * PHY PCDAC TX power table
+ */
+#define AR5K_PHY_PCDAC_TXPOWER_BASE 0xa180
+#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2))
+
+/*
+ * PHY mode register [5111+]
+ */
+#define AR5K_PHY_MODE 0x0a200 /* Register Address */
+#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation bit */
+#define AR5K_PHY_MODE_MOD_OFDM 0
+#define AR5K_PHY_MODE_MOD_CCK 1
+#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode bit */
+#define AR5K_PHY_MODE_FREQ_5GHZ 0
+#define AR5K_PHY_MODE_FREQ_2GHZ 2
+#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Enable Dynamic OFDM/CCK mode [5112+] */
+#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */
+#define AR5K_PHY_MODE_RAD_RF5111 0
+#define AR5K_PHY_MODE_RAD_RF5112 8
+#define AR5K_PHY_MODE_XR 0x00000010 /* Enable XR mode [5112+] */
+#define AR5K_PHY_MODE_HALF_RATE 0x00000020 /* Enable Half rate (test) */
+#define AR5K_PHY_MODE_QUARTER_RATE 0x00000040 /* Enable Quarter rate (test) */
+
+/*
+ * PHY CCK transmit control register [5111+ (?)]
+ */
+#define AR5K_PHY_CCKTXCTL 0xa204
+#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000
+#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010
+#define AR5K_PHY_CCKTXCTL_SCRAMBLER_DIS 0x00000001
+#define AR5K_PHY_CCKTXCTK_DAC_SCALE 0x00000004
+
+/*
+ * PHY CCK Cross-correlator Barker RSSI threshold register [5212+]
+ */
+#define AR5K_PHY_CCK_CROSSCORR 0xa208
+#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000003f
+#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
+
+/* Same address is used for antenna diversity activation */
+#define AR5K_PHY_FAST_ANT_DIV 0xa208
+#define AR5K_PHY_FAST_ANT_DIV_EN 0x00002000
+
+/*
+ * PHY 2GHz gain register [5111+]
+ */
+#define AR5K_PHY_GAIN_2GHZ 0xa20c
+#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX 0x00fc0000
+#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX_S 18
+#define AR5K_PHY_GAIN_2GHZ_INI_5111 0x6480416c
+
+#define AR5K_PHY_CCK_RX_CTL_4 0xa21c
+#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT 0x01f80000
+#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT_S 19
+
+#define AR5K_PHY_DAG_CCK_CTL 0xa228
+#define AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR 0x00000200
+#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR 0x0001fc00
+#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR_S 10
+
+#define AR5K_PHY_FAST_ADC 0xa24c
+
+#define AR5K_PHY_BLUETOOTH 0xa254
+
+/*
+ * Transmit Power Control register
+ * [2413+]
+ */
+#define AR5K_PHY_TPC_RG1 0xa258
+#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000
+#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14
+#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000
+#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16
+#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000
+#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18
+#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000
+#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20
+
+#define AR5K_PHY_TPC_RG5 0xa26C
+#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP_S 0
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S 22
+
+/*
+ * PHY PDADC Tx power table
+ */
+#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280
+#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2))
+
+/*
+ * Platform registers for WiSoC
+ */
+#define AR5K_AR5312_RESET 0xbc003020
+#define AR5K_AR5312_RESET_BB0_COLD 0x00000004
+#define AR5K_AR5312_RESET_BB1_COLD 0x00000200
+#define AR5K_AR5312_RESET_WMAC0 0x00002000
+#define AR5K_AR5312_RESET_BB0_WARM 0x00004000
+#define AR5K_AR5312_RESET_WMAC1 0x00020000
+#define AR5K_AR5312_RESET_BB1_WARM 0x00040000
+
+#define AR5K_AR5312_ENABLE 0xbc003080
+#define AR5K_AR5312_ENABLE_WLAN0 0x00000001
+#define AR5K_AR5312_ENABLE_WLAN1 0x00000008
+
+#define AR5K_AR2315_RESET 0xb1000004
+#define AR5K_AR2315_RESET_WMAC 0x00000001
+#define AR5K_AR2315_RESET_BB_WARM 0x00000002
+
+#define AR5K_AR2315_AHB_ARB_CTL 0xb1000008
+#define AR5K_AR2315_AHB_ARB_CTL_WLAN 0x00000002
+
+#define AR5K_AR2315_BYTESWAP 0xb100000c
+#define AR5K_AR2315_BYTESWAP_WMAC 0x00000002
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
new file mode 100644
index 000000000..99e62f99a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -0,0 +1,1380 @@
+/*
+ * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
+ * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
+ * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/****************************\
+ Reset function and helpers
+\****************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/unaligned.h>
+
+#include <linux/pci.h> /* To determine if a card is pci-e */
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include "ath5k.h"
+#include "reg.h"
+#include "debug.h"
+
+
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit on other places such as clock, sleep and power control
+ */
+
+
+/******************\
+* Helper functions *
+\******************/
+
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns 0 on success or -EAGAIN if we exceed AR5K_TUNE_REGISTER_TIMEOUT * 15us
+ */
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set)
+{
+ int i;
+ u32 data;
+
+ for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+ data = ath5k_hw_reg_read(ah, reg);
+ if (is_set && (data & flag))
+ break;
+ else if ((data & flag) == val)
+ break;
+ udelay(15);
+ }
+
+ return (i <= 0) ? -EAGAIN : 0;
+}
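+
+/*
+ * Usage sketch (hypothetical caller): waiting for an AGC calibration started
+ * through AR5K_PHY_AGCCTL_CAL to complete could look like
+ *
+ * if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ *			AR5K_PHY_AGCCTL_CAL, 0, false))
+ *	return -EAGAIN;
+ */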
+
+
+/*************************\
+* Clock related functions *
+\*************************/
+
+/**
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
+ */
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return usec * common->clockrate;
+}
+
+/**
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
+ * @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
+ */
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return clock / common->clockrate;
+}
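+
+/*
+ * Worked example (hypothetical values): with a 40MHz core clock
+ * (common->clockrate == 40), a 9 usec slot time maps to
+ * ath5k_hw_htoclock(ah, 9) == 360 clock units, and
+ * ath5k_hw_clocktoh(ah, 360) == 9 usec again.
+ */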
+
+/**
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
+ *
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
+ */
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ struct ath_common *common = ath5k_hw_common(ah);
+ u32 usec_reg, txlat, rxlat, usec, clock, sclock, txf2txs;
+
+ /*
+ * Set core clock frequency
+ */
+ switch (channel->hw_value) {
+ case AR5K_MODE_11A:
+ clock = 40;
+ break;
+ case AR5K_MODE_11B:
+ clock = 22;
+ break;
+ case AR5K_MODE_11G:
+ default:
+ clock = 44;
+ break;
+ }
+
+ /* Use clock multiplier for non-default
+ * bwmode */
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ clock *= 2;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ clock /= 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ clock /= 4;
+ break;
+ default:
+ break;
+ }
+
+ common->clockrate = clock;
+
+ /*
+ * Set USEC parameters
+ */
+ /* Set USEC counter on PCU*/
+ usec = clock - 1;
+ usec = AR5K_REG_SM(usec, AR5K_USEC_1);
+
+ /* Set usec duration on DCU */
+ if (ah->ah_version != AR5K_AR5210)
+ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_USEC_DUR,
+ clock);
+
+ /* Set 32MHz USEC counter */
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF2413) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_radio == AR5K_RF2317))
+ /* Remain on 40MHz clock ? */
+ sclock = 40 - 1;
+ else
+ sclock = 32 - 1;
+ sclock = AR5K_REG_SM(sclock, AR5K_USEC_32);
+
+ /*
+ * Set tx/rx latencies
+ */
+ usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
+ txlat = AR5K_REG_MS(usec_reg, AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211);
+
+ /*
+ * Set default Tx frame to Tx data start delay
+ */
+ txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
+
+ /*
+ * 5210 initvals don't include usec settings
+ * so we need to use magic values here for
+ * tx/rx latencies
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ /* same for turbo */
+ txlat = AR5K_INIT_TX_LATENCY_5210;
+ rxlat = AR5K_INIT_RX_LATENCY_5210;
+ }
+
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+ /* 5311 has different tx/rx latency masks
+ * from 5211. Since we treat 5311 the same
+ * as 5211 when setting initvals, shift the
+ * values here to their proper locations.
+ *
+ * Note: Initvals indicate tx/rx latencies
+ * are the same for turbo mode */
+ txlat = AR5K_REG_SM(txlat, AR5K_USEC_TX_LATENCY_5210);
+ rxlat = AR5K_REG_SM(rxlat, AR5K_USEC_RX_LATENCY_5210);
+ } else
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_10MHZ:
+ txlat = AR5K_REG_SM(txlat * 2,
+ AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_10MHZ;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ txlat = AR5K_REG_SM(txlat * 4,
+ AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_5MHZ;
+ break;
+ case AR5K_BWMODE_40MHZ:
+ txlat = AR5K_INIT_TX_LAT_MIN;
+ rxlat = AR5K_REG_SM(rxlat / 2,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
+ break;
+ default:
+ break;
+ }
+
+ usec_reg = (usec | sclock | txlat | rxlat);
+ ath5k_hw_reg_write(ah, usec_reg, AR5K_USEC);
+
+ /* On 5112 set tx frame to tx data start delay */
+ if (ah->ah_radio == AR5K_RF5112) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL2,
+ AR5K_PHY_RF_CTL2_TXF2TXD_START,
+ txf2txs);
+ }
+}
+
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
+ * If there is an external 32KHz crystal available, use it
+ * as ref. clock instead of 32/40MHz clock and baseband clocks
+ * to save power during sleep or restore normal 32/40MHz
+ * operation.
+ *
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
+ */
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 scal, spending, sclock;
+
+ /* Only set 32KHz settings if we have an external
+ * 32KHz crystal present */
+ if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
+ AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
+ enable) {
+
+ /* 1 usec/cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
+ /* Set up tsf increment on each cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
+
+ /* Set baseband sleep control registers
+ * and sleep control rate */
+ ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ spending = 0x14;
+ else
+ spending = 0x18;
+ ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
+ ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
+ ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
+ ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
+ } else {
+ ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
+ ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
+ ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
+ }
+
+ /* Enable sleep clock operation */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_EN);
+
+ } else {
+
+ /* Disable sleep clock operation and
+ * restore default parameters */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_EN);
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
+
+ /* Set DAC/ADC delays */
+ ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
+ ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
+
+ if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
+ scal = AR5K_PHY_SCAL_32MHZ_2417;
+ else if (ee->ee_is_hb63)
+ scal = AR5K_PHY_SCAL_32MHZ_HB63;
+ else
+ scal = AR5K_PHY_SCAL_32MHZ;
+ ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
+
+ ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ spending = 0x14;
+ else
+ spending = 0x18;
+ ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
+
+ /* Set up tsf increment on each cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_radio == AR5K_RF2317))
+ sclock = 40 - 1;
+ else
+ sclock = 32 - 1;
+ AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, sclock);
+ }
+}
+
+
+/*********************\
+* Reset/Sleep control *
+\*********************/
+
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
+ */
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+{
+ int ret;
+ u32 mask = val ? val : ~0U;
+
+ /* Read-and-clear RX Descriptor Pointer*/
+ ath5k_hw_reg_read(ah, AR5K_RXDP);
+
+ /*
+ * Reset the device and wait until success
+ */
+ ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
+
+ /* Wait at least 128 PCI clocks */
+ usleep_range(15, 20);
+
+ if (ah->ah_version == AR5K_AR5210) {
+ val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
+ | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
+ mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
+ | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
+ } else {
+ val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
+ mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
+ }
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
+
+ /*
+ * Reset configuration register (for hw byte-swap). Note that this
+ * is only set for big endian. We do the necessary magic in
+ * AR5K_INIT_CFG.
+ */
+ if ((val & AR5K_RESET_CTL_PCU) == 0)
+ ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
+
+ return ret;
+}
+
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
+ */
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+{
+ u32 mask = flags ? flags : ~0U;
+ u32 __iomem *reg;
+ u32 regval;
+ u32 val = 0;
+
+ /* ah->ah_mac_srev is not available at this point yet */
+ if (ah->devid >= AR5K_SREV_AR2315_R6) {
+ reg = (u32 __iomem *) AR5K_AR2315_RESET;
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR2315_RESET_WMAC;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR2315_RESET_BB_WARM;
+ } else {
+ reg = (u32 __iomem *) AR5K_AR5312_RESET;
+ if (to_platform_device(ah->dev)->id == 0) {
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR5312_RESET_WMAC0;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR5312_RESET_BB0_COLD |
+ AR5K_AR5312_RESET_BB0_WARM;
+ } else {
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR5312_RESET_WMAC1;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR5312_RESET_BB1_COLD |
+ AR5K_AR5312_RESET_BB1_WARM;
+ }
+ }
+
+ /* Put BB/MAC into reset */
+ regval = ioread32(reg);
+ iowrite32(regval | val, reg);
+ regval = ioread32(reg);
+ udelay(100); /* NB: should be atomic */
+
+ /* Bring BB/MAC out of reset */
+ iowrite32(regval & ~val, reg);
+ regval = ioread32(reg);
+
+ /*
+ * Reset configuration register (for hw byte-swap). Note that this
+ * is only set for big endian. We do the necessary magic in
+ * AR5K_INIT_CFG.
+ */
+ if ((flags & AR5K_RESET_CTL_PCU) == 0)
+ ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register on reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
+ */
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+ bool set_chip, u16 sleep_duration)
+{
+ unsigned int i;
+ u32 staid, data;
+
+ staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
+
+ switch (mode) {
+ case AR5K_PM_AUTO:
+ staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
+ /* fallthrough */
+ case AR5K_PM_NETWORK_SLEEP:
+ if (set_chip)
+ ath5k_hw_reg_write(ah,
+ AR5K_SLEEP_CTL_SLE_ALLOW |
+ sleep_duration,
+ AR5K_SLEEP_CTL);
+
+ staid |= AR5K_STA_ID1_PWR_SV;
+ break;
+
+ case AR5K_PM_FULL_SLEEP:
+ if (set_chip)
+ ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
+ AR5K_SLEEP_CTL);
+
+ staid |= AR5K_STA_ID1_PWR_SV;
+ break;
+
+ case AR5K_PM_AWAKE:
+
+ staid &= ~AR5K_STA_ID1_PWR_SV;
+
+ if (!set_chip)
+ goto commit;
+
+ data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
+
+ /* If the card is down we'll get 0xffff... so we
+ * need to clean this up before we write the register
+ */
+ if (data & 0xffc00000)
+ data = 0;
+ else
+ /* Preserve sleep duration etc */
+ data = data & ~AR5K_SLEEP_CTL_SLE;
+
+ ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
+ AR5K_SLEEP_CTL);
+ usleep_range(15, 20);
+
+ for (i = 200; i > 0; i--) {
+ /* Check if the chip did wake up */
+ if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
+ AR5K_PCICFG_SPWR_DN) == 0)
+ break;
+
+ /* Wait a bit and retry */
+ usleep_range(50, 75);
+ ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
+ AR5K_SLEEP_CTL);
+ }
+
+ /* Fail if the chip didn't wake up */
+ if (i == 0)
+ return -EIO;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+commit:
+ ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
+
+ return 0;
+}
+
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
+ *
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clean sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
+ * without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
+ */
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
+{
+ struct pci_dev *pdev = ah->pdev;
+ u32 bus_flags;
+ int ret;
+
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ return 0;
+
+ /* Make sure device is awake */
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
+ if (ret) {
+ ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
+ return ret;
+ }
+
+ /*
+ * Put chipset on warm reset...
+ *
+ * Note: putting PCI core on warm reset on PCI-E cards
+ * causes the card to hang and always return 0xffff... so
+ * we ignore that flag for PCI-E cards. On PCI cards
+ * this flag gets cleared after 64 PCI clocks.
+ */
+ bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
+
+ if (ah->ah_version == AR5K_AR5210) {
+ ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
+ AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
+ usleep_range(2000, 2500);
+ } else {
+ ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_BASEBAND | bus_flags);
+ }
+
+ if (ret) {
+ ATH5K_ERR(ah, "failed to put device on warm reset\n");
+ return -EIO;
+ }
+
+ /* ...wakeup again!*/
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
+ if (ret) {
+ ATH5K_ERR(ah, "failed to put device on hold\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Bring up MAC + PHY Chips and program PLL
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
+ */
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+{
+ struct pci_dev *pdev = ah->pdev;
+ u32 turbo, mode, clock, bus_flags;
+ int ret;
+
+ turbo = 0;
+ mode = 0;
+ clock = 0;
+
+ if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
+ /* Wakeup the device */
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
+ if (ret) {
+ ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Put chipset on warm reset...
+ *
+ * Note: putting PCI core on warm reset on PCI-E cards
+ * causes the card to hang and always return 0xffff... so
+ * we ignore that flag for PCI-E cards. On PCI cards
+ * this flag gets cleared after 64 PCI clocks.
+ */
+ bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
+
+ if (ah->ah_version == AR5K_AR5210) {
+ ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
+ AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
+ usleep_range(2000, 2500);
+ } else {
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_BASEBAND);
+ else
+ ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_BASEBAND | bus_flags);
+ }
+
+ if (ret) {
+ ATH5K_ERR(ah, "failed to reset the MAC Chip\n");
+ return -EIO;
+ }
+
+ /* ...wakeup again!...*/
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
+ if (ret) {
+ ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
+ return ret;
+ }
+
+ /* ...reset configuration register on Wisoc ...
+ * ...clear reset control register and pull device out of
+ * warm reset on others */
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ ret = ath5k_hw_wisoc_reset(ah, 0);
+ else
+ ret = ath5k_hw_nic_reset(ah, 0);
+
+ if (ret) {
+ ATH5K_ERR(ah, "failed to warm reset the MAC Chip\n");
+ return -EIO;
+ }
+
+ /* On initialization skip PLL programming since we don't have
+ * a channel / mode set yet */
+ if (!channel)
+ return 0;
+
+ if (ah->ah_version != AR5K_AR5210) {
+ /*
+ * Get channel mode flags
+ */
+
+ if (ah->ah_radio >= AR5K_RF5112) {
+ mode = AR5K_PHY_MODE_RAD_RF5112;
+ clock = AR5K_PHY_PLL_RF5112;
+ } else {
+ mode = AR5K_PHY_MODE_RAD_RF5111; /*Zero*/
+ clock = AR5K_PHY_PLL_RF5111; /*Zero*/
+ }
+
+ if (channel->band == IEEE80211_BAND_2GHZ) {
+ mode |= AR5K_PHY_MODE_FREQ_2GHZ;
+ clock |= AR5K_PHY_PLL_44MHZ;
+
+ if (channel->hw_value == AR5K_MODE_11B) {
+ mode |= AR5K_PHY_MODE_MOD_CCK;
+ } else {
+ /* XXX Dynamic OFDM/CCK is not supported by the
+ * AR5211 so we set MOD_OFDM for plain g (no
+ * CCK headers) operation. We need to test
+ * this, 5211 might support ofdm-only g after
+ * all, there are also initial register values
+ * in the code for g mode (see initvals.c).
+ */
+ if (ah->ah_version == AR5K_AR5211)
+ mode |= AR5K_PHY_MODE_MOD_OFDM;
+ else
+ mode |= AR5K_PHY_MODE_MOD_DYN;
+ }
+ } else if (channel->band == IEEE80211_BAND_5GHZ) {
+ mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
+ AR5K_PHY_MODE_MOD_OFDM);
+
+ /* Different PLL setting for 5413 */
+ if (ah->ah_radio == AR5K_RF5413)
+ clock = AR5K_PHY_PLL_40MHZ_5413;
+ else
+ clock |= AR5K_PHY_PLL_40MHZ;
+ } else {
+ ATH5K_ERR(ah, "invalid radio frequency mode\n");
+ return -EINVAL;
+ }
+
+ /* XXX: Can bwmode be used with dynamic mode?
+ * (I don't think it supports 44MHz) */
+ /* On 2425 initvals TURBO_SHORT is not present */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
+ turbo = AR5K_PHY_TURBO_MODE;
+ if (ah->ah_radio != AR5K_RF2425)
+ turbo |= AR5K_PHY_TURBO_SHORT;
+ } else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
+ if (ah->ah_radio == AR5K_RF5413) {
+ mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
+ AR5K_PHY_MODE_HALF_RATE :
+ AR5K_PHY_MODE_QUARTER_RATE;
+ } else if (ah->ah_version == AR5K_AR5212) {
+ clock |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
+ AR5K_PHY_PLL_HALF_RATE :
+ AR5K_PHY_PLL_QUARTER_RATE;
+ }
+ }
+
+ } else { /* Reset the device */
+
+ /* ...enable Atheros turbo mode if requested */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
+ AR5K_PHY_TURBO);
+ }
+
+ if (ah->ah_version != AR5K_AR5210) {
+
+ /* ...update PLL if needed */
+ if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
+ ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
+ usleep_range(300, 350);
+ }
+
+ /* ...set the PHY operating mode */
+ ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
+ ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
+ }
+
+ return 0;
+}
+
+
+/**************************************\
+* Post-initvals register modifications *
+\**************************************/
+
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by initvals, e.g. bwmode
+ * settings, some phy settings, workarounds etc. that in general
+ * don't fit anywhere else or are too small to deserve a separate
+ * function each. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
+
+ /* Setup ADC control */
+ ath5k_hw_reg_write(ah,
+ (AR5K_REG_SM(2,
+ AR5K_PHY_ADC_CTL_INBUFGAIN_OFF) |
+ AR5K_REG_SM(2,
+ AR5K_PHY_ADC_CTL_INBUFGAIN_ON) |
+ AR5K_PHY_ADC_CTL_PWD_DAC_OFF |
+ AR5K_PHY_ADC_CTL_PWD_ADC_OFF),
+ AR5K_PHY_ADC_CTL);
+
+
+
+ /* Disable barker RSSI threshold */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
+ AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR);
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
+ AR5K_PHY_DAG_CCK_CTL_RSSI_THR, 2);
+
+ /* Set the mute mask */
+ ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
+ }
+
+ /* Clear PHY_BLUETOOTH to allow RX_CLEAR line debug */
+ if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212B)
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_BLUETOOTH);
+
+ /* Enable DCU double buffering */
+ if (ah->ah_phy_revision > AR5K_SREV_PHY_5212B)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_DCU_DBL_BUF_DIS);
+
+ /* Set fast ADC */
+ if ((ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2317) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
+ u32 fast_adc = true;
+
+ if (channel->center_freq == 2462 ||
+ channel->center_freq == 2467)
+ fast_adc = 0;
+
+ /* Only update if needed */
+ if (ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ADC) != fast_adc)
+ ath5k_hw_reg_write(ah, fast_adc,
+ AR5K_PHY_FAST_ADC);
+ }
+
+ /* Fix for first revision of the RF5112 RF chipset */
+ if (ah->ah_radio == AR5K_RF5112 &&
+ ah->ah_radio_5ghz_revision <
+ AR5K_SREV_RAD_5112A) {
+ u32 data;
+ ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
+ AR5K_PHY_CCKTXCTL);
+ if (channel->band == IEEE80211_BAND_5GHZ)
+ data = 0xffb81020;
+ else
+ data = 0xffb80d20;
+ ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
+ }
+
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+ /* Clear QCU/DCU clock gating register */
+ ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT);
+ /* Set DAC/ADC delays */
+ ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ_5311,
+ AR5K_PHY_SCAL);
+ /* Enable PCU FIFO corruption ECO */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
+ AR5K_DIAG_SW_ECO_ENABLE);
+ }
+
+ if (ah->ah_bwmode) {
+ /* Increase PHY switch and AGC settling time
+ * on turbo mode (ath5k_hw_commit_eeprom_settings
+ * will override settling time if available) */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_AGC,
+ AR5K_AGC_SETTLING_TURBO);
+
+ /* XXX: Initvals indicate we only increase
+ * switch time on AR5212, 5211 and 5210
+ * only change agc time (bug?) */
+ if (ah->ah_version == AR5K_AR5212)
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_SWITCH,
+ AR5K_SWITCH_SETTLING_TURBO);
+
+ if (ah->ah_version == AR5K_AR5210) {
+ /* Set Frame Control Register */
+ ath5k_hw_reg_write(ah,
+ (AR5K_PHY_FRAME_CTL_INI |
+ AR5K_PHY_TURBO_MODE |
+ AR5K_PHY_TURBO_SHORT | 0x2020),
+ AR5K_PHY_FRAME_CTL_5210);
+ }
+ /* On 5413 PHY force window length for half/quarter rate */
+ } else if ((ah->ah_mac_srev >= AR5K_SREV_AR5424) &&
+ (ah->ah_mac_srev <= AR5K_SREV_AR5414)) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL_5211,
+ AR5K_PHY_FRAME_CTL_WIN_LEN,
+ 3);
+ }
+ } else if (ah->ah_version == AR5K_AR5210) {
+ /* Set Frame Control Register for normal operation */
+ ath5k_hw_reg_write(ah, (AR5K_PHY_FRAME_CTL_INI | 0x1020),
+ AR5K_PHY_FRAME_CTL_5210);
+ }
+}
+
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in the EEPROM to properly initialize the card,
+ * based on various info and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ s16 cck_ofdm_pwr_delta;
+ u8 ee_mode;
+
+ /* TODO: Add support for AR5210 EEPROM */
+ if (ah->ah_version == AR5K_AR5210)
+ return;
+
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
+
+ /* Adjust power delta for channel 14 */
+ if (channel->center_freq == 2484)
+ cck_ofdm_pwr_delta =
+ ((ee->ee_cck_ofdm_power_delta -
+ ee->ee_scaled_cck_delta) * 2) / 10;
+ else
+ cck_ofdm_pwr_delta =
+ (ee->ee_cck_ofdm_power_delta * 2) / 10;
+
+ /* Set CCK to OFDM power delta on tx power
+ * adjustment register */
+ if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
+ if (channel->hw_value == AR5K_MODE_11G)
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
+ AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
+ AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
+ AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
+ AR5K_PHY_TX_PWR_ADJ);
+ else
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
+ } else {
+ /* For older revs we scale power on sw during tx power
+ * setup */
+ ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta;
+ ah->ah_txpower.txp_cck_ofdm_gainf_delta =
+ ee->ee_cck_ofdm_gain_delta;
+ }
+
+ /* XXX: necessary here? is called from ath5k_hw_set_antenna_mode()
+ * too */
+ ath5k_hw_set_antenna_switch(ah, ee_mode);
+
+ /* Noise floor threshold */
+ ath5k_hw_reg_write(ah,
+ AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
+ AR5K_PHY_NFTHRES);
+
+ if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
+ (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) {
+ /* Switch settling time (Turbo) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_SWITCH,
+ ee->ee_switch_settling_turbo[ee_mode]);
+
+ /* Tx/Rx attenuation (Turbo) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
+ AR5K_PHY_GAIN_TXRX_ATTEN,
+ ee->ee_atn_tx_rx_turbo[ee_mode]);
+
+ /* ADC/PGA desired size (Turbo) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_ADC,
+ ee->ee_adc_desired_size_turbo[ee_mode]);
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_PGA,
+ ee->ee_pga_desired_size_turbo[ee_mode]);
+
+ /* Tx/Rx margin (Turbo) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
+ AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
+ ee->ee_margin_tx_rx_turbo[ee_mode]);
+
+ } else {
+ /* Switch settling time */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_SWITCH,
+ ee->ee_switch_settling[ee_mode]);
+
+ /* Tx/Rx attenuation */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
+ AR5K_PHY_GAIN_TXRX_ATTEN,
+ ee->ee_atn_tx_rx[ee_mode]);
+
+ /* ADC/PGA desired size */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_ADC,
+ ee->ee_adc_desired_size[ee_mode]);
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_PGA,
+ ee->ee_pga_desired_size[ee_mode]);
+
+ /* Tx/Rx margin */
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
+ AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
+ ee->ee_margin_tx_rx[ee_mode]);
+ }
+
+ /* XPA delays */
+ ath5k_hw_reg_write(ah,
+ (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
+ (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
+ (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
+ (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
+
+ /* XLNA delay */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL3,
+ AR5K_PHY_RF_CTL3_TXE2XLNA_ON,
+ ee->ee_tx_end2xlna_enable[ee_mode]);
+
+	/* Thresh62 (ANI) */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_NF,
+ AR5K_PHY_NF_THRESH62,
+ ee->ee_thr_62[ee_mode]);
+
+ /* False detect backoff for channels
+ * that have spur noise. Write the new
+ * cyclic power RSSI threshold. */
+ if (ath5k_hw_chan_has_spur_noise(ah, channel))
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
+ AR5K_INIT_CYCRSSI_THR1 +
+ ee->ee_false_detect[ee_mode]);
+ else
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
+ AR5K_INIT_CYCRSSI_THR1);
+
+ /* I/Q correction (set enable bit last to match HAL sources) */
+	/* TODO: Per-channel I/Q info? */
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF,
+ ee->ee_i_cal[ee_mode]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF,
+ ee->ee_q_cal[ee_mode]);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
+ }
+
+	/* Heavy clipping - disable for now */
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
+ ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
+}
+
+
+/*********************\
+* Main reset function *
+\*********************/
+
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool fast, bool skip_pcu)
+{
+ u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
+ u8 mode;
+ int i, ret;
+
+ tsf_up = 0;
+ tsf_lo = 0;
+ mode = 0;
+
+ /*
+ * Sanity check for fast flag
+ * Fast channel change only available
+ * on AR2413/AR5413.
+ */
+ if (fast && (ah->ah_radio != AR5K_RF2413) &&
+ (ah->ah_radio != AR5K_RF5413))
+ fast = false;
+
+ /* Disable sleep clock operation
+ * to avoid register access delay on certain
+ * PHY registers */
+ if (ah->ah_version == AR5K_AR5212)
+ ath5k_hw_set_sleep_clock(ah, false);
+
+ mode = channel->hw_value;
+ switch (mode) {
+ case AR5K_MODE_11A:
+ break;
+ case AR5K_MODE_11G:
+ if (ah->ah_version <= AR5K_AR5211) {
+ ATH5K_ERR(ah,
+ "G mode not available on 5210/5211");
+ return -EINVAL;
+ }
+ break;
+ case AR5K_MODE_11B:
+ if (ah->ah_version < AR5K_AR5211) {
+ ATH5K_ERR(ah,
+ "B mode not available on 5210");
+ return -EINVAL;
+ }
+ break;
+ default:
+ ATH5K_ERR(ah,
+ "invalid channel: %d\n", channel->center_freq);
+ return -EINVAL;
+ }
+
+	/*
+	 * If the driver requested a fast channel change (and DMA has
+	 * stopped), try it first. If that fails, continue with a normal
+	 * reset.
+	 */
+ if (fast) {
+ ret = ath5k_hw_phy_init(ah, channel, mode, true);
+ if (ret) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "fast chan change failed, falling back to normal reset\n");
+			/* Non-fatal, can happen e.g.
+			 * on mode change */
+ ret = 0;
+ } else {
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "fast chan change successful\n");
+ return 0;
+ }
+ }
+
+ /*
+ * Save some registers before a reset
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ /*
+ * Save frame sequence count
+ * For revs. after Oahu, only save
+ * seq num for DCU 0 (Global seq num)
+ */
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+
+ for (i = 0; i < 10; i++)
+ s_seq[i] = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_DCU_SEQNUM(i));
+
+ } else {
+ s_seq[0] = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_DCU_SEQNUM(0));
+ }
+
+ /* TSF accelerates on AR5211 during reset
+ * As a workaround save it here and restore
+ * it later so that it's back in time after
+ * reset. This way it'll get re-synced on the
+ * next beacon without breaking ad-hoc.
+ *
+ * On AR5212 TSF is almost preserved across a
+ * reset so it stays back in time anyway and
+ * we don't have to save/restore it.
+ *
+ * XXX: Since this breaks power saving we have
+ * to disable power saving until we receive the
+ * next beacon, so we can resync beacon timers */
+ if (ah->ah_version == AR5K_AR5211) {
+ tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ }
+ }
+
+
+ /*GPIOs*/
+ s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) &
+ AR5K_PCICFG_LEDSTATE;
+ s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
+ s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
+
+
+	/*
+	 * Since we are going to write the RF buffer,
+	 * check if we have any pending gain_F
+	 * optimization settings.
+	 */
+ if (ah->ah_version == AR5K_AR5212 &&
+ (ah->ah_radio <= AR5K_RF5112)) {
+ if (!fast && ah->ah_rf_banks != NULL)
+ ath5k_hw_gainf_calibrate(ah);
+ }
+
+ /* Wakeup the device */
+ ret = ath5k_hw_nic_wakeup(ah, channel);
+ if (ret)
+ return ret;
+
+ /* PHY access enable */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+ else
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ | 0x40,
+ AR5K_PHY(0));
+
+ /* Write initial settings */
+ ret = ath5k_hw_write_initvals(ah, mode, skip_pcu);
+ if (ret)
+ return ret;
+
+ /* Initialize core clock settings */
+ ath5k_hw_init_core_clock(ah);
+
+ /*
+ * Tweak initval settings for revised
+ * chipsets and add some more config
+ * bits
+ */
+ ath5k_hw_tweak_initval_settings(ah, channel);
+
+ /* Commit values from EEPROM */
+ ath5k_hw_commit_eeprom_settings(ah, channel);
+
+
+ /*
+ * Restore saved values
+ */
+
+ /* Seqnum, TSF */
+ if (ah->ah_version != AR5K_AR5210) {
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+ for (i = 0; i < 10; i++)
+ ath5k_hw_reg_write(ah, s_seq[i],
+ AR5K_QUEUE_DCU_SEQNUM(i));
+ } else {
+ ath5k_hw_reg_write(ah, s_seq[0],
+ AR5K_QUEUE_DCU_SEQNUM(0));
+ }
+
+ if (ah->ah_version == AR5K_AR5211) {
+ ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
+ ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
+ }
+ }
+
+ /* Ledstate */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
+
+ /* Gpio settings */
+ ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
+ ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
+
+ /*
+ * Initialize PCU
+ */
+ ath5k_hw_pcu_init(ah, op_mode);
+
+ /*
+ * Initialize PHY
+ */
+ ret = ath5k_hw_phy_init(ah, channel, mode, false);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "failed to initialize PHY (%i) !\n", ret);
+ return ret;
+ }
+
+ /*
+ * Configure QCUs/DCUs
+ */
+ ret = ath5k_hw_init_queues(ah);
+ if (ret)
+ return ret;
+
+
+ /*
+ * Initialize DMA/Interrupts
+ */
+ ath5k_hw_dma_init(ah);
+
+
+ /*
+ * Enable 32KHz clock function for AR5212+ chips
+ * Set clocks to 32KHz operation and use an
+ * external 32KHz crystal when sleeping if one
+ * exists.
+ * Disabled by default because it is also disabled in
+ * other drivers and it is known to cause stability
+ * issues on some devices
+ */
+ if (ah->ah_use_32khz_clock && ah->ah_version == AR5K_AR5212 &&
+ op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
+
+ /*
+ * Disable beacons and reset the TSF
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
+ ath5k_hw_reset_tsf(ah);
+ return 0;
+}
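+
+/*
+ * Illustrative sketch (not part of the driver): a minimal caller of
+ * ath5k_hw_reset() as documented above, asking for a full (non-fast)
+ * reset that also re-initializes the PCU. The helper name is made up
+ * and the locking, interrupt masking and calibration handling done by
+ * the real driver are intentionally left out.
+ *
+ *	static int example_full_reset(struct ath5k_hw *ah,
+ *				      struct ieee80211_channel *chan)
+ *	{
+ *		int ret;
+ *
+ *		ret = ath5k_hw_reset(ah, NL80211_IFTYPE_STATION, chan,
+ *				     false, false);
+ *		if (ret)
+ *			ATH5K_ERR(ah, "hw reset failed (%d)\n", ret);
+ *		return ret;
+ *	}
+ */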
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
new file mode 100644
index 000000000..aed34d995
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -0,0 +1,853 @@
+/*
+ * RF Buffer handling functions
+ *
+ * Copyright (c) 2009 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+
+/**
+ * DOC: RF Buffer registers
+ *
+ * There are some special registers on the RF chip
+ * that control various operation settings related mostly to
+ * the analog parts (channel, gain adjustment etc).
+ *
+ * We don't write to those registers directly; instead
+ * we send a data packet to the chip, using a special register
+ * that holds all the settings we need. After we've sent the
+ * data packet, we write to another special register to notify the hw
+ * to apply the settings. This is done so that control registers
+ * can be dynamically programmed during operation and the settings
+ * are applied faster on the hw.
+ *
+ * We call each data packet an "RF Bank" and all the data we write
+ * (all RF Banks) the "RF Buffer". This file holds the initial RF Buffer
+ * data for the different RF chips, and various info to match RF
+ * Buffer offsets with specific RF registers so that we can access
+ * them. We tweak these settings in the rfregs_init function.
+ *
+ * Also check out reg.h and U.S. Patent 6677779 B1 (about buffer
+ * registers and control registers):
+ *
+ * http://www.google.com/patents?id=qNURAAAAEBAJ
+ */
+
+
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
+ * Struct to hold default mode specific RF
+ * register values (RF Banks) for each chip.
+ */
+struct ath5k_ini_rfbuffer {
+ u8 rfb_bank;
+ u16 rfb_ctrl_register;
+ u32 rfb_mode_data[3];
+};
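+
+/*
+ * Illustrative sketch (not part of the driver): the tables below are
+ * consumed by picking the column that matches the current mode and
+ * writing each word to that entry's control register. The helper name
+ * and the explicit mode index are assumptions; the real code also
+ * tweaks individual fields before sending the banks.
+ *
+ *	static void example_load_rfb(struct ath5k_hw *ah,
+ *				     const struct ath5k_ini_rfbuffer *rfb,
+ *				     unsigned int size, unsigned int mode_idx)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < size; i++)
+ *			ath5k_hw_reg_write(ah, rfb[i].rfb_mode_data[mode_idx],
+ *					   rfb[i].rfb_ctrl_register);
+ *	}
+ *
+ * e.g. example_load_rfb(ah, rfb_5111, ARRAY_SIZE(rfb_5111), 2) would
+ * send the G column of the RF5111 table.
+ */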
+
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Bit offset in the raw packet
+ * @col: Used for shifting
+ *
+ * Struct to hold RF Buffer field
+ * info used to access certain RF
+ * analog registers.
+ */
+struct ath5k_rfb_field {
+ u8 len;
+ u16 pos;
+ u8 col;
+};
+
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index on ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an RF
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
+ */
+struct ath5k_rf_reg {
+ u8 bank;
+ u8 index;
+ struct ath5k_rfb_field field;
+};
+
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
+ * We do this to handle common bits and make our
+ * life easier by using an index for each register
+ * instead of a full rfb_field
+ */
+enum ath5k_rf_regs_idx {
+ /* BANK 2 */
+ AR5K_RF_TURBO = 0,
+ /* BANK 6 */
+ AR5K_RF_OB_2GHZ,
+ AR5K_RF_OB_5GHZ,
+ AR5K_RF_DB_2GHZ,
+ AR5K_RF_DB_5GHZ,
+ AR5K_RF_FIXED_BIAS_A,
+ AR5K_RF_FIXED_BIAS_B,
+ AR5K_RF_PWD_XPD,
+ AR5K_RF_XPD_SEL,
+ AR5K_RF_XPD_GAIN,
+ AR5K_RF_PD_GAIN_LO,
+ AR5K_RF_PD_GAIN_HI,
+ AR5K_RF_HIGH_VC_CP,
+ AR5K_RF_MID_VC_CP,
+ AR5K_RF_LOW_VC_CP,
+ AR5K_RF_PUSH_UP,
+ AR5K_RF_PAD2GND,
+ AR5K_RF_XB2_LVL,
+ AR5K_RF_XB5_LVL,
+ AR5K_RF_PWD_ICLOBUF_2G,
+ AR5K_RF_PWD_84,
+ AR5K_RF_PWD_90,
+ AR5K_RF_PWD_130,
+ AR5K_RF_PWD_131,
+ AR5K_RF_PWD_132,
+ AR5K_RF_PWD_136,
+ AR5K_RF_PWD_137,
+ AR5K_RF_PWD_138,
+ AR5K_RF_PWD_166,
+ AR5K_RF_PWD_167,
+ AR5K_RF_DERBY_CHAN_SEL_MODE,
+ /* BANK 7 */
+ AR5K_RF_GAIN_I,
+ AR5K_RF_PLO_SEL,
+ AR5K_RF_RFGAIN_SEL,
+ AR5K_RF_RFGAIN_STEP,
+ AR5K_RF_WAIT_S,
+ AR5K_RF_WAIT_I,
+ AR5K_RF_MAX_TIME,
+ AR5K_RF_MIXVGA_OVR,
+ AR5K_RF_MIXGAIN_OVR,
+ AR5K_RF_MIXGAIN_STEP,
+ AR5K_RF_PD_DELAY_A,
+ AR5K_RF_PD_DELAY_B,
+ AR5K_RF_PD_DELAY_XR,
+ AR5K_RF_PD_PERIOD_A,
+ AR5K_RF_PD_PERIOD_B,
+ AR5K_RF_PD_PERIOD_XR,
+};
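+
+/*
+ * Illustrative sketch (not part of the driver): each rf_regs_* table
+ * below maps an enum ath5k_rf_regs_idx entry to the bank and bit-field
+ * that hold it, so a lookup is a simple linear search. The helper name
+ * is made up; the actual packing/unpacking of the field (including the
+ * "col" shifting) is left to the PHY code.
+ *
+ *	static const struct ath5k_rf_reg *
+ *	example_find_rf_reg(const struct ath5k_rf_reg *regs,
+ *			    unsigned int size, enum ath5k_rf_regs_idx idx)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < size; i++)
+ *			if (regs[i].index == idx)
+ *				return &regs[i];
+ *		return NULL;
+ *	}
+ *
+ * e.g. looking up AR5K_RF_OB_2GHZ in rf_regs_5111 yields bank 6 and a
+ * 3-bit field at bit offset 119.
+ */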
+
+
+/*******************\
+* RF5111 (Sombrero) *
+\*******************/
+
+/* BANK 2 len pos col */
+#define AR5K_RF5111_RF_TURBO { 1, 3, 0 }
+
+/* BANK 6 len pos col */
+#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 }
+#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 }
+
+#define AR5K_RF5111_OB_5GHZ { 3, 104, 0 }
+#define AR5K_RF5111_DB_5GHZ { 3, 107, 0 }
+
+#define AR5K_RF5111_PWD_XPD { 1, 95, 0 }
+#define AR5K_RF5111_XPD_GAIN { 4, 96, 0 }
+
+/* Access to PWD registers */
+#define AR5K_RF5111_PWD(_n) { 1, (135 - _n), 3 }
+
+/* BANK 7 len pos col */
+#define AR5K_RF5111_GAIN_I { 6, 29, 0 }
+#define AR5K_RF5111_PLO_SEL { 1, 4, 0 }
+#define AR5K_RF5111_RFGAIN_SEL { 1, 36, 0 }
+#define AR5K_RF5111_RFGAIN_STEP { 6, 37, 0 }
+/* Only on AR5212 BaseBand and up */
+#define AR5K_RF5111_WAIT_S { 5, 19, 0 }
+#define AR5K_RF5111_WAIT_I { 5, 24, 0 }
+#define AR5K_RF5111_MAX_TIME { 2, 49, 0 }
+
+static const struct ath5k_rf_reg rf_regs_5111[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5111_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ},
+ {6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ},
+ {6, AR5K_RF_DB_5GHZ, AR5K_RF5111_DB_5GHZ},
+ {6, AR5K_RF_PWD_XPD, AR5K_RF5111_PWD_XPD},
+ {6, AR5K_RF_XPD_GAIN, AR5K_RF5111_XPD_GAIN},
+ {6, AR5K_RF_PWD_84, AR5K_RF5111_PWD(84)},
+ {6, AR5K_RF_PWD_90, AR5K_RF5111_PWD(90)},
+ {7, AR5K_RF_GAIN_I, AR5K_RF5111_GAIN_I},
+ {7, AR5K_RF_PLO_SEL, AR5K_RF5111_PLO_SEL},
+ {7, AR5K_RF_RFGAIN_SEL, AR5K_RF5111_RFGAIN_SEL},
+ {7, AR5K_RF_RFGAIN_STEP, AR5K_RF5111_RFGAIN_STEP},
+ {7, AR5K_RF_WAIT_S, AR5K_RF5111_WAIT_S},
+ {7, AR5K_RF_WAIT_I, AR5K_RF5111_WAIT_I},
+ {7, AR5K_RF_MAX_TIME, AR5K_RF5111_MAX_TIME}
+};
+
+/* Default mode specific settings */
+static const struct ath5k_ini_rfbuffer rfb_5111[] = {
+ /* BANK / C.R. A/XR B G */
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00380000, 0x00380000, 0x00380000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x000000c0, 0x00000080 } },
+ { 0, 0x989c, { 0x000400f9, 0x000400ff, 0x000400fd } },
+ { 0, 0x98d4, { 0x00000000, 0x00000004, 0x00000004 } },
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d4, { 0x00000010, 0x00000010, 0x00000010 } },
+ { 3, 0x98d8, { 0x00601068, 0x00601068, 0x00601068 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x04000000, 0x04000000, 0x04000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x0a000000, 0x00000000 } },
+ { 6, 0x989c, { 0x003800c0, 0x023800c0, 0x003800c0 } },
+ { 6, 0x989c, { 0x00020006, 0x00000006, 0x00020006 } },
+ { 6, 0x989c, { 0x00000089, 0x00000089, 0x00000089 } },
+ { 6, 0x989c, { 0x000000a0, 0x000000a0, 0x000000a0 } },
+ { 6, 0x989c, { 0x00040007, 0x00040007, 0x00040007 } },
+ { 6, 0x98d4, { 0x0000001a, 0x0000001a, 0x0000001a } },
+ { 7, 0x989c, { 0x00000040, 0x00000040, 0x00000040 } },
+ { 7, 0x989c, { 0x00000010, 0x00000010, 0x00000010 } },
+ { 7, 0x989c, { 0x00000008, 0x00000008, 0x00000008 } },
+ { 7, 0x989c, { 0x0000004f, 0x0000004f, 0x0000004f } },
+ { 7, 0x989c, { 0x000000f1, 0x00000061, 0x000000f1 } },
+ { 7, 0x989c, { 0x0000904f, 0x0000904c, 0x0000904f } },
+ { 7, 0x989c, { 0x0000125a, 0x0000129a, 0x0000125a } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000f, 0x0000000e } },
+};
+
+
+
+/***********************\
+* RF5112/RF2112 (Derby) *
+\***********************/
+
+/* BANK 2 (Common) len pos col */
+#define AR5K_RF5112X_RF_TURBO { 1, 1, 2 }
+
+/* BANK 7 (Common) len pos col */
+#define AR5K_RF5112X_GAIN_I { 6, 14, 0 }
+#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 }
+#define AR5K_RF5112X_MIXGAIN_OVR { 2, 37, 0 }
+#define AR5K_RF5112X_MIXGAIN_STEP { 4, 32, 0 }
+#define AR5K_RF5112X_PD_DELAY_A { 4, 58, 0 }
+#define AR5K_RF5112X_PD_DELAY_B { 4, 62, 0 }
+#define AR5K_RF5112X_PD_DELAY_XR { 4, 66, 0 }
+#define AR5K_RF5112X_PD_PERIOD_A { 4, 70, 0 }
+#define AR5K_RF5112X_PD_PERIOD_B { 4, 74, 0 }
+#define AR5K_RF5112X_PD_PERIOD_XR { 4, 78, 0 }
+
+/* RFX112 (Derby 1) */
+
+/* BANK 6 len pos col */
+#define AR5K_RF5112_OB_2GHZ { 3, 269, 0 }
+#define AR5K_RF5112_DB_2GHZ { 3, 272, 0 }
+
+#define AR5K_RF5112_OB_5GHZ { 3, 261, 0 }
+#define AR5K_RF5112_DB_5GHZ { 3, 264, 0 }
+
+#define AR5K_RF5112_FIXED_BIAS_A { 1, 260, 0 }
+#define AR5K_RF5112_FIXED_BIAS_B { 1, 259, 0 }
+
+#define AR5K_RF5112_XPD_SEL { 1, 284, 0 }
+#define AR5K_RF5112_XPD_GAIN { 2, 252, 0 }
+
+/* Access to PWD registers */
+#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 }
+
+static const struct ath5k_rf_reg rf_regs_5112[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ},
+ {6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ},
+ {6, AR5K_RF_DB_5GHZ, AR5K_RF5112_DB_5GHZ},
+ {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112_FIXED_BIAS_A},
+ {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112_FIXED_BIAS_B},
+ {6, AR5K_RF_XPD_SEL, AR5K_RF5112_XPD_SEL},
+ {6, AR5K_RF_XPD_GAIN, AR5K_RF5112_XPD_GAIN},
+ {6, AR5K_RF_PWD_130, AR5K_RF5112_PWD(130)},
+ {6, AR5K_RF_PWD_131, AR5K_RF5112_PWD(131)},
+ {6, AR5K_RF_PWD_132, AR5K_RF5112_PWD(132)},
+ {6, AR5K_RF_PWD_136, AR5K_RF5112_PWD(136)},
+ {6, AR5K_RF_PWD_137, AR5K_RF5112_PWD(137)},
+ {6, AR5K_RF_PWD_138, AR5K_RF5112_PWD(138)},
+ {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
+ {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
+ {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
+ {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
+ {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
+ {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
+ {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
+ {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
+ {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
+ {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
+};
+
+/* Default mode specific settings */
+static const struct ath5k_ini_rfbuffer rfb_5112[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
+ { 3, 0x98dc, { 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
+ { 6, 0x989c, { 0x00a00000, 0x00a00000, 0x00a00000 } },
+ { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00660000, 0x00660000, 0x00660000 } },
+ { 6, 0x989c, { 0x00db0000, 0x00db0000, 0x00db0000 } },
+ { 6, 0x989c, { 0x00f10000, 0x00f10000, 0x00f10000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x008b0000, 0x008b0000, 0x008b0000 } },
+ { 6, 0x989c, { 0x00600000, 0x00600000, 0x00600000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
+ { 6, 0x989c, { 0x00640000, 0x00640000, 0x00640000 } },
+ { 6, 0x989c, { 0x00200000, 0x00200000, 0x00200000 } },
+ { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
+ { 6, 0x989c, { 0x00250000, 0x00250000, 0x00250000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x00510000, 0x00510000, 0x00510000 } },
+ { 6, 0x989c, { 0x1c040000, 0x1c040000, 0x1c040000 } },
+ { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
+ { 6, 0x989c, { 0x00a10000, 0x00a10000, 0x00a10000 } },
+ { 6, 0x989c, { 0x00400000, 0x00400000, 0x00400000 } },
+ { 6, 0x989c, { 0x03090000, 0x03090000, 0x03090000 } },
+ { 6, 0x989c, { 0x06000000, 0x06000000, 0x06000000 } },
+ { 6, 0x989c, { 0x000000b0, 0x000000a8, 0x000000a8 } },
+ { 6, 0x989c, { 0x0000002e, 0x0000002e, 0x0000002e } },
+ { 6, 0x989c, { 0x006c4a41, 0x006c4af1, 0x006c4a61 } },
+ { 6, 0x989c, { 0x0050892a, 0x0050892b, 0x0050892b } },
+ { 6, 0x989c, { 0x00842400, 0x00842400, 0x00842400 } },
+ { 6, 0x989c, { 0x00c69200, 0x00c69200, 0x00c69200 } },
+ { 6, 0x98d0, { 0x0002000c, 0x0002000c, 0x0002000c } },
+ { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
+ { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
+ { 7, 0x989c, { 0x0000000a, 0x00000012, 0x00000012 } },
+ { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
+ { 7, 0x989c, { 0x000000c1, 0x000000c1, 0x000000c1 } },
+ { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
+ { 7, 0x989c, { 0x00000022, 0x00000022, 0x00000022 } },
+ { 7, 0x989c, { 0x00000092, 0x00000092, 0x00000092 } },
+ { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
+ { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
+ { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
+ { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
+};
+
+/* RFX112A (Derby 2) */
+
+/* BANK 6 len pos col */
+#define AR5K_RF5112A_OB_2GHZ { 3, 287, 0 }
+#define AR5K_RF5112A_DB_2GHZ { 3, 290, 0 }
+
+#define AR5K_RF5112A_OB_5GHZ { 3, 279, 0 }
+#define AR5K_RF5112A_DB_5GHZ { 3, 282, 0 }
+
+#define AR5K_RF5112A_FIXED_BIAS_A { 1, 278, 0 }
+#define AR5K_RF5112A_FIXED_BIAS_B { 1, 277, 0 }
+
+#define AR5K_RF5112A_XPD_SEL { 1, 302, 0 }
+#define AR5K_RF5112A_PDGAINLO { 2, 270, 0 }
+#define AR5K_RF5112A_PDGAINHI { 2, 257, 0 }
+
+/* Access to PWD registers */
+#define AR5K_RF5112A_PWD(_n) { 1, (306 - _n), 3 }
+
+/* Voltage regulators */
+#define AR5K_RF5112A_HIGH_VC_CP { 2, 90, 2 }
+#define AR5K_RF5112A_MID_VC_CP { 2, 92, 2 }
+#define AR5K_RF5112A_LOW_VC_CP { 2, 94, 2 }
+#define AR5K_RF5112A_PUSH_UP { 1, 254, 2 }
+
+/* Power consumption */
+#define AR5K_RF5112A_PAD2GND { 1, 281, 1 }
+#define AR5K_RF5112A_XB2_LVL { 2, 1, 3 }
+#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 }
+
+static const struct ath5k_rf_reg rf_regs_5112a[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ},
+ {6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ},
+ {6, AR5K_RF_DB_5GHZ, AR5K_RF5112A_DB_5GHZ},
+ {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112A_FIXED_BIAS_A},
+ {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112A_FIXED_BIAS_B},
+ {6, AR5K_RF_XPD_SEL, AR5K_RF5112A_XPD_SEL},
+ {6, AR5K_RF_PD_GAIN_LO, AR5K_RF5112A_PDGAINLO},
+ {6, AR5K_RF_PD_GAIN_HI, AR5K_RF5112A_PDGAINHI},
+ {6, AR5K_RF_PWD_130, AR5K_RF5112A_PWD(130)},
+ {6, AR5K_RF_PWD_131, AR5K_RF5112A_PWD(131)},
+ {6, AR5K_RF_PWD_132, AR5K_RF5112A_PWD(132)},
+ {6, AR5K_RF_PWD_136, AR5K_RF5112A_PWD(136)},
+ {6, AR5K_RF_PWD_137, AR5K_RF5112A_PWD(137)},
+ {6, AR5K_RF_PWD_138, AR5K_RF5112A_PWD(138)},
+ {6, AR5K_RF_PWD_166, AR5K_RF5112A_PWD(166)},
+ {6, AR5K_RF_PWD_167, AR5K_RF5112A_PWD(167)},
+ {6, AR5K_RF_HIGH_VC_CP, AR5K_RF5112A_HIGH_VC_CP},
+ {6, AR5K_RF_MID_VC_CP, AR5K_RF5112A_MID_VC_CP},
+ {6, AR5K_RF_LOW_VC_CP, AR5K_RF5112A_LOW_VC_CP},
+ {6, AR5K_RF_PUSH_UP, AR5K_RF5112A_PUSH_UP},
+ {6, AR5K_RF_PAD2GND, AR5K_RF5112A_PAD2GND},
+ {6, AR5K_RF_XB2_LVL, AR5K_RF5112A_XB2_LVL},
+ {6, AR5K_RF_XB5_LVL, AR5K_RF5112A_XB5_LVL},
+ {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
+ {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
+ {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
+ {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
+ {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
+ {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
+ {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
+ {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
+ {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
+ {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
+};
+
+/* Default mode specific settings */
+static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00800000, 0x00800000, 0x00800000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00010000, 0x00010000, 0x00010000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00180000, 0x00180000, 0x00180000 } },
+ { 6, 0x989c, { 0x00600000, 0x006e0000, 0x006e0000 } },
+ { 6, 0x989c, { 0x00c70000, 0x00c70000, 0x00c70000 } },
+ { 6, 0x989c, { 0x004b0000, 0x004b0000, 0x004b0000 } },
+ { 6, 0x989c, { 0x04480000, 0x04480000, 0x04480000 } },
+ { 6, 0x989c, { 0x004c0000, 0x004c0000, 0x004c0000 } },
+ { 6, 0x989c, { 0x00e40000, 0x00e40000, 0x00e40000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x043f0000, 0x043f0000, 0x043f0000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x02190000, 0x02190000, 0x02190000 } },
+ { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
+ { 6, 0x989c, { 0x00b40000, 0x00b40000, 0x00b40000 } },
+ { 6, 0x989c, { 0x00990000, 0x00990000, 0x00990000 } },
+ { 6, 0x989c, { 0x00500000, 0x00500000, 0x00500000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0xc0320000, 0xc0320000, 0xc0320000 } },
+ { 6, 0x989c, { 0x01740000, 0x01740000, 0x01740000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x86280000, 0x86280000, 0x86280000 } },
+ { 6, 0x989c, { 0x31840000, 0x31840000, 0x31840000 } },
+ { 6, 0x989c, { 0x00f20080, 0x00f20080, 0x00f20080 } },
+ { 6, 0x989c, { 0x00270019, 0x00270019, 0x00270019 } },
+ { 6, 0x989c, { 0x00000003, 0x00000003, 0x00000003 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000000b2, 0x000000b2, 0x000000b2 } },
+ { 6, 0x989c, { 0x00b02084, 0x00b02084, 0x00b02084 } },
+ { 6, 0x989c, { 0x004125a4, 0x004125a4, 0x004125a4 } },
+ { 6, 0x989c, { 0x00119220, 0x00119220, 0x00119220 } },
+ { 6, 0x989c, { 0x001a4800, 0x001a4800, 0x001a4800 } },
+ { 6, 0x98d8, { 0x000b0230, 0x000b0230, 0x000b0230 } },
+ { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
+ { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
+ { 7, 0x989c, { 0x00000012, 0x00000012, 0x00000012 } },
+ { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
+ { 7, 0x989c, { 0x000000d9, 0x000000d9, 0x000000d9 } },
+ { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
+ { 7, 0x989c, { 0x000000a2, 0x000000a2, 0x000000a2 } },
+ { 7, 0x989c, { 0x00000052, 0x00000052, 0x00000052 } },
+ { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
+ { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
+ { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
+ { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
+};
+
+
+
+/******************\
+* RF2413 (Griffin) *
+\******************/
+
+/* BANK 2 len pos col */
+#define AR5K_RF2413_RF_TURBO { 1, 1, 2 }
+
+/* BANK 6 len pos col */
+#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
+#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
+
+static const struct ath5k_rf_reg rf_regs_2413[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2413_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ},
+};
+
+/* Default mode specific settings
+ * XXX: a/aTurbo ???
+ */
+static const struct ath5k_ini_rfbuffer rfb_2413[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0xf0000000, 0xf0000000, 0xf0000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x03000000, 0x03000000, 0x03000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x40400000, 0x40400000, 0x40400000 } },
+ { 6, 0x989c, { 0x65050000, 0x65050000, 0x65050000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00420000, 0x00420000, 0x00420000 } },
+ { 6, 0x989c, { 0x00b50000, 0x00b50000, 0x00b50000 } },
+ { 6, 0x989c, { 0x00030000, 0x00030000, 0x00030000 } },
+ { 6, 0x989c, { 0x00f70000, 0x00f70000, 0x00f70000 } },
+ { 6, 0x989c, { 0x009d0000, 0x009d0000, 0x009d0000 } },
+ { 6, 0x989c, { 0x00220000, 0x00220000, 0x00220000 } },
+ { 6, 0x989c, { 0x04220000, 0x04220000, 0x04220000 } },
+ { 6, 0x989c, { 0x00230018, 0x00230018, 0x00230018 } },
+ { 6, 0x989c, { 0x00280000, 0x00280060, 0x00280060 } },
+ { 6, 0x989c, { 0x005000c0, 0x005000c3, 0x005000c3 } },
+ { 6, 0x989c, { 0x0004007f, 0x0004007f, 0x0004007f } },
+ { 6, 0x989c, { 0x00000458, 0x00000458, 0x00000458 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x0000c000, 0x0000c000, 0x0000c000 } },
+ { 6, 0x98d8, { 0x00400230, 0x00400230, 0x00400230 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
+
+
+
+/***************************\
+* RF2315/RF2316 (Cobra SoC) *
+\***************************/
+
+/* BANK 2 len pos col */
+#define AR5K_RF2316_RF_TURBO { 1, 1, 2 }
+
+/* BANK 6 len pos col */
+#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 }
+#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 }
+
+static const struct ath5k_rf_reg rf_regs_2316[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2316_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ},
+};
+
+/* Default mode specific settings */
+static const struct ath5k_ini_rfbuffer rfb_2316[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0xc0000000, 0xc0000000, 0xc0000000 } },
+ { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
+ { 6, 0x989c, { 0x02000000, 0x02000000, 0x02000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0xf8000000, 0xf8000000, 0xf8000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x95150000, 0x95150000, 0x95150000 } },
+ { 6, 0x989c, { 0xc1000000, 0xc1000000, 0xc1000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00080000, 0x00080000, 0x00080000 } },
+ { 6, 0x989c, { 0x00d50000, 0x00d50000, 0x00d50000 } },
+ { 6, 0x989c, { 0x000e0000, 0x000e0000, 0x000e0000 } },
+ { 6, 0x989c, { 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
+ { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
+ { 6, 0x989c, { 0x008a0000, 0x008a0000, 0x008a0000 } },
+ { 6, 0x989c, { 0x10880000, 0x10880000, 0x10880000 } },
+ { 6, 0x989c, { 0x008c0060, 0x008c0060, 0x008c0060 } },
+ { 6, 0x989c, { 0x00a00000, 0x00a00080, 0x00a00080 } },
+ { 6, 0x989c, { 0x00400000, 0x0040000d, 0x0040000d } },
+ { 6, 0x989c, { 0x00110400, 0x00110400, 0x00110400 } },
+ { 6, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 6, 0x989c, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 6, 0x989c, { 0x00000b00, 0x00000b00, 0x00000b00 } },
+ { 6, 0x989c, { 0x00000be8, 0x00000be8, 0x00000be8 } },
+ { 6, 0x98c0, { 0x00010000, 0x00010000, 0x00010000 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
+
+
+
+/******************************\
+* RF5413/RF5424 (Eagle/Condor) *
+\******************************/
+
+/* BANK 6 len pos col */
+#define AR5K_RF5413_OB_2GHZ { 3, 241, 0 }
+#define AR5K_RF5413_DB_2GHZ { 3, 238, 0 }
+
+#define AR5K_RF5413_OB_5GHZ { 3, 247, 0 }
+#define AR5K_RF5413_DB_5GHZ { 3, 244, 0 }
+
+#define AR5K_RF5413_PWD_ICLOBUF2G { 3, 131, 3 }
+#define AR5K_RF5413_DERBY_CHAN_SEL_MODE { 1, 291, 2 }
+
+static const struct ath5k_rf_reg rf_regs_5413[] = {
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF5413_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF5413_DB_2GHZ},
+ {6, AR5K_RF_OB_5GHZ, AR5K_RF5413_OB_5GHZ},
+ {6, AR5K_RF_DB_5GHZ, AR5K_RF5413_DB_5GHZ},
+ {6, AR5K_RF_PWD_ICLOBUF_2G, AR5K_RF5413_PWD_ICLOBUF2G},
+ {6, AR5K_RF_DERBY_CHAN_SEL_MODE, AR5K_RF5413_DERBY_CHAN_SEL_MODE},
+};
+
+/* Default mode specific settings */
+static const struct ath5k_ini_rfbuffer rfb_5413[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x00000008, 0x00000008, 0x00000008 } },
+ { 3, 0x98dc, { 0x00a000c0, 0x00e000c0, 0x00e000c0 } },
+ { 6, 0x989c, { 0x33000000, 0x33000000, 0x33000000 } },
+ { 6, 0x989c, { 0x01000000, 0x01000000, 0x01000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x1f000000, 0x1f000000, 0x1f000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00b80000, 0x00b80000, 0x00b80000 } },
+ { 6, 0x989c, { 0x00b70000, 0x00b70000, 0x00b70000 } },
+ { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
+ { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
+ { 6, 0x989c, { 0x00c00000, 0x00c00000, 0x00c00000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00d70000, 0x00d70000, 0x00d70000 } },
+ { 6, 0x989c, { 0x00610000, 0x00610000, 0x00610000 } },
+ { 6, 0x989c, { 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
+ { 6, 0x989c, { 0x00de0000, 0x00de0000, 0x00de0000 } },
+ { 6, 0x989c, { 0x007f0000, 0x007f0000, 0x007f0000 } },
+ { 6, 0x989c, { 0x043d0000, 0x043d0000, 0x043d0000 } },
+ { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
+ { 6, 0x989c, { 0x00440000, 0x00440000, 0x00440000 } },
+ { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
+ { 6, 0x989c, { 0x00100080, 0x00100080, 0x00100080 } },
+ { 6, 0x989c, { 0x0005c034, 0x0005c034, 0x0005c034 } },
+ { 6, 0x989c, { 0x003100f0, 0x003100f0, 0x003100f0 } },
+ { 6, 0x989c, { 0x000c011f, 0x000c011f, 0x000c011f } },
+ { 6, 0x989c, { 0x00510040, 0x00510040, 0x00510040 } },
+ { 6, 0x989c, { 0x005000da, 0x005000da, 0x005000da } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00004044, 0x00004044, 0x00004044 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000060c0, 0x000060c0, 0x000060c0 } },
+ { 6, 0x989c, { 0x00002c00, 0x00003600, 0x00003600 } },
+ { 6, 0x98c8, { 0x00000403, 0x00040403, 0x00040403 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
+
+
+
+/***************************\
+* RF2425/RF2417 (Swan/Nala) *
+* AR2317 (Spider SoC) *
+\***************************/
+
+/* BANK 2 len pos col */
+#define AR5K_RF2425_RF_TURBO { 1, 1, 2 }
+
+/* BANK 6 len pos col */
+#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 }
+#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 }
+
+static const struct ath5k_rf_reg rf_regs_2425[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2425_RF_TURBO},
+ {6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ},
+ {6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ},
+};
+
+/* Default mode specific settings
+ */
+static const struct ath5k_ini_rfbuffer rfb_2425[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
+ { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
+
+/*
+ * TODO: Handle the few differences with swan during
+ * bank modification and get rid of this
+ */
+static const struct ath5k_ini_rfbuffer rfb_2317[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
+ { 6, 0x989c, { 0x00140100, 0x00140100, 0x00140100 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00009688, 0x00009688, 0x00009688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
+
+/*
+ * TODO: Handle the few differences with swan during
+ * bank modification and get rid of this
+ */
+static const struct ath5k_ini_rfbuffer rfb_2417[] = {
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x80e70000, 0x80e70000 } },
+ { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0207001a, 0x0207001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
+};
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
new file mode 100644
index 000000000..4d21df0e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -0,0 +1,534 @@
+/*
+ * RF Gain optimization
+ *
+ * Copyright (c) 2004-2009 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
+ * Mode-specific RF Gain table (64 entries) for RF5111/5112
+ * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
+ * RF Gain values are included in AR5K_AR5210_INI)
+ */
+struct ath5k_ini_rfgain {
+ u16 rfg_register;
+ u32 rfg_value[2]; /* [freq (see below)] */
+};
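+
+/*
+ * Illustrative sketch (not part of the driver): the tables below keep
+ * one value per band, so loading the initial RF gain settings amounts
+ * to picking the right column (0 for 5GHz, 1 for 2GHz, matching the
+ * column headers) and writing it to the listed register. The helper
+ * name and the band parameter are assumptions.
+ *
+ *	static void example_write_rfgain(struct ath5k_hw *ah,
+ *					 const struct ath5k_ini_rfgain *rfg,
+ *					 unsigned int size, bool is_2ghz)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < size; i++)
+ *			ath5k_hw_reg_write(ah,
+ *					   rfg[i].rfg_value[is_2ghz ? 1 : 0],
+ *					   rfg[i].rfg_register);
+ *	}
+ */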
+
+/* Initial RF Gain settings for RF5111 */
+static const struct ath5k_ini_rfgain rfgain_5111[] = {
+ /* 5GHz 2GHz */
+ { AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x00000069, 0x00000150 } },
+ { AR5K_RF_GAIN(4), { 0x00000199, 0x00000190 } },
+ { AR5K_RF_GAIN(5), { 0x000001d9, 0x000001d0 } },
+ { AR5K_RF_GAIN(6), { 0x00000019, 0x00000010 } },
+ { AR5K_RF_GAIN(7), { 0x00000059, 0x00000044 } },
+ { AR5K_RF_GAIN(8), { 0x00000099, 0x00000084 } },
+ { AR5K_RF_GAIN(9), { 0x000001a5, 0x00000148 } },
+ { AR5K_RF_GAIN(10), { 0x000001e5, 0x00000188 } },
+ { AR5K_RF_GAIN(11), { 0x00000025, 0x000001c8 } },
+ { AR5K_RF_GAIN(12), { 0x000001c8, 0x00000014 } },
+ { AR5K_RF_GAIN(13), { 0x00000008, 0x00000042 } },
+ { AR5K_RF_GAIN(14), { 0x00000048, 0x00000082 } },
+ { AR5K_RF_GAIN(15), { 0x00000088, 0x00000178 } },
+ { AR5K_RF_GAIN(16), { 0x00000198, 0x000001b8 } },
+ { AR5K_RF_GAIN(17), { 0x000001d8, 0x000001f8 } },
+ { AR5K_RF_GAIN(18), { 0x00000018, 0x00000012 } },
+ { AR5K_RF_GAIN(19), { 0x00000058, 0x00000052 } },
+ { AR5K_RF_GAIN(20), { 0x00000098, 0x00000092 } },
+ { AR5K_RF_GAIN(21), { 0x000001a4, 0x0000017c } },
+ { AR5K_RF_GAIN(22), { 0x000001e4, 0x000001bc } },
+ { AR5K_RF_GAIN(23), { 0x00000024, 0x000001fc } },
+ { AR5K_RF_GAIN(24), { 0x00000064, 0x0000000a } },
+ { AR5K_RF_GAIN(25), { 0x000000a4, 0x0000004a } },
+ { AR5K_RF_GAIN(26), { 0x000000e4, 0x0000008a } },
+ { AR5K_RF_GAIN(27), { 0x0000010a, 0x0000015a } },
+ { AR5K_RF_GAIN(28), { 0x0000014a, 0x0000019a } },
+ { AR5K_RF_GAIN(29), { 0x0000018a, 0x000001da } },
+ { AR5K_RF_GAIN(30), { 0x000001ca, 0x0000000e } },
+ { AR5K_RF_GAIN(31), { 0x0000000a, 0x0000004e } },
+ { AR5K_RF_GAIN(32), { 0x0000004a, 0x0000008e } },
+ { AR5K_RF_GAIN(33), { 0x0000008a, 0x0000015e } },
+ { AR5K_RF_GAIN(34), { 0x000001ba, 0x0000019e } },
+ { AR5K_RF_GAIN(35), { 0x000001fa, 0x000001de } },
+ { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000009 } },
+ { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000049 } },
+ { AR5K_RF_GAIN(38), { 0x00000186, 0x00000089 } },
+ { AR5K_RF_GAIN(39), { 0x000001c6, 0x00000179 } },
+ { AR5K_RF_GAIN(40), { 0x00000006, 0x000001b9 } },
+ { AR5K_RF_GAIN(41), { 0x00000046, 0x000001f9 } },
+ { AR5K_RF_GAIN(42), { 0x00000086, 0x00000039 } },
+ { AR5K_RF_GAIN(43), { 0x000000c6, 0x00000079 } },
+ { AR5K_RF_GAIN(44), { 0x000000c6, 0x000000b9 } },
+ { AR5K_RF_GAIN(45), { 0x000000c6, 0x000001bd } },
+ { AR5K_RF_GAIN(46), { 0x000000c6, 0x000001fd } },
+ { AR5K_RF_GAIN(47), { 0x000000c6, 0x0000003d } },
+ { AR5K_RF_GAIN(48), { 0x000000c6, 0x0000007d } },
+ { AR5K_RF_GAIN(49), { 0x000000c6, 0x000000bd } },
+ { AR5K_RF_GAIN(50), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(51), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(52), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(53), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(54), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(55), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(56), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(57), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(58), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(59), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(60), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(61), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(62), { 0x000000c6, 0x000000fd } },
+ { AR5K_RF_GAIN(63), { 0x000000c6, 0x000000fd } },
+};
+
+/* Initial RF Gain settings for RF5112 */
+static const struct ath5k_ini_rfgain rfgain_5112[] = {
+ /* 5GHz 2GHz */
+ { AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } },
+ { AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } },
+ { AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } },
+ { AR5K_RF_GAIN(3), { 0x000001a0, 0x000001a0 } },
+ { AR5K_RF_GAIN(4), { 0x000001e0, 0x000001e0 } },
+ { AR5K_RF_GAIN(5), { 0x00000020, 0x00000020 } },
+ { AR5K_RF_GAIN(6), { 0x00000060, 0x00000060 } },
+ { AR5K_RF_GAIN(7), { 0x000001a1, 0x000001a1 } },
+ { AR5K_RF_GAIN(8), { 0x000001e1, 0x000001e1 } },
+ { AR5K_RF_GAIN(9), { 0x00000021, 0x00000021 } },
+ { AR5K_RF_GAIN(10), { 0x00000061, 0x00000061 } },
+ { AR5K_RF_GAIN(11), { 0x00000162, 0x00000162 } },
+ { AR5K_RF_GAIN(12), { 0x000001a2, 0x000001a2 } },
+ { AR5K_RF_GAIN(13), { 0x000001e2, 0x000001e2 } },
+ { AR5K_RF_GAIN(14), { 0x00000022, 0x00000022 } },
+ { AR5K_RF_GAIN(15), { 0x00000062, 0x00000062 } },
+ { AR5K_RF_GAIN(16), { 0x00000163, 0x00000163 } },
+ { AR5K_RF_GAIN(17), { 0x000001a3, 0x000001a3 } },
+ { AR5K_RF_GAIN(18), { 0x000001e3, 0x000001e3 } },
+ { AR5K_RF_GAIN(19), { 0x00000023, 0x00000023 } },
+ { AR5K_RF_GAIN(20), { 0x00000063, 0x00000063 } },
+ { AR5K_RF_GAIN(21), { 0x00000184, 0x00000184 } },
+ { AR5K_RF_GAIN(22), { 0x000001c4, 0x000001c4 } },
+ { AR5K_RF_GAIN(23), { 0x00000004, 0x00000004 } },
+ { AR5K_RF_GAIN(24), { 0x000001ea, 0x0000000b } },
+ { AR5K_RF_GAIN(25), { 0x0000002a, 0x0000004b } },
+ { AR5K_RF_GAIN(26), { 0x0000006a, 0x0000008b } },
+ { AR5K_RF_GAIN(27), { 0x000000aa, 0x000001ac } },
+ { AR5K_RF_GAIN(28), { 0x000001ab, 0x000001ec } },
+ { AR5K_RF_GAIN(29), { 0x000001eb, 0x0000002c } },
+ { AR5K_RF_GAIN(30), { 0x0000002b, 0x00000012 } },
+ { AR5K_RF_GAIN(31), { 0x0000006b, 0x00000052 } },
+ { AR5K_RF_GAIN(32), { 0x000000ab, 0x00000092 } },
+ { AR5K_RF_GAIN(33), { 0x000001ac, 0x00000193 } },
+ { AR5K_RF_GAIN(34), { 0x000001ec, 0x000001d3 } },
+ { AR5K_RF_GAIN(35), { 0x0000002c, 0x00000013 } },
+ { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000053 } },
+ { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000093 } },
+ { AR5K_RF_GAIN(38), { 0x000000ba, 0x00000194 } },
+ { AR5K_RF_GAIN(39), { 0x000001bb, 0x000001d4 } },
+ { AR5K_RF_GAIN(40), { 0x000001fb, 0x00000014 } },
+ { AR5K_RF_GAIN(41), { 0x0000003b, 0x0000003a } },
+ { AR5K_RF_GAIN(42), { 0x0000007b, 0x0000007a } },
+ { AR5K_RF_GAIN(43), { 0x000000bb, 0x000000ba } },
+ { AR5K_RF_GAIN(44), { 0x000001bc, 0x000001bb } },
+ { AR5K_RF_GAIN(45), { 0x000001fc, 0x000001fb } },
+ { AR5K_RF_GAIN(46), { 0x0000003c, 0x0000003b } },
+ { AR5K_RF_GAIN(47), { 0x0000007c, 0x0000007b } },
+ { AR5K_RF_GAIN(48), { 0x000000bc, 0x000000bb } },
+ { AR5K_RF_GAIN(49), { 0x000000fc, 0x000001bc } },
+ { AR5K_RF_GAIN(50), { 0x000000fc, 0x000001fc } },
+ { AR5K_RF_GAIN(51), { 0x000000fc, 0x0000003c } },
+ { AR5K_RF_GAIN(52), { 0x000000fc, 0x0000007c } },
+ { AR5K_RF_GAIN(53), { 0x000000fc, 0x000000bc } },
+ { AR5K_RF_GAIN(54), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(55), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(56), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(57), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(58), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(59), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(60), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(61), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(62), { 0x000000fc, 0x000000fc } },
+ { AR5K_RF_GAIN(63), { 0x000000fc, 0x000000fc } },
+};
+
+/* Initial RF Gain settings for RF2413 */
+static const struct ath5k_ini_rfgain rfgain_2413[] = {
+ { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
+ { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
+ { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
+ { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
+ { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
+ { AR5K_RF_GAIN(8), { 0x00000000, 0x00000168 } },
+ { AR5K_RF_GAIN(9), { 0x00000000, 0x000001a8 } },
+ { AR5K_RF_GAIN(10), { 0x00000000, 0x000001e8 } },
+ { AR5K_RF_GAIN(11), { 0x00000000, 0x00000028 } },
+ { AR5K_RF_GAIN(12), { 0x00000000, 0x00000068 } },
+ { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
+ { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
+ { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
+ { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
+ { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
+ { AR5K_RF_GAIN(18), { 0x00000000, 0x00000190 } },
+ { AR5K_RF_GAIN(19), { 0x00000000, 0x000001d0 } },
+ { AR5K_RF_GAIN(20), { 0x00000000, 0x00000010 } },
+ { AR5K_RF_GAIN(21), { 0x00000000, 0x00000050 } },
+ { AR5K_RF_GAIN(22), { 0x00000000, 0x00000090 } },
+ { AR5K_RF_GAIN(23), { 0x00000000, 0x00000191 } },
+ { AR5K_RF_GAIN(24), { 0x00000000, 0x000001d1 } },
+ { AR5K_RF_GAIN(25), { 0x00000000, 0x00000011 } },
+ { AR5K_RF_GAIN(26), { 0x00000000, 0x00000051 } },
+ { AR5K_RF_GAIN(27), { 0x00000000, 0x00000091 } },
+ { AR5K_RF_GAIN(28), { 0x00000000, 0x00000178 } },
+ { AR5K_RF_GAIN(29), { 0x00000000, 0x000001b8 } },
+ { AR5K_RF_GAIN(30), { 0x00000000, 0x000001f8 } },
+ { AR5K_RF_GAIN(31), { 0x00000000, 0x00000038 } },
+ { AR5K_RF_GAIN(32), { 0x00000000, 0x00000078 } },
+ { AR5K_RF_GAIN(33), { 0x00000000, 0x00000199 } },
+ { AR5K_RF_GAIN(34), { 0x00000000, 0x000001d9 } },
+ { AR5K_RF_GAIN(35), { 0x00000000, 0x00000019 } },
+ { AR5K_RF_GAIN(36), { 0x00000000, 0x00000059 } },
+ { AR5K_RF_GAIN(37), { 0x00000000, 0x00000099 } },
+ { AR5K_RF_GAIN(38), { 0x00000000, 0x000000d9 } },
+ { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
+};
+
+/* Initial RF Gain settings for AR2316 */
+static const struct ath5k_ini_rfgain rfgain_2316[] = {
+ { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x00000000, 0x000000c0 } },
+ { AR5K_RF_GAIN(4), { 0x00000000, 0x000000e0 } },
+ { AR5K_RF_GAIN(5), { 0x00000000, 0x000000e0 } },
+ { AR5K_RF_GAIN(6), { 0x00000000, 0x00000128 } },
+ { AR5K_RF_GAIN(7), { 0x00000000, 0x00000128 } },
+ { AR5K_RF_GAIN(8), { 0x00000000, 0x00000128 } },
+ { AR5K_RF_GAIN(9), { 0x00000000, 0x00000168 } },
+ { AR5K_RF_GAIN(10), { 0x00000000, 0x000001a8 } },
+ { AR5K_RF_GAIN(11), { 0x00000000, 0x000001e8 } },
+ { AR5K_RF_GAIN(12), { 0x00000000, 0x00000028 } },
+ { AR5K_RF_GAIN(13), { 0x00000000, 0x00000068 } },
+ { AR5K_RF_GAIN(14), { 0x00000000, 0x000000a8 } },
+ { AR5K_RF_GAIN(15), { 0x00000000, 0x000000e8 } },
+ { AR5K_RF_GAIN(16), { 0x00000000, 0x000000e8 } },
+ { AR5K_RF_GAIN(17), { 0x00000000, 0x00000130 } },
+ { AR5K_RF_GAIN(18), { 0x00000000, 0x00000130 } },
+ { AR5K_RF_GAIN(19), { 0x00000000, 0x00000170 } },
+ { AR5K_RF_GAIN(20), { 0x00000000, 0x000001b0 } },
+ { AR5K_RF_GAIN(21), { 0x00000000, 0x000001f0 } },
+ { AR5K_RF_GAIN(22), { 0x00000000, 0x00000030 } },
+ { AR5K_RF_GAIN(23), { 0x00000000, 0x00000070 } },
+ { AR5K_RF_GAIN(24), { 0x00000000, 0x000000b0 } },
+ { AR5K_RF_GAIN(25), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(26), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(27), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(28), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(29), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(30), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(31), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(32), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(33), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(34), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(35), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(36), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f0 } },
+ { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f0 } },
+};
+
+
+/* Initial RF Gain settings for RF5413 */
+static const struct ath5k_ini_rfgain rfgain_5413[] = {
+ /* 5GHz 2GHz */
+ { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x000001a1, 0x00000161 } },
+ { AR5K_RF_GAIN(4), { 0x000001e1, 0x000001a1 } },
+ { AR5K_RF_GAIN(5), { 0x00000021, 0x000001e1 } },
+ { AR5K_RF_GAIN(6), { 0x00000061, 0x00000021 } },
+ { AR5K_RF_GAIN(7), { 0x00000188, 0x00000061 } },
+ { AR5K_RF_GAIN(8), { 0x000001c8, 0x00000188 } },
+ { AR5K_RF_GAIN(9), { 0x00000008, 0x000001c8 } },
+ { AR5K_RF_GAIN(10), { 0x00000048, 0x00000008 } },
+ { AR5K_RF_GAIN(11), { 0x00000088, 0x00000048 } },
+ { AR5K_RF_GAIN(12), { 0x000001a9, 0x00000088 } },
+ { AR5K_RF_GAIN(13), { 0x000001e9, 0x00000169 } },
+ { AR5K_RF_GAIN(14), { 0x00000029, 0x000001a9 } },
+ { AR5K_RF_GAIN(15), { 0x00000069, 0x000001e9 } },
+ { AR5K_RF_GAIN(16), { 0x000001d0, 0x00000029 } },
+ { AR5K_RF_GAIN(17), { 0x00000010, 0x00000069 } },
+ { AR5K_RF_GAIN(18), { 0x00000050, 0x00000190 } },
+ { AR5K_RF_GAIN(19), { 0x00000090, 0x000001d0 } },
+ { AR5K_RF_GAIN(20), { 0x000001b1, 0x00000010 } },
+ { AR5K_RF_GAIN(21), { 0x000001f1, 0x00000050 } },
+ { AR5K_RF_GAIN(22), { 0x00000031, 0x00000090 } },
+ { AR5K_RF_GAIN(23), { 0x00000071, 0x00000171 } },
+ { AR5K_RF_GAIN(24), { 0x000001b8, 0x000001b1 } },
+ { AR5K_RF_GAIN(25), { 0x000001f8, 0x000001f1 } },
+ { AR5K_RF_GAIN(26), { 0x00000038, 0x00000031 } },
+ { AR5K_RF_GAIN(27), { 0x00000078, 0x00000071 } },
+ { AR5K_RF_GAIN(28), { 0x00000199, 0x00000198 } },
+ { AR5K_RF_GAIN(29), { 0x000001d9, 0x000001d8 } },
+ { AR5K_RF_GAIN(30), { 0x00000019, 0x00000018 } },
+ { AR5K_RF_GAIN(31), { 0x00000059, 0x00000058 } },
+ { AR5K_RF_GAIN(32), { 0x00000099, 0x00000098 } },
+ { AR5K_RF_GAIN(33), { 0x000000d9, 0x00000179 } },
+ { AR5K_RF_GAIN(34), { 0x000000f9, 0x000001b9 } },
+ { AR5K_RF_GAIN(35), { 0x000000f9, 0x000001f9 } },
+ { AR5K_RF_GAIN(36), { 0x000000f9, 0x00000039 } },
+ { AR5K_RF_GAIN(37), { 0x000000f9, 0x00000079 } },
+ { AR5K_RF_GAIN(38), { 0x000000f9, 0x000000b9 } },
+ { AR5K_RF_GAIN(39), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(40), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(41), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(42), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(43), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(44), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(45), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(46), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(47), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(48), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(49), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(50), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(51), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(52), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(53), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(54), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(55), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(56), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(57), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(58), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(59), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(60), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(61), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(62), { 0x000000f9, 0x000000f9 } },
+ { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } },
+};
+
+
+/* Initial RF Gain settings for RF2425 */
+static const struct ath5k_ini_rfgain rfgain_2425[] = {
+ { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
+ { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
+ { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
+ { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
+ { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
+ { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
+ { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
+ { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
+ { AR5K_RF_GAIN(8), { 0x00000000, 0x00000188 } },
+ { AR5K_RF_GAIN(9), { 0x00000000, 0x000001c8 } },
+ { AR5K_RF_GAIN(10), { 0x00000000, 0x00000008 } },
+ { AR5K_RF_GAIN(11), { 0x00000000, 0x00000048 } },
+ { AR5K_RF_GAIN(12), { 0x00000000, 0x00000088 } },
+ { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
+ { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
+ { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
+ { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
+ { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
+ { AR5K_RF_GAIN(18), { 0x00000000, 0x000001b0 } },
+ { AR5K_RF_GAIN(19), { 0x00000000, 0x000001f0 } },
+ { AR5K_RF_GAIN(20), { 0x00000000, 0x00000030 } },
+ { AR5K_RF_GAIN(21), { 0x00000000, 0x00000070 } },
+ { AR5K_RF_GAIN(22), { 0x00000000, 0x00000171 } },
+ { AR5K_RF_GAIN(23), { 0x00000000, 0x000001b1 } },
+ { AR5K_RF_GAIN(24), { 0x00000000, 0x000001f1 } },
+ { AR5K_RF_GAIN(25), { 0x00000000, 0x00000031 } },
+ { AR5K_RF_GAIN(26), { 0x00000000, 0x00000071 } },
+ { AR5K_RF_GAIN(27), { 0x00000000, 0x000001b8 } },
+ { AR5K_RF_GAIN(28), { 0x00000000, 0x000001f8 } },
+ { AR5K_RF_GAIN(29), { 0x00000000, 0x00000038 } },
+ { AR5K_RF_GAIN(30), { 0x00000000, 0x00000078 } },
+ { AR5K_RF_GAIN(31), { 0x00000000, 0x000000b8 } },
+ { AR5K_RF_GAIN(32), { 0x00000000, 0x000001b9 } },
+ { AR5K_RF_GAIN(33), { 0x00000000, 0x000001f9 } },
+ { AR5K_RF_GAIN(34), { 0x00000000, 0x00000039 } },
+ { AR5K_RF_GAIN(35), { 0x00000000, 0x00000079 } },
+ { AR5K_RF_GAIN(36), { 0x00000000, 0x000000b9 } },
+ { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
+ { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
+};
+
+#define AR5K_GAIN_CRN_FIX_BITS_5111 4
+#define AR5K_GAIN_CRN_FIX_BITS_5112 7
+#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
+#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
+#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
+#define AR5K_GAIN_CCK_PROBE_CORR 5
+#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
+#define AR5K_GAIN_STEP_COUNT 10
+
+/* Check if our current measurement has drifted to (or beyond) the edge of
+ * our current variable attenuation window, i.e. a gain adjustment is needed */
+#define AR5K_GAIN_CHECK_ADJUST(_g) \
+ ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
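
/*
 * Editorial sketch, not part of the patch: roughly how the check above is
 * consumed by the periodic gain calibration. The struct ath5k_gain fields
 * (g_current, g_low, g_high) are assumed to be the ones declared in ath5k.h.
 */
static inline bool ath5k_gain_needs_adjust(const struct ath5k_gain *g)
{
	/* true once the measured gain hits or leaves the allowed window */
	return AR5K_GAIN_CHECK_ADJUST(g);
}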
+
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
+struct ath5k_gain_opt_step {
+ s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
+ s8 gos_gain;
+};
+
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
+struct ath5k_gain_opt {
+ u8 go_default;
+ u8 go_steps_count;
+ const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
+};
+
+
+/*
+ * RF5111
+ * Parameters on gos_param:
+ * 1) Tx clip PHY register
+ * 2) PWD 90 RF register
+ * 3) PWD 84 RF register
+ * 4) RFGainSel RF register
+ */
+static const struct ath5k_gain_opt rfgain_opt_5111 = {
+ 4,
+ 9,
+ {
+ { { 4, 1, 1, 1 }, 6 },
+ { { 4, 0, 1, 1 }, 4 },
+ { { 3, 1, 1, 1 }, 3 },
+ { { 4, 0, 0, 1 }, 1 },
+ { { 4, 1, 1, 0 }, 0 },
+ { { 4, 0, 1, 0 }, -2 },
+ { { 3, 1, 1, 0 }, -3 },
+ { { 4, 0, 0, 0 }, -4 },
+ { { 2, 1, 1, 0 }, -6 }
+ }
+};
+
+/*
+ * RF5112
+ * Parameters on gos_param:
+ * 1) Mixgain ovr RF register
+ * 2) PWD 138 RF register
+ * 3) PWD 137 RF register
+ * 4) PWD 136 RF register
+ * 5) PWD 132 RF register
+ * 6) PWD 131 RF register
+ * 7) PWD 130 RF register
+ */
+static const struct ath5k_gain_opt rfgain_opt_5112 = {
+ 1,
+ 8,
+ {
+ { { 3, 0, 0, 0, 0, 0, 0 }, 6 },
+ { { 2, 0, 0, 0, 0, 0, 0 }, 0 },
+ { { 1, 0, 0, 0, 0, 0, 0 }, -3 },
+ { { 0, 0, 0, 0, 0, 0, 0 }, -6 },
+ { { 0, 1, 1, 0, 0, 0, 0 }, -8 },
+ { { 0, 1, 1, 0, 1, 1, 0 }, -10 },
+ { { 0, 1, 0, 1, 1, 1, 0 }, -13 },
+ { { 0, 1, 0, 1, 1, 0, 1 }, -16 },
+ }
+};
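
/*
 * Editorial sketch, not part of the patch: one way the optimization ladders
 * above can be walked. go_step[] entries are ordered from highest to lowest
 * gos_gain, so picking "the strongest step that does not exceed the wanted
 * gain" is a simple linear scan. The real adjustment logic lives in phy.c
 * (not shown in this hunk) and also applies the chosen step's gos_param[]
 * values to the RF/PHY registers listed in the comments above.
 */
static const struct ath5k_gain_opt_step *
ath5k_gain_opt_find_step(const struct ath5k_gain_opt *go, s8 wanted_gain)
{
	int i;

	for (i = 0; i < go->go_steps_count; i++)
		if (go->go_step[i].gos_gain <= wanted_gain)
			return &go->go_step[i];

	/* nothing fits, fall back to the lowest-gain step */
	return &go->go_step[go->go_steps_count - 1];
}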
+
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
new file mode 100644
index 000000000..270a319f3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -0,0 +1,116 @@
+/*
+ * RFKILL support for ath5k
+ *
+ * Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include "ath5k.h"
+
+
+static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
+{
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
+ ah->rf_kill.gpio, ah->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
+}
+
+
+static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
+{
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
+ ah->rf_kill.gpio, ah->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
+}
+
+static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
+{
+ u32 curval;
+
+ ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
+ curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
+ !!curval : !curval);
+}
+
+static bool
+ath5k_is_rfkill_set(struct ath5k_hw *ah)
+{
+ /* configuring GPIO for input for some reason disables rfkill */
+ /*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
+ return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
+ ah->rf_kill.polarity;
+}
+
+static void
+ath5k_tasklet_rfkill_toggle(unsigned long data)
+{
+ struct ath5k_hw *ah = (void *)data;
+ bool blocked;
+
+ blocked = ath5k_is_rfkill_set(ah);
+ wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
+}
+
+
+void
+ath5k_rfkill_hw_start(struct ath5k_hw *ah)
+{
+ /* read rfkill GPIO configuration from EEPROM header */
+ ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
+ ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
+
+ tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
+ (unsigned long)ah);
+
+ ath5k_rfkill_disable(ah);
+
+ /* enable interrupt for rfkill switch */
+ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
+ ath5k_rfkill_set_intr(ah, true);
+}
+
+
+void
+ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
+{
+ /* disable interrupt for rfkill switch */
+ if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
+ ath5k_rfkill_set_intr(ah, false);
+
+ tasklet_kill(&ah->rf_kill.toggleq);
+
+ /* enable RFKILL when stopping HW so Wifi LED is turned off */
+ ath5k_rfkill_enable(ah);
+}
+
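/*
 * Editorial note, not part of the patch: ath5k_rfkill_hw_start() and
 * ath5k_rfkill_hw_stop() are expected to be called from the driver's
 * start/stop paths in base.c (not shown in this hunk), so the rfkill GPIO
 * interrupt is armed while the hardware runs and the radio is forced off
 * again when the hardware is brought down.
 */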
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
new file mode 100644
index 000000000..04cf0ca72
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -0,0 +1,122 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "ath5k.h"
+#include "reg.h"
+
+#define SIMPLE_SHOW_STORE(name, get, set) \
+static ssize_t ath5k_attr_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+} \
+ \
+static ssize_t ath5k_attr_store_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ int val, ret; \
+ \
+ ret = kstrtoint(buf, 10, &val); \
+ if (ret < 0) \
+ return ret; \
+ set(ah, val); \
+ return count; \
+} \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
+ ath5k_attr_show_##name, ath5k_attr_store_##name)
+
+#define SIMPLE_SHOW(name, get) \
+static ssize_t ath5k_attr_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
+
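/*
 * Editorial note, not part of the patch: each SIMPLE_SHOW_STORE() line below
 * expands to a show/store pair plus a DEVICE_ATTR. For example,
 * SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init)
 * yields dev_attr_ani_mode backed by:
 *
 *	static ssize_t ath5k_attr_show_ani_mode(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct ieee80211_hw *hw = dev_get_drvdata(dev);
 *		struct ath5k_hw *ah = hw->priv;
 *		return snprintf(buf, PAGE_SIZE, "%d\n",
 *				ah->ani_state.ani_mode);
 *	}
 *
 * plus a matching store routine that parses the written integer and calls
 * ath5k_ani_init(ah, val). The attributes appear under the "ani" group in
 * the device's sysfs directory, as set up by ath5k_sysfs_register() below.
 */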
+/*** ANI ***/
+
+SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
+SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
+ ath5k_ani_set_noise_immunity_level);
+SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
+ ath5k_ani_set_spur_immunity_level);
+SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
+ ath5k_ani_set_firstep_level);
+SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
+ ath5k_ani_set_ofdm_weak_signal_detection);
+SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
+ ath5k_ani_set_cck_weak_signal_detection);
+SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
+
+static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
+}
+static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO,
+ ath5k_attr_show_noise_immunity_level_max, NULL);
+
+static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
+}
+static DEVICE_ATTR(firstep_level_max, S_IRUGO,
+ ath5k_attr_show_firstep_level_max, NULL);
+
+static struct attribute *ath5k_sysfs_entries_ani[] = {
+ &dev_attr_ani_mode.attr,
+ &dev_attr_noise_immunity_level.attr,
+ &dev_attr_spur_level.attr,
+ &dev_attr_firstep_level.attr,
+ &dev_attr_ofdm_weak_signal_detection.attr,
+ &dev_attr_cck_weak_signal_detection.attr,
+ &dev_attr_noise_immunity_level_max.attr,
+ &dev_attr_spur_level_max.attr,
+ &dev_attr_firstep_level_max.attr,
+ NULL
+};
+
+static struct attribute_group ath5k_attribute_group_ani = {
+ .name = "ani",
+ .attrs = ath5k_sysfs_entries_ani,
+};
+
+
+/*** register / unregister ***/
+
+int
+ath5k_sysfs_register(struct ath5k_hw *ah)
+{
+ struct device *dev = ah->dev;
+ int err;
+
+ err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
+ if (err) {
+ ATH5K_ERR(ah, "failed to create sysfs group\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void
+ath5k_sysfs_unregister(struct ath5k_hw *ah)
+{
+ struct device *dev = ah->dev;
+
+ sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
+}
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 000000000..c6eef519b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,106 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+
+
+#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
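
/*
 * Editorial note, not part of the patch: with CONFIG_ATH5K_TRACER disabled,
 * the TRACE_EVENT() stub above reduces each tracepoint to an empty inline,
 * e.g. the ath5k_rx event defined below becomes:
 *
 *	static inline void trace_ath5k_rx(struct ath5k_hw *priv,
 *					  struct sk_buff *skb) {}
 *
 * so trace_ath5k_rx() call sites elsewhere in the driver compile to nothing.
 */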
+
+struct sk_buff;
+struct ath5k_txq;
+struct ath5k_tx_status;
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
+ TP_ARGS(priv, skb),
+ TP_STRUCT__entry(
+ __field(struct ath5k_hw *, priv)
+ __field(unsigned long, skbaddr)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+ TP_fast_assign(
+ __entry->priv = priv;
+ __entry->skbaddr = (unsigned long) skb;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+ TP_printk(
+ "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+ )
+);
+
+TRACE_EVENT(ath5k_tx,
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
+ struct ath5k_txq *q),
+
+ TP_ARGS(priv, skb, q),
+
+ TP_STRUCT__entry(
+ __field(struct ath5k_hw *, priv)
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+
+ TP_fast_assign(
+ __entry->priv = priv;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+
+ TP_printk(
+ "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+ __entry->qnum
+ )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
+ struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+ TP_ARGS(priv, skb, q, ts),
+
+ TP_STRUCT__entry(
+ __field(struct ath5k_hw *, priv)
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __field(u8, ts_status)
+ __field(s8, ts_rssi)
+ __field(u8, ts_antenna)
+ ),
+
+ TP_fast_assign(
+ __entry->priv = priv;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ __entry->ts_status = ts->ts_status;
+ __entry->ts_rssi = ts->ts_rssi;
+ __entry->ts_antenna = ts->ts_antenna;
+ ),
+
+ TP_printk(
+ "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+ __entry->priv, __entry->skbaddr, __entry->qnum,
+ __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+ )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
new file mode 100644
index 000000000..9c125ff08
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -0,0 +1,65 @@
+config ATH6KL
+ tristate "Atheros mobile chipsets support"
+ depends on CFG80211
+ ---help---
+ This module adds core support for wireless adapters based on
+ Atheros AR6003 and AR6004 chipsets. You still need separate
+ bus drivers for USB and SDIO to be able to use real devices.
+
+ If you choose to build it as a module, it will be called
+ ath6kl_core. Please note that AR6002 and AR6001 are not
+ supported by this driver.
+
+config ATH6KL_SDIO
+ tristate "Atheros ath6kl SDIO support"
+ depends on ATH6KL
+ depends on MMC
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros AR6003 and AR6004 chipsets running over SDIO. If you
+ choose to build it as a module, it will be called ath6kl_sdio.
+ Please note that AR6002 and AR6001 are not supported by this
+ driver.
+
+config ATH6KL_USB
+ tristate "Atheros ath6kl USB support"
+ depends on ATH6KL
+ depends on USB
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros AR6004 chipset and chipsets based on it running over
+ USB. If you choose to build it as a module, it will be
+ called ath6kl_usb.
+
+config ATH6KL_DEBUG
+ bool "Atheros ath6kl debugging"
+ depends on ATH6KL
+ ---help---
+ Enables ath6kl debug support, including debug messages
+ enabled with debug_mask module parameter and debugfs
+ interface.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH6KL_TRACING
+ bool "Atheros ath6kl tracing support"
+ depends on ATH6KL
+ depends on EVENT_TRACING
+ ---help---
+ Select this to enable the ath6kl tracing infrastructure, which
+ can be used, for example, with trace-cmd. All debug messages
+ and commands are delivered using individually enablable trace
+ points.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH6KL_REGDOMAIN
+ bool "Atheros ath6kl regdomain support"
+ depends on ATH6KL
+ depends on CFG80211_CERTIFICATION_ONUS
+ ---help---
+ Enabling this makes it possible to change the regdomain in
+ the firmware. It may only be enabled when the regulatory
+ requirements are taken into account.
+
+ If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
new file mode 100644
index 000000000..dc2b3b467
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -0,0 +1,49 @@
+#------------------------------------------------------------------------------
+# Copyright (c) 2004-2011 Atheros Communications Inc.
+# Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+# All rights reserved.
+#
+#
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+#
+#
+# Author(s): Atheros
+#------------------------------------------------------------------------------
+
+obj-$(CONFIG_ATH6KL) += ath6kl_core.o
+ath6kl_core-y += debug.o
+ath6kl_core-y += hif.o
+ath6kl_core-y += htc_mbox.o
+ath6kl_core-y += htc_pipe.o
+ath6kl_core-y += bmi.o
+ath6kl_core-y += cfg80211.o
+ath6kl_core-y += init.o
+ath6kl_core-y += main.o
+ath6kl_core-y += txrx.o
+ath6kl_core-y += wmi.o
+ath6kl_core-y += core.o
+ath6kl_core-y += recovery.o
+
+ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
+
+obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
+ath6kl_sdio-y += sdio.o
+
+obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
+ath6kl_usb-y += usb.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
new file mode 100644
index 000000000..334dbd834
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+
+int ath6kl_bmi_done(struct ath6kl *ar)
+{
+ int ret;
+ u32 cid = BMI_DONE;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send bmi done: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info)
+{
+ int ret;
+ u32 cid = BMI_GET_TARGET_INFO;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send get target info: %d\n", ret);
+ return ret;
+ }
+
+ if (ar->hif_type == ATH6KL_HIF_TYPE_USB) {
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info,
+ sizeof(*targ_info));
+ } else {
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
+ sizeof(targ_info->version));
+ }
+
+ if (ret) {
+ ath6kl_err("Unable to recv target info: %d\n", ret);
+ return ret;
+ }
+
+ if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
+ /* Determine how many bytes are in the Target's targ_info */
+ ret = ath6kl_hif_bmi_read(ar,
+ (u8 *)&targ_info->byte_count,
+ sizeof(targ_info->byte_count));
+ if (ret) {
+ ath6kl_err("unable to read target info byte count: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * If the Target's idea of the targ_info size doesn't match
+ * the Host's, bail out; supporting such targets would need
+ * backwards-compatibility handling that isn't implemented.
+ */
+ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Read the remainder of the targ_info */
+ ret = ath6kl_hif_bmi_read(ar,
+ ((u8 *)targ_info) +
+ sizeof(targ_info->byte_count),
+ sizeof(*targ_info) -
+ sizeof(targ_info->byte_count));
+
+ if (ret) {
+ ath6kl_err("Unable to read target info (%d bytes): %d\n",
+ targ_info->byte_count, ret);
+ return ret;
+ }
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
+ targ_info->version, targ_info->type);
+
+ return 0;
+}
+
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_READ_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, rx_len;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi read memory: device: addr: 0x%x, len: %d\n",
+ addr, len);
+
+ len_remain = len;
+
+ while (len_remain) {
+ rx_len = (len_remain < ar->bmi.max_data_size) ?
+ len_remain : ar->bmi.max_data_size;
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
+ offset += sizeof(len);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
+ len_remain -= rx_len; addr += rx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_WRITE_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
+ u8 aligned_buf[400];
+ u8 *src;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
+ return -E2BIG;
+
+ memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
+
+ len_remain = len;
+ while (len_remain) {
+ src = &buf[len - len_remain];
+
+ if (len_remain < (ar->bmi.max_data_size - header)) {
+ if (len_remain & 3) {
+ /* align it with 4 bytes */
+ len_remain = len_remain +
+ (4 - (len_remain & 3));
+ memcpy(aligned_buf, src, len_remain);
+ src = aligned_buf;
+ }
+ tx_len = len_remain;
+ } else {
+ tx_len = (ar->bmi.max_data_size - header);
+ }
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ len_remain -= tx_len; addr += tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_EXECUTE;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
+ addr, *param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
+ offset += sizeof(*param);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_SET_APP_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_READ_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
+{
+ u32 cid = BMI_WRITE_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write SOC reg: addr: 0x%x, param: %d\n",
+ addr, param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
+ offset += sizeof(param);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ u32 cid = BMI_LZ_DATA;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(len);
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = ar->bmi.max_data_size + header;
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
+ len);
+
+ len_remain = len;
+ while (len_remain) {
+ tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
+ len_remain : (ar->bmi.max_data_size - header);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
+ tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ len_remain -= tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_LZ_STREAM_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > ar->bmi.max_cmd_size) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi LZ stream start: addr: 0x%x)\n",
+ addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to start LZ stream to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ int ret;
+ u32 last_word = 0;
+ u32 last_word_offset = len & ~0x3;
+ u32 unaligned_bytes = len & 0x3;
+
+ ret = ath6kl_bmi_lz_stream_start(ar, addr);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes) {
+ /* copy the last word into a zero padded buffer */
+ memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
+ }
+
+ ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes)
+ ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
+
+ if (!ret) {
+ /* Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches. */
+ ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
+ }
+ return ret;
+}
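
/*
 * Editorial note, not part of the patch: callers elsewhere in the driver
 * (e.g. the firmware loader in init.c, not shown here) are expected to use
 * this as ath6kl_bmi_fast_download(ar, load_addr, fw_data, fw_len), pushing
 * an LZ-compressed image through the target-side decompressor in one pass.
 */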
+
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+ ar->bmi.done_sent = false;
+}
+
+int ath6kl_bmi_init(struct ath6kl *ar)
+{
+ if (WARN_ON(ar->bmi.max_data_size == 0))
+ return -EINVAL;
+
+ /* cmd + addr + len + data_size */
+ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
+
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
+ if (!ar->bmi.cmd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ath6kl_bmi_cleanup(struct ath6kl *ar)
+{
+ kfree(ar->bmi.cmd_buf);
+ ar->bmi.cmd_buf = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
new file mode 100644
index 000000000..397a52f26
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BMI_H
+#define BMI_H
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to ATH6KL, to provide
+ * patches to code that is already resident on ATH6KL, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using ATH6KL Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
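/*
 * Editorial sketch, not part of the patch: every BMI request is a command
 * id followed by command-specific 32-bit words, handed to the target via
 * the HIF layer. A minimal host-side framing of BMI_WRITE_SOC_REGISTER
 * (defined below) could look like this; the helper name is hypothetical,
 * and the real code in bmi.c additionally checks bmi.done_sent and the
 * command buffer size.
 */
static inline int bmi_frame_reg_write_example(struct ath6kl *ar,
					      u32 addr, u32 value)
{
	u8 buf[12];
	u32 cid = BMI_WRITE_SOC_REGISTER;

	/* command id, then address and value, each a 32-bit word */
	memcpy(buf, &cid, sizeof(cid));
	memcpy(buf + 4, &addr, sizeof(addr));
	memcpy(buf + 8, &value, sizeof(value));

	return ath6kl_hif_bmi_write(ar, buf, sizeof(buf));
}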
+/* BMI Commands */
+
+#define BMI_NO_COMMAND 0
+
+#define BMI_DONE 1
+/*
+ * Semantics: Host is done using BMI
+ * Request format:
+ * u32 command (BMI_DONE)
+ * Response format: none
+ */
+
+#define BMI_READ_MEMORY 2
+/*
+ * Semantics: Host reads ATH6KL memory
+ * Request format:
+ * u32 command (BMI_READ_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * Response format:
+ * u8 data[length]
+ */
+
+#define BMI_WRITE_MEMORY 3
+/*
+ * Semantics: Host writes ATH6KL memory
+ * Request format:
+ * u32 command (BMI_WRITE_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * u8 data[length]
+ * Response format: none
+ */
+
+#define BMI_EXECUTE 4
+/*
+ * Semantics: Causes ATH6KL to execute code
+ * Request format:
+ * u32 command (BMI_EXECUTE)
+ * u32 address
+ * u32 parameter
+ * Response format:
+ * u32 return value
+ */
+
+#define BMI_SET_APP_START 5
+/*
+ * Semantics: Set Target application starting address
+ * Request format:
+ * u32 command (BMI_SET_APP_START)
+ * u32 address
+ * Response format: none
+ */
+
+#define BMI_READ_SOC_REGISTER 6
+/*
+ * Semantics: Read a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_READ_REGISTER)
+ * u32 address
+ * Response format:
+ * u32 value
+ */
+
+#define BMI_WRITE_SOC_REGISTER 7
+/*
+ * Semantics: Write a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_WRITE_REGISTER)
+ * u32 address
+ * u32 value
+ *
+ * Response format: none
+ */
+
+#define BMI_GET_TARGET_ID 8
+#define BMI_GET_TARGET_INFO 8
+/*
+ * Semantics: Fetch the 4-byte Target information
+ * Request format:
+ * u32 command (BMI_GET_TARGET_ID/INFO)
+ * Response format1 (old firmware):
+ * u32 TargetVersionID
+ * Response format2 (newer firmware):
+ * u32 TARGET_VERSION_SENTINAL
+ * struct bmi_target_info;
+ */
+
+#define TARGET_VERSION_SENTINAL 0xffffffff
+#define TARGET_TYPE_AR6003 3
+#define TARGET_TYPE_AR6004 5
+#define BMI_ROMPATCH_INSTALL 9
+/*
+ * Semantics: Install a ROM Patch.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_INSTALL)
+ * u32 Target ROM Address
+ * u32 Target RAM Address or Value (depending on Target Type)
+ * u32 Size, in bytes
+ * u32 Activate? 1-->activate;
+ * 0-->install but do not activate
+ * Response format:
+ * u32 PatchID
+ */
+
+#define BMI_ROMPATCH_UNINSTALL 10
+/*
+ * Semantics: Uninstall a previously-installed ROM Patch,
+ * automatically deactivating, if necessary.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_UNINSTALL)
+ * u32 PatchID
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_ACTIVATE 11
+/*
+ * Semantics: Activate a list of previously-installed ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_ACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_DEACTIVATE 12
+/*
+ * Semantics: Deactivate a list of active ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_DEACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+
+#define BMI_LZ_STREAM_START 13
+/*
+ * Semantics: Begin an LZ-compressed stream of input
+ * which is to be uncompressed by the Target to an
+ * output buffer at address. The output buffer must
+ * be sufficiently large to hold the uncompressed
+ * output from the compressed input stream. This BMI
+ * command should be followed by a series of 1 or more
+ * BMI_LZ_DATA commands.
+ * u32 command (BMI_LZ_STREAM_START)
+ * u32 address
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_LZ_DATA 14
+/*
+ * Semantics: Host writes ATH6KL memory with LZ-compressed
+ * data which is uncompressed by the Target. This command
+ * must be preceded by a BMI_LZ_STREAM_START command. A series
+ * of BMI_LZ_DATA commands are considered part of a single
+ * input stream until another BMI_LZ_STREAM_START is issued.
+ * Request format:
+ * u32 command (BMI_LZ_DATA)
+ * u32 length (of compressed data),
+ * at most BMI_DATASZ_MAX
+ * u8 CompressedData[length]
+ * Response format: none
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */
+
+struct ath6kl;
+struct ath6kl_bmi_target_info {
+ __le32 byte_count; /* size of this structure */
+ __le32 version; /* target version id */
+ __le32 type; /* target type */
+} __packed;
+
+#define ath6kl_bmi_write_hi32(ar, item, val) \
+ ({ \
+ u32 addr; \
+ __le32 v; \
+ \
+ addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
+ v = cpu_to_le32(val); \
+ ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
+ })
+
+#define ath6kl_bmi_read_hi32(ar, item, val) \
+ ({ \
+ u32 addr, *check_type = val; \
+ __le32 tmp; \
+ int ret; \
+ \
+ (void) (check_type == val); \
+ addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
+ ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
+ if (!ret) \
+ *val = le32_to_cpu(tmp); \
+ ret; \
+ })
+
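/*
 * Editorial sketch, not part of the patch: typical use of the hi32 helpers
 * above during early boot. The host-interest item names (hi_board_data,
 * hi_board_ext_data) are assumed to come from target.h and are only
 * illustrative here.
 */
static inline int bmi_hi_item_example(struct ath6kl *ar)
{
	u32 board_data_addr = 0;
	int ret;

	/* read one host-interest word, then write it back to another item */
	ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_data_addr);
	if (ret)
		return ret;

	return ath6kl_bmi_write_hi32(ar, hi_board_ext_data, board_data_addr);
}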
+int ath6kl_bmi_init(struct ath6kl *ar);
+void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
+int ath6kl_bmi_done(struct ath6kl *ar);
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info);
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_execute(struct ath6kl *ar,
+ u32 addr, u32 *param);
+int ath6kl_bmi_set_app_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
+int ath6kl_bmi_lz_data(struct ath6kl *ar,
+ u8 *buf, u32 len);
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_fast_download(struct ath6kl *ar,
+ u32 addr, u8 *buf, u32 len);
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
new file mode 100644
index 000000000..cce4625a5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -0,0 +1,3882 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/inetdevice.h>
+#include <linux/export.h>
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "testmode.h"
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define DEFAULT_BG_SCAN_PERIOD 60
+
+struct ath6kl_cfg80211_match_probe_ssid {
+ struct cfg80211_ssid ssid;
+ u8 flag;
+};
+
+static struct ieee80211_rate ath6kl_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates (ath6kl_rates + 4)
+#define ath6kl_a_rates_size 8
+#define ath6kl_g_rates (ath6kl_rates + 0)
+#define ath6kl_g_rates_size 12
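
/*
 * Editorial note, not part of the patch: .bitrate above is in units of
 * 100 kbps, so the table spans 1-54 Mbps. The first four entries are the
 * 802.11b CCK rates, which is why ath6kl_a_rates (used for 5 GHz) starts
 * at offset 4 and carries only the eight OFDM rates.
 */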
+
+#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
+#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
+ IEEE80211_HT_CAP_SGI_20 | \
+ IEEE80211_HT_CAP_SGI_40)
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+ .channels = ath6kl_2ghz_channels,
+ .n_bitrates = ath6kl_g_rates_size,
+ .bitrates = ath6kl_g_rates,
+ .ht_cap.cap = ath6kl_g_htcap,
+ .ht_cap.ht_supported = true,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+ .channels = ath6kl_5ghz_a_channels,
+ .n_bitrates = ath6kl_a_rates_size,
+ .bitrates = ath6kl_a_rates,
+ .ht_cap.cap = ath6kl_a_htcap,
+ .ht_cap.ht_supported = true,
+};
+
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+/* returns true if scheduled scan was stopped */
+static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags))
+ return false;
+
+ del_timer_sync(&vif->sched_scan_timer);
+
+ if (ar->state == ATH6KL_STATE_RECOVERY)
+ return true;
+
+ ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false);
+
+ return true;
+}
+
+static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return;
+
+ cfg80211_sched_scan_stopped(ar->wiphy);
+}
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
+ enum nl80211_wpa_versions wpa_version)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ vif->auth_mode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ vif->auth_mode = WPA2_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ vif->auth_mode = WPA_AUTH;
+ } else {
+ ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
+ enum nl80211_auth_type auth_type)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ vif->dot11_auth_mode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ vif->dot11_auth_mode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ vif->dot11_auth_mode = LEAP_AUTH;
+ break;
+
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ break;
+
+ default:
+ ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
+{
+ u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+ &vif->grp_crypto_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+ __func__, cipher, ucast);
+
+ switch (cipher) {
+ case 0:
+ /* our own hack to use value 0 as no crypto used */
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ *ar_cipher = WAPI_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ ath6kl_err("cipher 0x%x not supported\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+ if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_PSK_AUTH;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt == 0x00409600) {
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_AUTH_CCKM;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_AUTH_CCKM;
+ } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+ vif->auth_mode = NONE_AUTH;
+ }
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("wmi is not ready\n");
+ return false;
+ }
+
+ if (!test_bit(WLAN_ENABLED, &vif->flags)) {
+ ath6kl_err("wlan disabled\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 &&
+ pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_RSN;
+}
+
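+ /* Vendor-specific IE with OUI 00:50:f2, OUI type 4 (WPS) */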
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+ return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+ pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+ size_t ies_len)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Clear previously set flag
+ */
+
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ /*
+ * Filter out RSN/WPA IE(s)
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+
+ if (ath6kl_is_wps_ie(pos))
+ ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_REQ, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ *nw_type = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ *nw_type = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+ u8 *if_idx, u8 *nw_type)
+{
+ int i;
+
+ if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+ return false;
+
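+ /* An IBSS interface cannot coexist with any other interface */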
+ if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+ ar->num_vif))
+ return false;
+
+ if (type == NL80211_IFTYPE_STATION ||
+ type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+ for (i = 0; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map) & BIT(i)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
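+ /* P2P interfaces use the firmware indices above max_norm_iface */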
+ if (type == NL80211_IFTYPE_P2P_CLIENT ||
+ type == NL80211_IFTYPE_P2P_GO) {
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map) & BIT(i)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
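+ /*
+ * Despite its name, this returns true once the WMI control endpoint has
+ * no pending TX frames; it is used as a wait_event() completion condition.
+ */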
+static bool ath6kl_is_tx_pending(struct ath6kl *ar)
+{
+ return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
+}
+
+static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
+ bool enable)
+{
+ int err;
+
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
+
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
+
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ int status;
+ u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
+ u16 interval;
+
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ vif->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(SKIP_SCAN, &ar->flag) &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+ ath6kl_err("SkipScan: channel or bssid invalid\n");
+ return -EINVAL;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ up(&ar->sem);
+ return -EBUSY;
+ }
+
+ if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(ar->event_wq,
+ ath6kl_is_tx_pending(ar),
+ WMI_TIMEOUT);
+ if (signal_pending(current)) {
+ ath6kl_err("cmd queue drain timeout\n");
+ up(&ar->sem);
+ return -EINTR;
+ }
+ }
+
+ status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->ie == NULL || sme->ie_len == 0)
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
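+ /*
+ * If we are already connected to the requested SSID, issue a firmware
+ * reconnect instead of a full connect.
+ */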
+ if (test_bit(CONNECTED, &vif->flags) &&
+ vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ vif->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->req_bssid,
+ vif->ch_hint);
+
+ up(&ar->sem);
+ if (status) {
+ ath6kl_err("wmi_reconnect_cmd failed\n");
+ return -EIO;
+ }
+ return 0;
+ } else if (vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ ath6kl_disconnect(vif);
+ }
+
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = sme->ssid_len;
+ memcpy(vif->ssid, sme->ssid, sme->ssid_len);
+
+ if (sme->channel)
+ vif->ch_hint = sme->channel->center_freq;
+
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+ memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
+
+ ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
+
+ status = ath6kl_set_auth_type(vif, sme->auth_type);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise)
+ ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
+ else
+ ath6kl_set_cipher(vif, 0, true);
+
+ ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
+
+ if (sme->crypto.n_akm_suites)
+ ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
+
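+ /*
+ * For static WEP (no WPA), store the key locally and push it to the
+ * firmware before issuing the connect command.
+ */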
+ if ((sme->key_len) &&
+ (vif->auth_mode == NONE_AUTH) &&
+ (vif->prwise_crypto == WEP_CRYPT)) {
+ struct ath6kl_key *key = NULL;
+
+ if (sme->key_idx > WMI_MAX_KEY_INDEX) {
+ ath6kl_err("key index %d out of bounds\n",
+ sme->key_idx);
+ up(&ar->sem);
+ return -ENOENT;
+ }
+
+ key = &vif->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ memcpy(key->key, sme->key, key->key_len);
+ key->cipher = vif->prwise_crypto;
+ vif->def_txkey_index = sme->key_idx;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+ vif->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL, 0,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ up(&ar->sem);
+ return -EIO;
+ }
+ }
+
+ vif->nw_type = vif->next_mode;
+
+ /* enable enhanced bmiss detection if applicable */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, true);
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
+ nw_subtype = SUBTYPE_P2PCLIENT;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ vif->reconnect_flag = 0;
+
+ if (vif->nw_type == INFRA_NETWORK) {
+ interval = max_t(u16, vif->listen_intvl_t,
+ ATH6KL_MAX_WOW_LISTEN_INTL);
+ status = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ interval,
+ 0);
+ if (status) {
+ ath6kl_err("couldn't set listen intervel\n");
+ up(&ar->sem);
+ return status;
+ }
+ }
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, nw_subtype);
+
+ if (sme->bg_scan_period == 0) {
+ /* disable background scan if period is 0 */
+ sme->bg_scan_period = 0xffff;
+ } else if (sme->bg_scan_period == -1) {
+ /* configure default value if not specified */
+ sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+ }
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
+ sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
+
+ up(&ar->sem);
+
+ if (status == -EINVAL) {
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
+ ath6kl_err("invalid request\n");
+ return -ENOENT;
+ } else if (status) {
+ ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+ return -EIO;
+ }
+
+ if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((vif->auth_mode == WPA_PSK_AUTH) ||
+ (vif->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&vif->disconnect_timer,
+ jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+ }
+
+ ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+ set_bit(CONNECT_PEND, &vif->flags);
+
+ return 0;
+}
+
+static struct cfg80211_bss *
+ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+ enum network_type nw_type,
+ const u8 *bssid,
+ struct ieee80211_channel *chan,
+ const u8 *beacon_ie,
+ size_t beacon_ie_len)
+{
+ struct ath6kl *ar = vif->ar;
+ struct cfg80211_bss *bss;
+ u16 cap_val;
+ enum ieee80211_bss_type bss_type;
+ u8 *ie;
+
+ if (nw_type & ADHOC_NETWORK) {
+ cap_val = WLAN_CAPABILITY_IBSS;
+ bss_type = IEEE80211_BSS_TYPE_IBSS;
+ } else {
+ cap_val = WLAN_CAPABILITY_ESS;
+ bss_type = IEEE80211_BSS_TYPE_ESS;
+ }
+
+ bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+ vif->ssid, vif->ssid_len,
+ bss_type, IEEE80211_PRIVACY_ANY);
+ if (bss == NULL) {
+ /*
+ * Since cfg80211 may not yet know about the BSS,
+ * generate a partial entry until the first BSS info
+ * event becomes available.
+ *
+ * Prepend SSID element since it is not included in the Beacon
+ * IEs from the target.
+ */
+ ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
+ if (ie == NULL)
+ return NULL;
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = vif->ssid_len;
+ memcpy(ie + 2, vif->ssid, vif->ssid_len);
+ memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wiphy, chan,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ bssid, 0, cap_val, 100,
+ ie, 2 + vif->ssid_len + beacon_ie_len,
+ 0, GFP_KERNEL);
+ if (bss)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "added bss %pM to cfg80211\n", bssid);
+ kfree(ie);
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+ }
+
+ return bss;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ struct ieee80211_channel *chan;
+ struct ath6kl *ar = vif->ar;
+ struct cfg80211_bss *bss;
+
+ /* capinfo + listen interval */
+ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+ /* capinfo + status code + associd */
+ u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+ u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+ u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+ assoc_resp_ie_offset;
+
+ assoc_req_len -= assoc_req_ie_offset;
+ assoc_resp_len -= assoc_resp_ie_offset;
+
+ /*
+ * Store Beacon interval here; DTIM period will be available only once
+ * a Beacon frame from the AP is seen.
+ */
+ vif->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
+
+ if (nw_type & ADHOC_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & INFRA_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ chan = ieee80211_get_channel(ar->wiphy, (int) channel);
+
+ bss = ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan,
+ assoc_info, beacon_ie_len);
+ if (!bss) {
+ ath6kl_err("could not add cfg80211 bss entry\n");
+ return;
+ }
+
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
+ cfg80211_put_bss(ar->wiphy, bss);
+ return;
+ }
+
+ if (vif->sme_state == SME_CONNECTING) {
+ /* inform connect result to cfg80211 */
+ vif->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(vif->ndev, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ cfg80211_put_bss(ar->wiphy, bss);
+ } else if (vif->sme_state == SME_CONNECTED) {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ }
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev, u16 reason_code)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+ reason_code);
+
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ vif->reconnect_flag = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
+
+ if (!test_bit(SKIP_SCAN, &ar->flag))
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+
+ up(&ar->sem);
+
+ vif->sme_state = SME_DISCONNECTED;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
+
+ if (vif->nw_type & ADHOC_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+
+ if (vif->nw_type & INFRA_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ clear_bit(CONNECT_PEND, &vif->flags);
+
+ if (vif->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(vif->ndev,
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else if (vif->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(vif->ndev, proto_reason,
+ NULL, 0, GFP_KERNEL);
+ }
+
+ vif->sme_state = SME_DISCONNECTED;
+
+ /*
+ * Send a disconnect command to the target when a disconnect event is
+ * received with a reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving a disconnect event. There will be one more disconnect
+ * event for this disconnect command, with reason code DISCONNECT_CMD,
+ * which won't be notified to cfg80211.
+ */
+ if (reason != DISCONNECT_CMD)
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+}
+
+static int ath6kl_set_probed_ssids(struct ath6kl *ar,
+ struct ath6kl_vif *vif,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_set,
+ int n_match_ssid)
+{
+ u8 i, j, index_to_add, ssid_found = false;
+ struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
+
+ memset(ssid_list, 0, sizeof(ssid_list));
+
+ if (n_ssids > MAX_PROBED_SSIDS ||
+ n_match_ssid > MAX_PROBED_SSIDS)
+ return -EINVAL;
+
+ for (i = 0; i < n_ssids; i++) {
+ memcpy(ssid_list[i].ssid.ssid,
+ ssids[i].ssid,
+ ssids[i].ssid_len);
+ ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
+
+ if (ssids[i].ssid_len)
+ ssid_list[i].flag = SPECIFIC_SSID_FLAG;
+ else
+ ssid_list[i].flag = ANY_SSID_FLAG;
+
+ if (n_match_ssid == 0)
+ ssid_list[i].flag |= MATCH_SSID_FLAG;
+ }
+
+ index_to_add = i;
+
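+ /*
+ * Mark match-set SSIDs with MATCH_SSID_FLAG, appending any that are
+ * not already in the probed SSID list.
+ */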
+ for (i = 0; i < n_match_ssid; i++) {
+ ssid_found = false;
+
+ for (j = 0; j < n_ssids; j++) {
+ if ((match_set[i].ssid.ssid_len ==
+ ssid_list[j].ssid.ssid_len) &&
+ (!memcmp(ssid_list[j].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len))) {
+ ssid_list[j].flag |= MATCH_SSID_FLAG;
+ ssid_found = true;
+ break;
+ }
+ }
+
+ if (ssid_found)
+ continue;
+
+ if (index_to_add >= MAX_PROBED_SSIDS)
+ continue;
+
+ ssid_list[index_to_add].ssid.ssid_len =
+ match_set[i].ssid.ssid_len;
+ memcpy(ssid_list[index_to_add].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len);
+ ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
+ index_to_add++;
+ }
+
+ for (i = 0; i < index_to_add; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ ssid_list[i].flag,
+ ssid_list[i].ssid.ssid_len,
+ ssid_list[i].ssid.ssid);
+ }
+
+ /* Make sure no old entries are left behind */
+ for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ DISABLE_SSID_FLAG, 0, NULL);
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(request->wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
+ s8 n_channels = 0;
+ u16 *channels = NULL;
+ int ret = 0;
+ u32 force_fg_scan = 0;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
+ if (ret) {
+ ath6kl_err("couldn't set bss filtering\n");
+ return ret;
+ }
+ }
+
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* this also clears IE in fw if it's not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_err("failed to set Probe Request appie for scan\n");
+ return ret;
+ }
+
+ /*
+ * Scan only the requested channels if the request specifies a set of
+ * channels. If the list is longer than the target supports, do not
+ * configure the list and instead, scan all available channels.
+ */
+ if (request->n_channels > 0 &&
+ request->n_channels <= WMI_MAX_CHANNELS) {
+ u8 i;
+
+ n_channels = request->n_channels;
+
+ channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
+ if (channels == NULL) {
+ ath6kl_warn("failed to set scan channels, scan all channels");
+ n_channels = 0;
+ }
+
+ for (i = 0; i < n_channels; i++)
+ channels[i] = request->channels[i]->center_freq;
+ }
+
+ if (test_bit(CONNECTED, &vif->flags))
+ force_fg_scan = 1;
+
+ vif->scan_req = request;
+
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0,
+ ATH6KL_FG_SCAN_INTERVAL,
+ n_channels, channels,
+ request->no_cck,
+ request->rates);
+ if (ret) {
+ ath6kl_err("failed to start scan: %d\n", ret);
+ vif->scan_req = NULL;
+ }
+
+ kfree(channels);
+
+ return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
+{
+ struct ath6kl *ar = vif->ar;
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+ aborted ? " aborted" : "");
+
+ if (!vif->scan_req)
+ return;
+
+ if (aborted)
+ goto out;
+
+ if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < vif->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+
+out:
+ cfg80211_scan_done(vif->scan_req, aborted);
+ vif->scan_req = NULL;
+}
+
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode)
+{
+ struct cfg80211_chan_def chandef;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "channel switch notify nw_type %d freq %d mode %d\n",
+ vif->nw_type, freq, mode);
+
+ cfg80211_chandef_create(&chandef,
+ ieee80211_get_channel(vif->ar->wiphy, freq),
+ (mode == WMI_11G_HT20) ?
+ NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
+
+ mutex_lock(&vif->wdev.mtx);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef);
+ mutex_unlock(&vif->wdev.mtx);
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ int seq_len;
+ u8 key_usage;
+ u8 key_type;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
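+ /* The CCKM Key Request Key (KRK) is installed via a dedicated WMI command */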
+ if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+ if (params->key_len != WMI_KRK_LEN)
+ return -EINVAL;
+ return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+ params->key);
+ }
+
+ if (key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &vif->keys[key_index];
+ memset(key, 0, sizeof(struct ath6kl_key));
+
+ if (pairwise)
+ key_usage = PAIRWISE_USAGE;
+ else
+ key_usage = GROUP_USAGE;
+
+ seq_len = params->seq_len;
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+ seq_len > ATH6KL_KEY_SEQ_LEN) {
+ /* Only first half of the WPI PN is configured */
+ seq_len = ATH6KL_KEY_SEQ_LEN;
+ }
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ key_type = WAPI_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
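+ /*
+ * For PSK, installing the group key means the handshake completed, so
+ * cancel the disconnect watchdog armed at connect time.
+ */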
+ if (((vif->auth_mode == WPA_PSK_AUTH) ||
+ (vif->auth_mode == WPA2_PSK_AUTH)) &&
+ (key_usage & GROUP_USAGE))
+ del_timer(&vif->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len);
+
+ if (vif->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
+ key_type == WAPI_CRYPT)) {
+ ar->ap_mode_bkey.valid = true;
+ ar->ap_mode_bkey.key_index = key_index;
+ ar->ap_mode_bkey.key_type = key_type;
+ ar->ap_mode_bkey.key_len = key->key_len;
+ memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
+ if (!test_bit(CONNECTED, &vif->flags)) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay initial group key configuration until AP mode has been started\n");
+ /*
+ * The key will be set in ath6kl_connect_ap_mode() once
+ * the connected event is received from the target.
+ */
+ return 0;
+ }
+ }
+
+ if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &vif->flags)) {
+ /*
+ * Store the key locally so that it can be re-configured after
+ * the AP mode has properly started
+ * (ath6kl_install_static_wep_keys).
+ */
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay WEP key configuration until AP mode has been started\n");
+ vif->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(vif->wep_key_list[key_index].key, key->key,
+ key->key_len);
+ return 0;
+ }
+
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->seq_len, key->key,
+ KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ if (!vif->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d is empty\n", __func__, key_index);
+ return 0;
+ }
+
+ vif->keys[key_index].key_len = 0;
+
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
+}
+
+static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie,
+ struct key_params *))
+{
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ struct key_params params;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &vif->keys[key_index];
+ memset(&params, 0, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+ callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ u8 key_usage;
+ enum crypto_type key_type = NONE_CRYPT;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n",
+ __func__, key_index);
+ return -ENOENT;
+ }
+
+ if (!vif->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
+ __func__, key_index);
+ return -EINVAL;
+ }
+
+ vif->def_txkey_index = key_index;
+ key = &vif->keys[vif->def_txkey_index];
+ key_usage = GROUP_USAGE;
+ if (vif->prwise_crypto == WEP_CRYPT)
+ key_usage |= TX_USAGE;
+ if (unicast)
+ key_type = vif->prwise_crypto;
+ if (multicast)
+ key_type = vif->grp_crypto;
+
+ if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
+ return 0; /* Delay until AP mode has been started */
+
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->seq_len,
+ key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
+}
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
+ bool ismcast)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
+
+ cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
+ GFP_KERNEL);
+}
+
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
+ changed);
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
+ if (ret != 0) {
+ ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type,
+ int mbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+ int dbm = MBM_TO_DBM(mbm);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
+ type, dbm);
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->tx_pwr = dbm;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
+ __func__, type);
+ return -EOPNOTSUPP;
+ }
+
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, dbm);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ int *dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (test_bit(CONNECTED, &vif->flags)) {
+ ar->tx_pwr = 0;
+
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
+ ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ 5 * HZ);
+
+ if (signal_pending(current)) {
+ ath6kl_err("target did not respond\n");
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->tx_pwr;
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct wmi_power_mode_cmd mode;
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
+ __func__, pmgmt, timeout);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (pmgmt) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ mode.pwr_mode = REC_POWER;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ mode.pwr_mode = MAX_PERF_POWER;
+ }
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+ mode.pwr_mode) != 0) {
+ ath6kl_err("wmi_powermode_cmd failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct wireless_dev *wdev;
+ u8 if_idx, nw_type;
+
+ if (ar->num_vif == ar->vif_max) {
+ ath6kl_err("Reached maximum number of supported vif\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+ ath6kl_err("Not a supported interface type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ wdev = ath6kl_interface_add(ar, name, name_assign_type, type, if_idx, nw_type);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ ar->num_vif++;
+
+ return wdev;
+}
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct ath6kl_vif *vif = netdev_priv(wdev->netdev);
+
+ spin_lock_bh(&ar->list_lock);
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
+
+ rtnl_lock();
+ ath6kl_cfg80211_vif_cleanup(vif);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
+
+ /*
+ * Don't bring up p2p on an interface which is not initialized
+ * for p2p operation where fw does not have capability to switch
+ * dynamically between non-p2p and p2p type interface.
+ */
+ if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ vif->ar->fw_capabilities) &&
+ (type == NL80211_IFTYPE_P2P_CLIENT ||
+ type == NL80211_IFTYPE_P2P_GO)) {
+ if (vif->ar->vif_max == 1) {
+ if (vif->fw_vif_idx != 0)
+ return -EINVAL;
+ else
+ goto set_iface_type;
+ }
+
+ for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
+ if (i == vif->fw_vif_idx)
+ break;
+ }
+
+ if (i == vif->ar->vif_max) {
+ ath6kl_err("Invalid interface to bring up P2P\n");
+ return -EINVAL;
+ }
+ }
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+
+set_iface_type:
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ vif->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ vif->next_mode = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ vif->next_mode = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ vif->wdev.iftype = type;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ int status;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ vif->ssid_len = ibss_param->ssid_len;
+ memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
+
+ if (ibss_param->chandef.chan)
+ vif->ch_hint = ibss_param->chandef.chan->center_freq;
+
+ if (ibss_param->channel_fixed) {
+ /*
+ * TODO: channel_fixed: the channel should be fixed; do not
+ * search for IBSSs to join on other channels. The target
+ * firmware does not support this feature and needs to be
+ * updated.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
+ memcpy(vif->req_bssid, ibss_param->bssid,
+ sizeof(vif->req_bssid));
+
+ ath6kl_set_wpa_version(vif, 0);
+
+ status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ if (status)
+ return status;
+
+ if (ibss_param->privacy) {
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ath6kl_set_cipher(vif, 0, true);
+ ath6kl_set_cipher(vif, 0, false);
+ }
+
+ vif->nw_type = vif->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, SUBTYPE_NONE);
+ set_bit(CONNECT_PEND, &vif->flags);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
+
+ return 0;
+}
+
+static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ CCKM_KRK_CIPHER_SUITE,
+ WLAN_CIPHER_SUITE_SMS4,
+};
+
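+ /* Target reports TX rates in kbit/s (e.g. 54000 = 54 Mbit/s) */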
+static bool is_rate_legacy(s32 rate)
+{
+ static const s32 legacy[] = { 1000, 2000, 5500, 11000,
+ 6000, 9000, 12000, 18000, 24000,
+ 36000, 48000, 54000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy); i++)
+ if (rate == legacy[i])
+ return true;
+
+ return false;
+}
+
+static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
+ 52000, 58500, 65000, 72200
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht20); i++) {
+ if (rate == ht20[i]) {
+ if (i == ARRAY_SIZE(ht20) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht40[] = { 13500, 27000, 40500, 54000,
+ 81000, 108000, 121500, 135000,
+ 150000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht40); i++) {
+ if (rate == ht40[i]) {
+ if (i == ARRAY_SIZE(ht40) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ long left;
+ bool sgi;
+ s32 rate;
+ int ret;
+ u8 mcs;
+
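+ /* Statistics are only available for the BSS we are associated with */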
+ if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
+ return -ENOENT;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
+
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
+
+ if (ret != 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &vif->flags),
+ WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left == 0)
+ return -ETIMEDOUT;
+ else if (left < 0)
+ return left;
+
+ if (vif->target_stats.rx_byte) {
+ sinfo->rx_bytes = vif->target_stats.rx_byte;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->rx_packets = vif->target_stats.rx_pkt;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (vif->target_stats.tx_byte) {
+ sinfo->tx_bytes = vif->target_stats.tx_byte;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->tx_packets = vif->target_stats.tx_pkt;
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ sinfo->signal = vif->target_stats.cs_rssi;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+
+ rate = vif->target_stats.tx_ucast_rate;
+
+ if (is_rate_legacy(rate)) {
+ sinfo->txrate.legacy = rate / 100;
+ } else if (is_rate_ht20(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ sinfo->txrate.bw = RATE_INFO_BW_20;
+ } else if (is_rate_ht40(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.bw = RATE_INFO_BW_40;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "invalid rate from stats: %d\n", rate);
+ ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
+ return 0;
+ }
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+
+ if (test_bit(CONNECTED, &vif->flags) &&
+ test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+ vif->nw_type == INFRA_NETWORK) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+ sinfo->bss_param.flags = 0;
+ sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
+ pmksa->pmkid, true);
+}
+
+static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
+ pmksa->pmkid, false);
+}
+
+static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ if (test_bit(CONNECTED, &vif->flags))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bssid, NULL, false);
+ return 0;
+}
+
+static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
+ struct cfg80211_wowlan *wow, u32 *filter)
+{
+ int ret, pos;
+ u8 mask[WOW_PATTERN_SIZE];
+ u16 i;
+
+ /* Configure the patterns that we received from the user. */
+ for (i = 0; i < wow->n_patterns; i++) {
+ /*
+ * Convert the given nl80211-specific mask value to the equivalent
+ * driver-specific mask value and send it to the chip along with
+ * the patterns. For example, if the mask value defined in
+ * struct cfg80211_wowlan is 0xA (binary 1010), the equivalent
+ * driver-specific mask value is "0xFF 0x00 0xFF 0x00".
+ */
+ memset(&mask, 0, sizeof(mask));
+ for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+ if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+ mask[pos] = 0xFF;
+ }
+ /*
+ * Note: the pattern's offset is not passed as part of the wowlan
+ * parameter from the CFG layer, so it is always passed as ZERO
+ * to the firmware. This means the given WOW patterns are always
+ * matched from the first byte of the received packet in the
+ * firmware.
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ wow->patterns[i].pattern_len,
+ 0 /* pattern offset */,
+ wow->patterns[i].pattern, mask);
+ if (ret)
+ return ret;
+ }
+
+ if (wow->disconnect)
+ *filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+ if (wow->magic_pkt)
+ *filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+ if (wow->gtk_rekey_failure)
+ *filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+ if (wow->eap_identity_req)
+ *filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+ if (wow->four_way_handshake)
+ *filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+ return 0;
+}
+
+static int ath6kl_wow_ap(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+ static const u8 unicst_pattern[] = { 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08 };
+ static const u8 unicst_mask[] = { 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x7f };
+ u8 unicst_offset = 0;
+ static const u8 arp_pattern[] = { 0x08, 0x06 };
+ static const u8 arp_mask[] = { 0xff, 0xff };
+ u8 arp_offset = 20;
+ static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
+ static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
+ u8 discvr_offset = 38;
+ static const u8 dhcp_pattern[] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x43 /* port 67 */ };
+ static const u8 dhcp_mask[] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff /* port 67 */ };
+ u8 dhcp_offset = 0;
+ int ret;
+
+ /* Setup unicast IP, EAPOL-like and ARP pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(unicst_pattern), unicst_offset,
+ unicst_pattern, unicst_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW unicast IP pattern\n");
+ return ret;
+ }
+
+ /* Setup all ARP pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(arp_pattern), arp_offset,
+ arp_pattern, arp_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW ARP pattern\n");
+ return ret;
+ }
+
+ /*
+ * Setup multicast pattern for mDNS 224.0.0.251,
+ * SSDP 239.255.255.250 and LLMNR 224.0.0.252
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(discvr_pattern), discvr_offset,
+ discvr_pattern, discvr_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
+ return ret;
+ }
+
+ /* Setup all DHCP broadcast pkt pattern */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(dhcp_pattern), dhcp_offset,
+ dhcp_pattern, dhcp_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW DHCP broadcast pattern\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+ struct net_device *ndev = vif->ndev;
+ static const u8 discvr_pattern[] = { 0xe0, 0x00, 0x00, 0xf8 };
+ static const u8 discvr_mask[] = { 0xf0, 0x00, 0x00, 0xf8 };
+ u8 discvr_offset = 38;
+ u8 mac_mask[ETH_ALEN];
+ int ret;
+
+ /* Setup unicast pkt pattern */
+ eth_broadcast_addr(mac_mask);
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ ETH_ALEN, 0, ndev->dev_addr,
+ mac_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW unicast pattern\n");
+ return ret;
+ }
+
+ /*
+ * Setup multicast pattern for mDNS 224.0.0.251,
+ * SSDP 239.255.255.250 and LLMNR 224.0.0.252
+ */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (ndev->flags & IFF_MULTICAST && netdev_mc_count(ndev) > 0)) {
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ sizeof(discvr_pattern), discvr_offset,
+ discvr_pattern, discvr_mask);
+ if (ret) {
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
+{
+ return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+}
+
+static bool is_ctrl_ep_empty(struct ath6kl *ar)
+{
+ return !ar->tx_pending[ar->ctrl_ep];
+}
+
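+ /*
+ * Put the target into host-asleep mode and wait both for the firmware to
+ * acknowledge the command and for the WMI control endpoint to drain.
+ */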
+static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+ int ret, left;
+
+ clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret)
+ return ret;
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ is_hsleep_mode_procsed(vif),
+ WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
+ left);
+ ret = left;
+ }
+
+ if (ar->tx_pending[ar->ctrl_ep]) {
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ is_ctrl_ep_empty(ar),
+ WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("clear wmi ctrl data timeout\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+ ret = left;
+ }
+ }
+
+ return ret;
+}
+
+static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
+ struct cfg80211_wowlan *wow, u32 *filter)
+{
+ struct ath6kl *ar = vif->ar;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ int ret;
+ u16 i, bmiss_time;
+ __be32 ips[MAX_IP_ADDRS];
+ u8 index = 0;
+
+ if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, false);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear existing WOW patterns */
+ for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+ ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+ WOW_LIST_ID, i);
+
+ /*
+ * Skip the default WOW pattern configuration
+ * if the driver receives any WOW patterns from
+ * the user.
+ */
+ if (wow)
+ ret = ath6kl_wow_usr(ar, vif, wow, filter);
+ else if (vif->nw_type == AP_NETWORK)
+ ret = ath6kl_wow_ap(ar, vif);
+ else
+ ret = ath6kl_wow_sta(ar, vif);
+
+ if (ret)
+ return ret;
+
+ netif_stop_queue(vif->ndev);
+
+ if (vif->nw_type != AP_NETWORK) {
+ ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_MAX_WOW_LISTEN_INTL,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Set listen interval x 15 times as bmiss time */
+ bmiss_time = ATH6KL_MAX_WOW_LISTEN_INTL * 15;
+ if (bmiss_time > ATH6KL_MAX_BMISS_TIME)
+ bmiss_time = ATH6KL_MAX_BMISS_TIME;
+
+ ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
+ bmiss_time, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ 0xFFFF, 0, 0xFFFF, 0, 0, 0,
+ 0, 0, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup own IP addr for ARP agent. */
+ in_dev = __in_dev_get_rtnl(vif->ndev);
+ if (!in_dev)
+ return 0;
+
+ ifa = in_dev->ifa_list;
+ memset(&ips, 0, sizeof(ips));
+
+ /* Configure IP addr only if IP address count < MAX_IP_ADDRS */
+ while (index < MAX_IP_ADDRS && ifa) {
+ ips[index] = ifa->ifa_local;
+ ifa = ifa->ifa_next;
+ index++;
+ }
+
+ if (ifa) {
+ ath6kl_err("total IP addr count is exceeding fw limit\n");
+ return -EINVAL;
+ }
+
+ ret = ath6kl_wmi_set_ip_cmd(ar->wmi, vif->fw_vif_idx, ips[0], ips[1]);
+ if (ret) {
+ ath6kl_err("fail to setup ip for arp agent\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *first_vif, *vif;
+ int ret = 0;
+ u32 filter = 0;
+ bool connected = false;
+
+ /* enter / leave wow suspend on first vif always */
+ first_vif = ath6kl_vif_first(ar);
+ if (WARN_ON(unlikely(!first_vif)) ||
+ !ath6kl_cfg80211_ready(first_vif))
+ return -EIO;
+
+ if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
+ return -EINVAL;
+
+ /* install filters for each connected vif */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (!test_bit(CONNECTED, &vif->flags) ||
+ !ath6kl_cfg80211_ready(vif))
+ continue;
+ connected = true;
+
+ ret = ath6kl_wow_suspend_vif(vif, wow, &filter);
+ if (ret)
+ break;
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ if (!connected)
+ return -ENOTCONN;
+ else if (ret)
+ return ret;
+
+ ar->state = ATH6KL_STATE_SUSPENDING;
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ filter,
+ WOW_HOST_REQ_DELAY);
+ if (ret)
+ return ret;
+
+ return ath6kl_cfg80211_host_sleep(ar, first_vif);
+}
+
+static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ int ret;
+
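+ /* Restore the scan, listen interval and bmiss settings overridden for WOW */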
+ if (vif->nw_type != AP_NETWORK) {
+ ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->listen_intvl_t, 0);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_bmisstime_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bmiss_time_t, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, true);
+ if (ret)
+ return ret;
+ }
+
+ netif_wake_queue(vif->ndev);
+
+ return 0;
+}
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (WARN_ON(unlikely(!vif)) ||
+ !ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ ar->state = ATH6KL_STATE_RESUMING;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ if (ret) {
+ ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (!test_bit(CONNECTED, &vif->flags) ||
+ !ath6kl_cfg80211_ready(vif))
+ continue;
+ ret = ath6kl_wow_resume_vif(vif);
+ if (ret)
+ break;
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ if (ret)
+ goto cleanup;
+
+ ar->state = ATH6KL_STATE_ON;
+ return 0;
+
+cleanup:
+ ar->state = ATH6KL_STATE_WOW;
+ return ret;
+}
+
+static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("deepsleep failed as wmi is not ready\n");
+ return -EIO;
+ }
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ /* Save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+ if (ret)
+ return ret;
+
+ /* Disable WOW mode */
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_DISABLE,
+ 0, 0);
+ if (ret)
+ return ret;
+
+ /* Flush all non control pkts in TX path */
+ ath6kl_tx_data_cleanup(ar);
+
+ ret = ath6kl_cfg80211_host_sleep(ar, vif);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+
+ if (!vif)
+ return -EIO;
+
+ if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+ ar->wmi->saved_pwr_mode);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ if (ret)
+ return ret;
+
+ ar->state = ATH6KL_STATE_ON;
+
+ /* Reset scan parameter to default values */
+ ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *vif;
+ enum ath6kl_state prev_state;
+ int ret;
+
+ switch (mode) {
+ case ATH6KL_CFG_SUSPEND_WOW:
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+ /* Flush all non control pkts in TX path */
+ ath6kl_tx_data_cleanup(ar);
+
+ prev_state = ar->state;
+
+ ret = ath6kl_wow_suspend(ar, wow);
+ if (ret) {
+ ar->state = prev_state;
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_WOW;
+ break;
+
+ case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");
+
+ ret = ath6kl_cfg80211_deepsleep_suspend(ar);
+ if (ret) {
+ ath6kl_err("deepsleep suspend failed: %d\n", ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (ar->state == ATH6KL_STATE_OFF) {
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "suspend hw off, no action for cutpower\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+ ret = ath6kl_init_hw_stop(ar);
+ if (ret) {
+ ath6kl_warn("failed to stop hw during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_CUTPOWER;
+
+ break;
+
+ default:
+ break;
+ }
+
+ list_for_each_entry(vif, &ar->vif_list, list)
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+ int ret;
+
+ switch (ar->state) {
+ case ATH6KL_STATE_WOW:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+ ret = ath6kl_wow_resume(ar);
+ if (ret) {
+ ath6kl_warn("wow mode resume failed: %d\n", ret);
+ return ret;
+ }
+
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");
+
+ ret = ath6kl_cfg80211_deepsleep_resume(ar);
+ if (ret) {
+ ath6kl_warn("deep sleep resume failed: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath6kl_cfg80211_resume);
+
+#ifdef CONFIG_PM
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ ath6kl_recovery_suspend(ar);
+
+ return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ int err;
+
+ err = ath6kl_hif_resume(ar);
+ if (err)
+ return err;
+
+ ath6kl_recovery_resume(ar);
+
+ return 0;
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wake up and keep power. The target pulls sdio data line to
+ * wake up the host when WOW pattern matches. This causes sdio irq handler
+ * is being called in the host side which internally hits ath6kl's RX path.
+ *
+ * Since sdio interrupt is not disabled, RX path executes even before
+ * the host executes the actual resume operation from PM module.
+ *
+ * In the current scenario, WOW resume should happen before start processing
+ * any data from the target. So It's required to perform WOW resume in RX path.
+ * Ideally we should perform WOW resume only in the actual platform
+ * resume path. This area needs bit rework to avoid WOW resume in RX path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+ if (ar->state == ATH6KL_STATE_SUSPENDING)
+ return;
+
+ if (ar->state == ATH6KL_STATE_WOW)
+ ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+}
+#endif
+
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+ bool ht_enable)
+{
+ struct ath6kl_htcap *htcap = &vif->htcap[band];
+
+ if (htcap->ht_enable == ht_enable)
+ return 0;
+
+ if (ht_enable) {
+ /* Set default ht capabilities */
+ htcap->ht_enable = true;
+ htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+ ath6kl_g_htcap : ath6kl_a_htcap;
+ htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
+ } else /* Disable ht */
+ memset(htcap, 0, sizeof(*htcap));
+
+ return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ band, htcap);
+}
+
+static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
+{
+ struct wiphy *wiphy = vif->ar->wiphy;
+ int band, ret = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+
+ ret = ath6kl_set_htcap(vif, band,
+ wiphy->bands[band]->ht_cap.ht_supported);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
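+/*
+ * A vendor-specific IE carrying the Wi-Fi Alliance OUI (50:6f:9a) with
+ * OUI type 9 is a P2P IE.
+ */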
+static bool ath6kl_is_p2p_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
+ pos[2] == 0x50 && pos[3] == 0x6f &&
+ pos[4] == 0x9a && pos[5] == 0x09;
+}
+
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+ const u8 *ies, size_t ies_len)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Filter out P2P IE(s) since they will be included depending on
+ * the Probe Request frame in ath6kl_send_go_probe_resp().
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!ath6kl_is_p2p_ie(pos)) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_RESP, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_set_ies(struct ath6kl_vif *vif,
+ struct cfg80211_beacon_data *info)
+{
+ struct ath6kl *ar = vif->ar;
+ int res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ if (res)
+ return res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
+ info->proberesp_ies_len);
+ if (res)
+ return res;
+
+ /* this also clears IE in fw if it's not set */
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ if (res)
+ return res;
+
+ return 0;
+}
+
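+/*
+ * Extract the two-byte RSN Capabilities field from the RSN IE in the
+ * beacon tail by walking past the version, group cipher suite, pairwise
+ * cipher suite list and AKM suite list.
+ */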
+static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
+ u8 *rsn_capab)
+{
+ const u8 *rsn_ie;
+ size_t rsn_ie_len;
+ u16 cnt;
+
+ if (!beacon->tail)
+ return -EINVAL;
+
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
+ if (!rsn_ie)
+ return -EINVAL;
+
+ rsn_ie_len = *(rsn_ie + 1);
+ /* skip element id and length */
+ rsn_ie += 2;
+
+ /* skip version */
+ if (rsn_ie_len < 2)
+ return -EINVAL;
+ rsn_ie += 2;
+ rsn_ie_len -= 2;
+
+ /* skip group cipher suite */
+ if (rsn_ie_len < 4)
+ return 0;
+ rsn_ie += 4;
+ rsn_ie_len -= 4;
+
+ /* skip pairwise cipher suite */
+ if (rsn_ie_len < 2)
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
+ rsn_ie += (2 + cnt * 4);
+ rsn_ie_len -= (2 + cnt * 4);
+
+ /* skip akm suite */
+ if (rsn_ie_len < 2)
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
+ rsn_ie += (2 + cnt * 4);
+ rsn_ie_len -= (2 + cnt * 4);
+
+ if (rsn_ie_len < 2)
+ return 0;
+
+ memcpy(rsn_capab, rsn_ie, 2);
+
+ return 0;
+}
+
+static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *info)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ieee80211_mgmt *mgmt;
+ bool hidden = false;
+ u8 *ies;
+ int ies_len;
+ struct wmi_connect_cmd p;
+ int res;
+ int i, ret;
+ u16 rsn_capab = 0;
+ int inactivity_timeout = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (vif->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ res = ath6kl_set_ies(vif, &info->beacon);
+
+ ar->ap_mode_bkey.valid = false;
+
+ ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
+ info->beacon_interval);
+
+ if (ret)
+ ath6kl_warn("Failed to set beacon interval: %d\n", ret);
+
+ ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
+ info->dtim_period);
+
+ /* ignore error, just print a warning and continue normally */
+ if (ret)
+ ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);
+
+ if (info->beacon.head == NULL)
+ return -EINVAL;
+ mgmt = (struct ieee80211_mgmt *) info->beacon.head;
+ ies = mgmt->u.beacon.variable;
+ if (ies > info->beacon.head + info->beacon.head_len)
+ return -EINVAL;
+ ies_len = info->beacon.head + info->beacon.head_len - ies;
+
+ if (info->ssid == NULL)
+ return -EINVAL;
+ memcpy(vif->ssid, info->ssid, info->ssid_len);
+ vif->ssid_len = info->ssid_len;
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
+ hidden = true;
+
+ res = ath6kl_wmi_ap_hidden_ssid(ar->wmi, vif->fw_vif_idx, hidden);
+ if (res)
+ return res;
+
+ ret = ath6kl_set_auth_type(vif, info->auth_type);
+ if (ret)
+ return ret;
+
+ memset(&p, 0, sizeof(p));
+
+ for (i = 0; i < info->crypto.n_akm_suites; i++) {
+ switch (info->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_AUTH;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_PSK_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_PSK_AUTH;
+ break;
+ }
+ }
+ if (p.auth_mode == 0)
+ p.auth_mode = NONE_AUTH;
+ vif->auth_mode = p.auth_mode;
+
+ for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
+ switch (info->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.prwise_crypto_type |= WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.prwise_crypto_type |= TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.prwise_crypto_type |= AES_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.prwise_crypto_type |= WAPI_CRYPT;
+ break;
+ }
+ }
+ if (p.prwise_crypto_type == 0) {
+ p.prwise_crypto_type = NONE_CRYPT;
+ ath6kl_set_cipher(vif, 0, true);
+ } else if (info->crypto.n_ciphers_pairwise == 1) {
+ ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+ }
+
+ switch (info->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.grp_crypto_type = WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.grp_crypto_type = TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.grp_crypto_type = AES_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.grp_crypto_type = WAPI_CRYPT;
+ break;
+ default:
+ p.grp_crypto_type = NONE_CRYPT;
+ break;
+ }
+ ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
+
+ p.nw_type = AP_NETWORK;
+ vif->nw_type = vif->next_mode;
+
+ p.ssid_len = vif->ssid_len;
+ memcpy(p.ssid, vif->ssid, vif->ssid_len);
+ p.dot11_auth_mode = vif->dot11_auth_mode;
+ p.ch = cpu_to_le16(info->chandef.chan->center_freq);
+
+ /* Enable uAPSD support by default */
+ res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
+ if (res < 0)
+ return res;
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ p.nw_subtype = SUBTYPE_P2PGO;
+ } else {
+ /*
+ * Due to a firmware limitation, it is not possible to
+ * perform P2P mgmt operations in AP mode.
+ */
+ p.nw_subtype = SUBTYPE_NONE;
+ }
+
+ if (info->inactivity_timeout) {
+ inactivity_timeout = info->inactivity_timeout;
+
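+ /*
+ * Firmware with the AP_INACTIVITY_MINS capability expects the
+ * timeout in minutes rather than seconds, so round up.
+ */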
+ if (test_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+ ar->fw_capabilities))
+ inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
+ 60);
+
+ res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
+ inactivity_timeout);
+ if (res < 0)
+ return res;
+ }
+
+ if (ath6kl_set_htcap(vif, info->chandef.chan->band,
+ cfg80211_get_chandef_type(&info->chandef)
+ != NL80211_CHAN_NO_HT))
+ return -EIO;
+
+ /*
+ * Get the PTKSA replay counter from the RSN IE. The supplicant
+ * will use the RSN IE in the M3 message, and the firmware has to
+ * advertise the same in beacon/probe response frames. Send the
+ * complete RSN IE capabilities field to the firmware.
+ */
+ if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities)) {
+ res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
+ WLAN_EID_RSN, WMI_RSN_IE_CAPB,
+ (const u8 *) &rsn_capab,
+ sizeof(rsn_capab));
+ vif->rsn_capab = rsn_capab;
+ if (res < 0)
+ return res;
+ }
+
+ memcpy(&vif->profile, &p, sizeof(p));
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_beacon_data *beacon)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (vif->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ return ath6kl_set_ies(vif, beacon);
+}
+
+static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ if (vif->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+ if (!test_bit(CONNECTED, &vif->flags))
+ return -ENOTCONN;
+
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+ clear_bit(CONNECTED, &vif->flags);
+
+ /* Restore ht setting in firmware */
+ return ath6kl_restore_htcap(vif);
+}
+
+static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
+ struct station_del_parameters *params)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ const u8 *addr = params->mac ? params->mac : bcast_addr;
+
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, WMI_AP_DEAUTH,
+ addr, WLAN_REASON_PREV_AUTH_NOT_VALID);
+}
+
+static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ int err;
+
+ if (vif->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ err = cfg80211_check_station_change(wiphy, params,
+ CFG80211_STA_AP_MLME_CLIENT);
+ if (err)
+ return err;
+
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_AUTHORIZE, mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_UNAUTHORIZE, mac, 0);
+}
+
+static int ath6kl_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
+ u32 id;
+
+ /*
+ * TODO: if a remain-on-channel operation is already pending or
+ * ongoing, return -EBUSY.
+ */
+ id = ++vif->last_roc_id;
+ if (id == 0) {
+ /* Do not use 0 as the cookie value */
+ id = ++vif->last_roc_id;
+ }
+ *cookie = id;
+
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+ chan->center_freq, duration);
+}
+
+static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
+
+ if (cookie != vif->last_roc_id)
+ return -ENOENT;
+ vif->last_cancel_roc_id = cookie;
+
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
+}
+
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+ const u8 *buf, size_t len,
+ unsigned int freq)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *p2p;
+ int p2p_len;
+ int ret;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+
+ /* Include P2P IE(s) from the frame generated in user space. */
+
+ p2p = kmalloc(len, GFP_KERNEL);
+ if (p2p == NULL)
+ return -ENOMEM;
+ p2p_len = 0;
+
+ pos = mgmt->u.probe_resp.variable;
+ while (pos + 1 < buf + len) {
+ if (pos + 2 + pos[1] > buf + len)
+ break;
+ if (ath6kl_is_p2p_ie(pos)) {
+ memcpy(p2p + p2p_len, pos, 2 + pos[1]);
+ p2p_len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+ mgmt->da, p2p, p2p_len);
+ kfree(p2p);
+ return ret;
+}
+
+static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
+ u32 id,
+ u32 freq,
+ u32 wait,
+ const u8 *buf,
+ size_t len,
+ bool *more_data,
+ bool no_cck)
+{
+ struct ieee80211_mgmt *mgmt;
+ struct ath6kl_sta *conn;
+ bool is_psq_empty = false;
+ struct ath6kl_mgmt_buff *mgmt_buf;
+ size_t mgmt_buf_size;
+ struct ath6kl *ar = vif->ar;
+
+ mgmt = (struct ieee80211_mgmt *) buf;
+ if (is_multicast_ether_addr(mgmt->da))
+ return false;
+
+ conn = ath6kl_find_sta(vif, mgmt->da);
+ if (!conn)
+ return false;
+
+ if (conn->sta_flags & STA_PS_SLEEP) {
+ if (!(conn->sta_flags & STA_PS_POLLED)) {
+ /* Queue the frames if the STA is sleeping */
+ mgmt_buf_size = len + sizeof(struct ath6kl_mgmt_buff);
+ mgmt_buf = kmalloc(mgmt_buf_size, GFP_KERNEL);
+ if (!mgmt_buf)
+ return false;
+
+ INIT_LIST_HEAD(&mgmt_buf->list);
+ mgmt_buf->id = id;
+ mgmt_buf->freq = freq;
+ mgmt_buf->wait = wait;
+ mgmt_buf->len = len;
+ mgmt_buf->no_cck = no_cck;
+ memcpy(mgmt_buf->buf, buf, len);
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq) &&
+ (conn->mgmt_psq_len == 0);
+ list_add_tail(&mgmt_buf->list, &conn->mgmt_psq);
+ conn->mgmt_psq_len++;
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 1);
+ return true;
+ }
+
+ /*
+ * This tx is because of a PsPoll.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq) || (conn->mgmt_psq_len != 0))
+ *more_data = true;
+ spin_unlock_bh(&conn->psq_lock);
+ }
+
+ return false;
+}
+
+/*
+ * Check whether the SSID element in the Probe Response is longer than the
+ * P2P wildcard SSID ("DIRECT-").
+ */
+static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
+{
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+
+ /* variable[1] contains the SSID tag length */
+ if (buf + len >= &mgmt->u.probe_resp.variable[1] &&
+ (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
+ return true;
+ }
+
+ return false;
+}
+
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ unsigned int wait = params->wait;
+ bool no_cck = params->no_cck;
+ u32 id, freq;
+ const struct ieee80211_mgmt *mgmt;
+ bool more_data, queued;
+
+ /* default to the current channel, but use the one specified as argument
+ * if any
+ */
+ freq = vif->ch_hint;
+ if (chan)
+ freq = chan->center_freq;
+
+ /* never send freq zero to the firmware */
+ if (WARN_ON(freq == 0))
+ return -EINVAL;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
+ ieee80211_is_probe_resp(mgmt->frame_control) &&
+ ath6kl_is_p2p_go_ssid(buf, len)) {
+ /*
+ * Send Probe Response frame in GO mode using a separate WMI
+ * command to allow the target to fill in the generic IEs.
+ */
+ *cookie = 0; /* TX status not supported */
+ return ath6kl_send_go_probe_resp(vif, buf, len, freq);
+ }
+
+ id = vif->send_action_id++;
+ if (id == 0) {
+ /*
+ * 0 is a reserved value in the WMI command and shall not be
+ * used for the command.
+ */
+ id = vif->send_action_id++;
+ }
+
+ *cookie = id;
+
+ /* AP mode Power saving processing */
+ if (vif->nw_type == AP_NETWORK) {
+ queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len,
+ &more_data, no_cck);
+ if (queued)
+ return 0;
+ }
+
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq,
+ wait, buf, len, no_cck);
+}
+
+static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
+ __func__, frame_type, reg);
+ if (frame_type == IEEE80211_STYPE_PROBE_REQ) {
+ /*
+ * Note: This notification callback is not allowed to sleep, so
+ * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
+ * hardcode the target to report Probe Request frames all the time.
+ */
+ vif->probe_req_report = reg;
+ }
+}
+
+static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u16 interval;
+ int ret, rssi_thold;
+ int n_match_sets = request->n_match_sets;
+
+ /*
+ * If there's a matchset w/o an SSID, then assume it's just for
+ * the RSSI (nothing else is currently supported) and ignore it.
+ * The device only supports a global RSSI filter that we set below.
+ */
+ if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+ n_match_sets = 0;
+
+ if (ar->state != ATH6KL_STATE_ON)
+ return -EIO;
+
+ if (vif->sme_state != SME_DISCONNECTED)
+ return -EBUSY;
+
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids,
+ request->match_sets,
+ n_match_sets);
+ if (ret < 0)
+ return ret;
+
+ if (!n_match_sets) {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ MATCHED_SSID_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+ ar->fw_capabilities)) {
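+ /*
+ * 0 disables the filter; otherwise clamp to -127, presumably the
+ * limit of a signed 8-bit dBm value in the firmware.
+ */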
+ if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ rssi_thold = 0;
+ else if (request->min_rssi_thold < -127)
+ rssi_thold = -127;
+ else
+ rssi_thold = request->min_rssi_thold;
+
+ ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
+ rssi_thold);
+ if (ret) {
+ ath6kl_err("failed to set RSSI threshold for scan\n");
+ return ret;
+ }
+ }
+
+ /* fw uses seconds, also make sure that it's >0 */
+ interval = max_t(u16, 1, request->interval / 1000);
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ interval, interval,
+ vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
+
+ /* this also clears IE in fw if it's not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true);
+ if (ret)
+ return ret;
+
+ set_bit(SCHED_SCANNING, &vif->flags);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return -EIO;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx,
+ mask);
+}
+
+static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ if (vif->nw_type != INFRA_NETWORK ||
+ !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ if (vif->sme_state != SME_CONNECTED)
+ return -ENOTCONN;
+
+ /* save this since the firmware won't report the interval */
+ vif->txe_intvl = intvl;
+
+ return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx,
+ rate, pkts, intvl);
+}
+
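+/*
+ * Management frame subtype bitmaps advertised to cfg80211; the bit index is
+ * the frame subtype, hence the IEEE80211_STYPE_* values shifted right by 4.
+ */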
+static const struct ieee80211_txrx_stypes
+ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .add_virtual_intf = ath6kl_cfg80211_add_iface,
+ .del_virtual_intf = ath6kl_cfg80211_del_iface,
+ .change_virtual_intf = ath6kl_cfg80211_change_iface,
+ .scan = ath6kl_cfg80211_scan,
+ .connect = ath6kl_cfg80211_connect,
+ .disconnect = ath6kl_cfg80211_disconnect,
+ .add_key = ath6kl_cfg80211_add_key,
+ .get_key = ath6kl_cfg80211_get_key,
+ .del_key = ath6kl_cfg80211_del_key,
+ .set_default_key = ath6kl_cfg80211_set_default_key,
+ .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
+ .set_tx_power = ath6kl_cfg80211_set_txpower,
+ .get_tx_power = ath6kl_cfg80211_get_txpower,
+ .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
+ .join_ibss = ath6kl_cfg80211_join_ibss,
+ .leave_ibss = ath6kl_cfg80211_leave_ibss,
+ .get_station = ath6kl_get_station,
+ .set_pmksa = ath6kl_set_pmksa,
+ .del_pmksa = ath6kl_del_pmksa,
+ .flush_pmksa = ath6kl_flush_pmksa,
+ CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
+#ifdef CONFIG_PM
+ .suspend = __ath6kl_cfg80211_suspend,
+ .resume = __ath6kl_cfg80211_resume,
+#endif
+ .start_ap = ath6kl_start_ap,
+ .change_beacon = ath6kl_change_beacon,
+ .stop_ap = ath6kl_stop_ap,
+ .del_station = ath6kl_del_station,
+ .change_station = ath6kl_change_station,
+ .remain_on_channel = ath6kl_remain_on_channel,
+ .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
+ .mgmt_tx = ath6kl_mgmt_tx,
+ .mgmt_frame_register = ath6kl_mgmt_frame_register,
+ .sched_scan_start = ath6kl_cfg80211_sscan_start,
+ .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
+ .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
+ .set_cqm_txe_config = ath6kl_cfg80211_set_txe_config,
+};
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
+{
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ switch (vif->sme_state) {
+ case SME_DISCONNECTED:
+ break;
+ case SME_CONNECTING:
+ cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case SME_CONNECTED:
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ break;
+ }
+
+ if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+ (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)))
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
+
+ vif->sme_state = SME_DISCONNECTED;
+ clear_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+
+ /* Stop netdev queues, needed during recovery */
+ netif_stop_queue(vif->ndev);
+ netif_carrier_off(vif->ndev);
+
+ /* disable scanning */
+ if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+ ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
+ ath6kl_warn("failed to disable scan during stop\n");
+
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+}
+
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif && ar->state != ATH6KL_STATE_RECOVERY) {
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+ ath6kl_warn("ath6kl_deep_sleep_enable: wmi_powermode_cmd failed\n");
+ return;
+ }
+
+ /*
+ * FIXME: we should take ar->list_lock to protect changes in the
+ * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
+ * sleeps.
+ */
+ list_for_each_entry(vif, &ar->vif_list, list)
+ ath6kl_cfg80211_stop(vif);
+}
+
+static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ u32 rates[IEEE80211_NUM_BANDS];
+ int ret, i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+ request->alpha2[0], request->alpha2[1],
+ request->intersect ? " intersect" : "",
+ request->processed ? " processed" : "",
+ request->initiator, request->user_reg_hint_type);
+
+ if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
+ return;
+
+ ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
+ if (ret) {
+ ath6kl_err("failed to set regdomain: %d\n", ret);
+ return;
+ }
+
+ /*
+ * Firmware will apply the regdomain change only after a scan is
+ * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been
+ * changed.
+ */
+
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ if (wiphy->bands[i])
+ rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+
+
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false,
+ false, 0, ATH6KL_FG_SCAN_INTERVAL,
+ 0, NULL, false, rates);
+ if (ret) {
+ ath6kl_err("failed to start scan for a regdomain change: %d\n",
+ ret);
+ return;
+ }
+}
+
+static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
+{
+ vif->aggr_cntxt = aggr_init(vif);
+ if (!vif->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ return -ENOMEM;
+ }
+
+ setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) vif->ndev);
+ setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
+ (unsigned long) vif);
+
+ set_bit(WMM_ENABLED, &vif->flags);
+ spin_lock_init(&vif->if_lock);
+
+ INIT_LIST_HEAD(&vif->mc_filter);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
+{
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(vif->ndev);
+
+ clear_bit(WLAN_ENABLED, &vif->flags);
+
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
+
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+}
+
+void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ struct ath6kl_mc_filter *mc_filter, *tmp;
+
+ aggr_module_destroy(vif->aggr_cntxt);
+
+ ar->avail_idx_map |= BIT(vif->fw_vif_idx);
+
+ if (vif->nw_type == ADHOC_NETWORK)
+ ar->ibss_if_active = false;
+
+ list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
+ list_del(&mc_filter->list);
+ kfree(mc_filter);
+ }
+
+ unregister_netdevice(vif->ndev);
+
+ ar->num_vif--;
+}
+
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type)
+{
+ struct net_device *ndev;
+ struct ath6kl_vif *vif;
+
+ ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup);
+ if (!ndev)
+ return NULL;
+
+ vif = netdev_priv(ndev);
+ ndev->ieee80211_ptr = &vif->wdev;
+ vif->wdev.wiphy = ar->wiphy;
+ vif->ar = ar;
+ vif->ndev = ndev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+ vif->wdev.netdev = ndev;
+ vif->wdev.iftype = type;
+ vif->fw_vif_idx = fw_vif_idx;
+ vif->nw_type = nw_type;
+ vif->next_mode = nw_type;
+ vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
+ vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
+ vif->bg_scan_period = 0;
+ vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
+ vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
+
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
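+ /*
+ * Secondary interfaces get a locally administered MAC address derived
+ * from the base address: flip one bit per fw_vif_idx and set the
+ * locally administered bit (0x2).
+ */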
+ if (fw_vif_idx != 0) {
+ ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+ 0x2;
+ if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+ ar->fw_capabilities))
+ ndev->dev_addr[4] ^= 0x80;
+ }
+
+ init_netdev(ndev);
+
+ ath6kl_init_control_info(vif);
+
+ if (ath6kl_cfg80211_vif_init(vif))
+ goto err;
+
+ if (register_netdevice(ndev))
+ goto err;
+
+ ar->avail_idx_map &= ~BIT(fw_vif_idx);
+ vif->sme_state = SME_DISCONNECTED;
+ set_bit(WLAN_ENABLED, &vif->flags);
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+
+ if (type == NL80211_IFTYPE_ADHOC)
+ ar->ibss_if_active = true;
+
+ spin_lock_bh(&ar->list_lock);
+ list_add_tail(&vif->list, &ar->vif_list);
+ spin_unlock_bh(&ar->list_lock);
+
+ return &vif->wdev;
+
+err:
+ aggr_module_destroy(vif->aggr_cntxt);
+ free_netdev(ndev);
+ return NULL;
+}
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support ath6kl_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE,
+ .n_patterns = WOW_MAX_FILTERS_PER_LIST,
+ .pattern_min_len = 1,
+ .pattern_max_len = WOW_PATTERN_SIZE,
+};
+#endif
+
+int ath6kl_cfg80211_init(struct ath6kl *ar)
+{
+ struct wiphy *wiphy = ar->wiphy;
+ bool band_2gig = false, band_5gig = false, ht = false;
+ int ret;
+
+ wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wiphy->max_remain_on_channel_duration = 5000;
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wiphy, ar->dev);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ if (ar->p2p) {
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+ }
+
+ if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+ test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
+ wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
+ ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
+ }
+
+ /* max num of ssids that can be probed during scanning */
+ wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
+
+ /* max num of ssids that can be matched after scan */
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+ ar->fw_capabilities))
+ wiphy->max_match_sets = MAX_PROBED_SSIDS;
+
+ wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ switch (ar->hw.cap) {
+ case WMI_11AN_CAP:
+ ht = true;
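+ /* fall through */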
+ case WMI_11A_CAP:
+ band_5gig = true;
+ break;
+ case WMI_11GN_CAP:
+ ht = true;
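+ /* fall through */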
+ case WMI_11G_CAP:
+ band_2gig = true;
+ break;
+ case WMI_11AGN_CAP:
+ ht = true;
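+ /* fall through */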
+ case WMI_11AG_CAP:
+ band_2gig = true;
+ band_5gig = true;
+ break;
+ default:
+ ath6kl_err("invalid phy capability!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Even if the fw has HT support, advertise HT cap only when
+ * the firmware can override the RSN capabilities, otherwise the
+ * 4-way handshake would fail.
+ */
+ if (!(ht &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities))) {
+ ath6kl_band_2ghz.ht_cap.cap = 0;
+ ath6kl_band_2ghz.ht_cap.ht_supported = false;
+ ath6kl_band_5ghz.ht_cap.cap = 0;
+ ath6kl_band_5ghz.ht_cap.ht_supported = false;
+ }
+
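+ /*
+ * Advertise two-stream rates (MCS 0-15) when the firmware uses the
+ * 64-bit rate table, otherwise only single-stream MCS 0-7.
+ */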
+ if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+ ar->fw_capabilities)) {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ } else {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ }
+
+ if (band_2gig)
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ if (band_5gig)
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+#ifdef CONFIG_PM
+ wiphy->wowlan = &ath6kl_wowlan_support;
+#endif
+
+ wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
+
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+ WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
+ ar->fw_capabilities))
+ ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+
+ ar->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ ret = wiphy_register(wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ return ret;
+ }
+
+ ar->wiphy_registered = true;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_cleanup(struct ath6kl *ar)
+{
+ wiphy_unregister(ar->wiphy);
+
+ ar->wiphy_registered = false;
+}
+
+struct ath6kl *ath6kl_cfg80211_create(void)
+{
+ struct ath6kl *ar;
+ struct wiphy *wiphy;
+
+ /* create a new wiphy for use with cfg80211 */
+ wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+ if (!wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ return NULL;
+ }
+
+ ar = wiphy_priv(wiphy);
+ ar->wiphy = wiphy;
+
+ return ar;
+}
+
+/* Note: ar variable must not be accessed after calling this! */
+void ath6kl_cfg80211_destroy(struct ath6kl *ar)
+{
+ int i;
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++)
+ kfree(ar->sta_list[i].aggr_conn);
+
+ wiphy_free(ar->wiphy);
+}
+
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
new file mode 100644
index 000000000..5aa57a763
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH6KL_CFG80211_H
+#define ATH6KL_CFG80211_H
+
+enum ath6kl_cfg_suspend_mode {
+ ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ ATH6KL_CFG_SUSPEND_CUTPOWER,
+ ATH6KL_CFG_SUSPEND_WOW,
+};
+
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode);
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason);
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
+ bool ismcast);
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif);
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+
+int ath6kl_cfg80211_init(struct ath6kl *ar);
+void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
+
+struct ath6kl *ath6kl_cfg80211_create(void);
+void ath6kl_cfg80211_destroy(struct ath6kl *ar);
+
+#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
new file mode 100644
index 000000000..4f82e8632
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include <linux/netdevice.h>
+
+#define ATH6KL_MAX_IE 256
+
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
+
+/*
+ * Reflects the version of the binary interface exposed by the ATH6KL
+ * target firmware. It needs to be incremented by 1 for any firmware change
+ * that requires an upgrade of the host-side driver for the change to work
+ * correctly.
+ */
+#define ATH6KL_ABI_VERSION 1
+
+#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
+
+enum {
+ SIGNAL_QUALITY_METRICS_SNR = 0,
+ SIGNAL_QUALITY_METRICS_RSSI,
+ SIGNAL_QUALITY_METRICS_ALL,
+};
+
+/*
+ * Data Path
+ */
+
+#define WMI_MAX_TX_DATA_FRAME_LENGTH \
+ (1500 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
+/* An A-MSDU frame; the maximum A-MSDU length of the AR6003 is 3839 bytes */
+#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
+ (3840 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
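+/* padding needed to round the HTC frame header up to a 4-byte boundary */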
+#define EPPING_ALIGNMENT_PAD \
+ (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
+ - sizeof(struct htc_frame_hdr))
+
+struct ath6kl_llc_snap_hdr {
+ u8 dsap;
+ u8 ssap;
+ u8 cntl;
+ u8 org_code[3];
+ __be16 eth_type;
+} __packed;
+
+enum crypto_type {
+ NONE_CRYPT = 0x01,
+ WEP_CRYPT = 0x02,
+ TKIP_CRYPT = 0x04,
+ AES_CRYPT = 0x08,
+ WAPI_CRYPT = 0x10,
+};
+
+struct htc_endpoint_credit_dist;
+struct ath6kl;
+struct ath6kl_htcap;
+enum htc_credit_dist_reason;
+struct ath6kl_htc_credit_info;
+
+struct sk_buff *ath6kl_buf_alloc(int size);
+#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
new file mode 100644
index 000000000..4ec02cea0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include "debug.h"
+#include "hif-ops.h"
+#include "htc-ops.h"
+#include "cfg80211.h"
+
+unsigned int debug_mask;
+static unsigned int suspend_mode;
+static unsigned int wow_mode;
+static unsigned int uart_debug;
+static unsigned int ath6kl_p2p;
+static unsigned int testmode;
+static unsigned int recovery_enable;
+static unsigned int heart_beat_poll;
+
+module_param(debug_mask, uint, 0644);
+module_param(suspend_mode, uint, 0644);
+module_param(wow_mode, uint, 0644);
+module_param(uart_debug, uint, 0644);
+module_param(ath6kl_p2p, uint, 0644);
+module_param(testmode, uint, 0644);
+module_param(recovery_enable, uint, 0644);
+module_param(heart_beat_poll, uint, 0644);
+MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
+MODULE_PARM_DESC(heart_beat_poll,
+ "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
+
+void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
+{
+ ath6kl_htc_tx_complete(ar, skb);
+}
+EXPORT_SYMBOL(ath6kl_core_tx_complete);
+
+void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
+{
+ ath6kl_htc_rx_complete(ar, skb, pipe);
+}
+EXPORT_SYMBOL(ath6kl_core_rx_complete);
+
+int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
+{
+ struct ath6kl_bmi_target_info targ_info;
+ struct wireless_dev *wdev;
+ int ret = 0, i;
+
+ switch (htc_type) {
+ case ATH6KL_HTC_TYPE_MBOX:
+ ath6kl_htc_mbox_attach(ar);
+ break;
+ case ATH6KL_HTC_TYPE_PIPE:
+ ath6kl_htc_pipe_attach(ar);
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
+ if (!ar->ath6kl_wq)
+ return -ENOMEM;
+
+ ret = ath6kl_bmi_init(ar);
+ if (ret)
+ goto err_wq;
+
+ /*
+ * Turn on power to get the hardware (target) version and deliberately
+ * leave power on, as we will boot the hardware anyway within a few
+ * seconds.
+ */
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_power_off;
+
+ ar->version.target_ver = le32_to_cpu(targ_info.version);
+ ar->target_type = le32_to_cpu(targ_info.type);
+ ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
+
+ ret = ath6kl_init_hw_params(ar);
+ if (ret)
+ goto err_power_off;
+
+ ar->htc_target = ath6kl_htc_create(ar);
+
+ if (!ar->htc_target) {
+ ret = -ENOMEM;
+ goto err_power_off;
+ }
+
+ ar->testmode = testmode;
+
+ ret = ath6kl_init_fetch_firmwares(ar);
+ if (ret)
+ goto err_htc_cleanup;
+
+ /* FIXME: we should free all firmwares in the error cases below */
+
+ /*
+ * Backwards compatibility support for older ar6004 firmware images
+ * which do not set these feature flags.
+ */
+ if (ar->target_type == TARGET_TYPE_AR6004 &&
+ ar->fw_api <= 4) {
+ __set_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+ ar->fw_capabilities);
+ __set_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+ ar->fw_capabilities);
+
+ if (ar->hw.id == AR6004_HW_1_3_VERSION)
+ __set_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+ ar->fw_capabilities);
+ }
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ ret = -EIO;
+ goto err_htc_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ ath6kl_cookie_init(ar);
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ if (suspend_mode &&
+ suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
+ suspend_mode <= WLAN_POWER_STATE_WOW)
+ ar->suspend_mode = suspend_mode;
+ else
+ ar->suspend_mode = 0;
+
+ if (suspend_mode == WLAN_POWER_STATE_WOW &&
+ (wow_mode == WLAN_POWER_STATE_CUT_PWR ||
+ wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
+ ar->wow_suspend_mode = wow_mode;
+ else
+ ar->wow_suspend_mode = 0;
+
+ if (uart_debug)
+ ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
+
+ set_bit(FIRST_BOOT, &ar->flag);
+
+ ath6kl_debug_init(ar);
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_err("Failed to start hardware: %d\n", ret);
+ goto err_rxbuf_cleanup;
+ }
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ ret = ath6kl_cfg80211_init(ar);
+ if (ret)
+ goto err_rxbuf_cleanup;
+
+ ret = ath6kl_debug_init_fs(ar);
+ if (ret) {
+ wiphy_unregister(ar->wiphy);
+ goto err_rxbuf_cleanup;
+ }
+
+ for (i = 0; i < ar->vif_max; i++)
+ ar->avail_idx_map |= BIT(i);
+
+ rtnl_lock();
+
+ /* Add an initial station interface */
+ wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM,
+ NL80211_IFTYPE_STATION, 0, INFRA_NETWORK);
+
+ rtnl_unlock();
+
+ if (!wdev) {
+ ath6kl_err("Failed to instantiate a network device\n");
+ ret = -ENOMEM;
+ wiphy_unregister(ar->wiphy);
+ goto err_rxbuf_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+ __func__, wdev->netdev->name, wdev->netdev, ar);
+
+ ar->fw_recovery.enable = !!recovery_enable;
+ if (!ar->fw_recovery.enable)
+ return ret;
+
+ if (heart_beat_poll &&
+ test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+ ar->fw_capabilities))
+ ar->fw_recovery.hb_poll = heart_beat_poll;
+
+ ath6kl_recovery_init(ar);
+
+ return ret;
+
+err_rxbuf_cleanup:
+ ath6kl_debug_cleanup(ar);
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+err_htc_cleanup:
+ ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+ ath6kl_hif_power_off(ar);
+err_bmi_cleanup:
+ ath6kl_bmi_cleanup(ar);
+err_wq:
+ destroy_workqueue(ar->ath6kl_wq);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath6kl_core_init);
+
+struct ath6kl *ath6kl_core_create(struct device *dev)
+{
+ struct ath6kl *ar;
+ u8 ctr;
+
+ ar = ath6kl_cfg80211_create();
+ if (!ar)
+ return NULL;
+
+ ar->p2p = !!ath6kl_p2p;
+ ar->dev = dev;
+
+ ar->vif_max = 1;
+
+ ar->max_norm_iface = 1;
+
+ spin_lock_init(&ar->lock);
+ spin_lock_init(&ar->mcastpsq_lock);
+ spin_lock_init(&ar->list_lock);
+
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ INIT_LIST_HEAD(&ar->vif_list);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ clear_bit(SKIP_SCAN, &ar->flag);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ ar->tx_pwr = 0;
+ ar->intra_bss = 1;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ skb_queue_head_init(&ar->sta_list[ctr].apsdq);
+ ar->sta_list[ctr].mgmt_psq_len = 0;
+ INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq);
+ ar->sta_list[ctr].aggr_conn =
+ kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
+ if (!ar->sta_list[ctr].aggr_conn) {
+ ath6kl_err("Failed to allocate memory for sta aggregation information\n");
+ ath6kl_core_destroy(ar);
+ return NULL;
+ }
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+ return ar;
+}
+EXPORT_SYMBOL(ath6kl_core_create);
+
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+ ath6kl_hif_power_off(ar);
+
+ ath6kl_recovery_cleanup(ar);
+
+ destroy_workqueue(ar->ath6kl_wq);
+
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
+
+ ath6kl_cookie_cleanup(ar);
+
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+
+ ath6kl_bmi_cleanup(ar);
+
+ ath6kl_debug_cleanup(ar);
+
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ vfree(ar->fw);
+ kfree(ar->fw_patch);
+ kfree(ar->fw_testscript);
+
+ ath6kl_cfg80211_cleanup(ar);
+}
+EXPORT_SYMBOL(ath6kl_core_cleanup);
+
+void ath6kl_core_destroy(struct ath6kl *ar)
+{
+ ath6kl_cfg80211_destroy(ar);
+}
+EXPORT_SYMBOL(ath6kl_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
new file mode 100644
index 000000000..3b5460e4f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CORE_H
+#define CORE_H
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/circ_buf.h>
+#include <net/cfg80211.h>
+#include "htc.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "target.h"
+
+#define MAX_ATH6KL 1
+#define ATH6KL_MAX_RX_BUFFERS 16
+#define ATH6KL_BUFFER_SIZE 1664
+#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
+#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
+#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
+#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
+#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
+
+#define USER_SAVEDKEYS_STAT_INIT 0
+#define USER_SAVEDKEYS_STAT_RUN 1
+
+#define ATH6KL_TX_TIMEOUT 10
+#define ATH6KL_MAX_ENDPOINTS 4
+#define MAX_NODE_NUM 15
+
+#define ATH6KL_APSD_ALL_FRAME 0xFFFF
+#define ATH6KL_APSD_NUM_OF_AC 0x4
+#define ATH6KL_APSD_FRAME_MASK 0xF
+
+/* Extra bytes for htc header alignment */
+#define ATH6KL_HTC_ALIGN_BYTES 3
+
+/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
+#define MAX_DEF_COOKIE_NUM 180
+#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */
+#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
+
+#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
+
+#define DISCON_TIMER_INTVAL 10000 /* in msec */
+
+/* Channel dwell time in fg scan */
+#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */
+
+/* includes also the null byte */
+#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
+
+enum ath6kl_fw_ie_type {
+ ATH6KL_FW_IE_FW_VERSION = 0,
+ ATH6KL_FW_IE_TIMESTAMP = 1,
+ ATH6KL_FW_IE_OTP_IMAGE = 2,
+ ATH6KL_FW_IE_FW_IMAGE = 3,
+ ATH6KL_FW_IE_PATCH_IMAGE = 4,
+ ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
+ ATH6KL_FW_IE_CAPABILITIES = 6,
+ ATH6KL_FW_IE_PATCH_ADDR = 7,
+ ATH6KL_FW_IE_BOARD_ADDR = 8,
+ ATH6KL_FW_IE_VIF_MAX = 9,
+};
+
+enum ath6kl_fw_capability {
+ ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
+
+ /*
+ * Firmware is capable of supporting P2P mgmt operations on a
+ * station interface. After group formation, the station
+ * interface will become a P2P client/GO interface, as the case may be.
+ */
+ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+
+ /*
+ * Firmware has support to cleanup inactive stations
+ * in AP mode.
+ */
+ ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
+
+ /* Firmware has support to override rsn cap of rsn ie */
+ ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+
+ /*
+ * Multicast support in WOW and host awake mode.
+ * Allow all multicast in host awake mode.
+ * Apply multicast filter in WOW mode.
+ */
+ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+
+ /* Firmware supports enhanced bmiss detection */
+ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+
+ /*
+ * FW supports matching of SSIDs in scheduled scan
+ */
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+
+ /* Firmware supports filtering BSS results by RSSI */
+ ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+
+ /* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */
+ ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+
+ /* Firmware supports TX error rate notification */
+ ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+
+ /* supports WMI_SET_REGDOMAIN_CMDID command */
+ ATH6KL_FW_CAPABILITY_REGDOMAIN,
+
+ /* Firmware supports sched scan decoupled from host sleep */
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2,
+
+ /*
+ * Firmware capability for hang detection through heart beat
+ * challenge messages.
+ */
+ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+
+ /* WMI_SET_TX_SELECT_RATES_CMDID uses 64 bit size rate table */
+ ATH6KL_FW_CAPABILITY_64BIT_RATES,
+
+ /* WMI_AP_CONN_INACT_CMDID uses minutes as units */
+ ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+
+ /* use low priority endpoint for all data */
+ ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+
+ /* ratetable is the 2 stream version (max MCS15) */
+ ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
+
+ /* firmware doesn't support IP checksumming */
+ ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
+
+ /* this needs to be last */
+ ATH6KL_FW_CAPABILITY_MAX,
+};
+
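+/* number of 32-bit words needed to hold the firmware capability bitmap */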
+#define ATH6KL_CAPABILITY_LEN (ALIGN(ATH6KL_FW_CAPABILITY_MAX, 32) / 32)
+
+struct ath6kl_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
+
+enum ath6kl_hw_flags {
+ ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3),
+};
+
+#define ATH6KL_FW_API2_FILE "/*(DEBLOBBED)*/"
+#define ATH6KL_FW_API3_FILE "/*(DEBLOBBED)*/"
+#define ATH6KL_FW_API4_FILE "/*(DEBLOBBED)*/"
+#define ATH6KL_FW_API5_FILE "/*(DEBLOBBED)*/"
+
+/* AR6003 1.0 definitions */
+#define AR6003_HW_1_0_VERSION 0x300002ba
+
+/* AR6003 2.0 definitions */
+#define AR6003_HW_2_0_VERSION 0x30000384
+#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0"
+#define AR6003_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_0_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_0_PATCH_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
+ AR6003_HW_2_0_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6003 3.0 definitions */
+#define AR6003_HW_2_1_1_VERSION 0x30000582
+#define AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1"
+#define AR6003_HW_2_1_1_OTP_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_PATCH_FILE "/*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
+ AR6003_HW_2_1_1_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6004 1.0 definitions */
+#define AR6004_HW_1_0_VERSION 0x30000623
+#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
+#define AR6004_HW_1_0_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
+ AR6004_HW_1_0_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6004 1.1 definitions */
+#define AR6004_HW_1_1_VERSION 0x30000001
+#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
+#define AR6004_HW_1_1_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
+ AR6004_HW_1_1_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6004 1.2 definitions */
+#define AR6004_HW_1_2_VERSION 0x300007e8
+#define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2"
+#define AR6004_HW_1_2_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
+ AR6004_HW_1_2_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6004 1.3 definitions */
+#define AR6004_HW_1_3_VERSION 0x31c8088a
+#define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3"
+#define AR6004_HW_1_3_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_3_TCMD_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_3_UTF_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_3_TESTSCRIPT_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_1_3_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "//*(DEBLOBBED)*/"
+
+/* AR6004 3.0 definitions */
+#define AR6004_HW_3_0_VERSION 0x31C809F8
+#define AR6004_HW_3_0_FW_DIR "ath6k/AR6004/hw3.0"
+#define AR6004_HW_3_0_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_3_0_TCMD_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_3_0_UTF_FIRMWARE_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_3_0_TESTSCRIPT_FILE "/*(DEBLOBBED)*/"
+#define AR6004_HW_3_0_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "//*(DEBLOBBED)*/"
+#define AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "//*(DEBLOBBED)*/"
+
+/* Per STA data, used in AP mode */
+#define STA_PS_AWAKE BIT(0)
+#define STA_PS_SLEEP BIT(1)
+#define STA_PS_POLLED BIT(2)
+#define STA_PS_APSD_TRIGGER BIT(3)
+#define STA_PS_APSD_EOSP BIT(4)
+
+/* HTC TX packet tagging definitions */
+#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
+#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
+
+#define AR6003_CUST_DATA_SIZE 16
+
+#define AGGR_WIN_IDX(x, y) ((x) % (y))
+#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
+#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
+#define ATH6KL_MAX_SEQ_NO 0xFFF
+#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
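+
+/*
+ * Worked example of the window arithmetic above (illustrative only):
+ * sequence numbers wrap at ATH6KL_MAX_SEQ_NO, so
+ * ATH6KL_NEXT_SEQ_NO(0xFFF) = (0xFFF + 1) & 0xFFF = 0, and with a hold
+ * queue of 8 entries AGGR_WIN_IDX(0x102, 8) = 0x102 % 8 = 2, i.e.
+ * the frame with sequence number 0x102 lands in slot 2 of hold_q.
+ */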
+
+#define NUM_OF_TIDS 8
+#define AGGR_SZ_DEFAULT 8
+
+#define AGGR_WIN_SZ_MIN 2
+#define AGGR_WIN_SZ_MAX 8
+
+#define TID_WINDOW_SZ(_x) ((_x) << 1)
+
+#define AGGR_NUM_OF_FREE_NETBUFS 16
+
+#define AGGR_RX_TIMEOUT 100 /* in ms */
+
+#define WMI_TIMEOUT (2 * HZ)
+
+#define MBOX_YIELD_LIMIT 99
+
+#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */
+#define ATH6KL_DEFAULT_BMISS_TIME 1500
+#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */
+#define ATH6KL_MAX_BMISS_TIME 5000
+
+/* configuration flags */
+/*
+ * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the Barker preamble bit in the
+ * ERP IE of the beacon when determining short preamble support for the
+ * (Re)Assoc request.
+ * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send power save module
+ * state transition failure events that occur during a scan to the
+ * host.
+ */
+#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
+#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
+#define ATH6KL_CONF_ENABLE_11N BIT(2)
+#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+#define ATH6KL_CONF_UART_DEBUG BIT(4)
+
+#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */
+
+enum wlan_low_pwr_state {
+ WLAN_POWER_STATE_ON,
+ WLAN_POWER_STATE_CUT_PWR,
+ WLAN_POWER_STATE_DEEP_SLEEP,
+ WLAN_POWER_STATE_WOW
+};
+
+enum sme_state {
+ SME_DISCONNECTED,
+ SME_CONNECTING,
+ SME_CONNECTED
+};
+
+struct skb_hold_q {
+ struct sk_buff *skb;
+ bool is_amsdu;
+ u16 seq_no;
+};
+
+struct rxtid {
+ bool aggr;
+ bool timer_mon;
+ u16 win_sz;
+ u16 seq_next;
+ u32 hold_q_sz;
+ struct skb_hold_q *hold_q;
+ struct sk_buff_head q;
+
+ /*
+ * lock mainly protects seq_next and hold_q. Movement of seq_next
+ * needs to be protected between aggr_timeout() and
+ * aggr_process_recv_frm(). hold_q holds the pending reorder frames,
+ * so its access should also be protected.
+ * Some of the other fields like hold_q_sz, win_sz and aggr are
+ * initialized/reset when receiving addba/delba req; also, while
+ * deleting aggr state, all the pending buffers are flushed before
+ * resetting these fields, so there should not be any race in accessing
+ * these fields.
+ */
+ spinlock_t lock;
+};
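+
+/*
+ * A minimal sketch of the locking discipline described in struct rxtid
+ * (illustrative only; the receive-path logic around it is elided):
+ *
+ *	spin_lock_bh(&rxtid->lock);
+ *	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ *	node = &rxtid->hold_q[idx];
+ *	... deliver or park the frame ...
+ *	rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
+ *	spin_unlock_bh(&rxtid->lock);
+ */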
+
+struct rxtid_stats {
+ u32 num_into_aggr;
+ u32 num_dups;
+ u32 num_oow;
+ u32 num_mpdu;
+ u32 num_amsdu;
+ u32 num_delivered;
+ u32 num_timeouts;
+ u32 num_hole;
+ u32 num_bar;
+};
+
+struct aggr_info_conn {
+ u8 aggr_sz;
+ u8 timer_scheduled;
+ struct timer_list timer;
+ struct net_device *dev;
+ struct rxtid rx_tid[NUM_OF_TIDS];
+ struct rxtid_stats stat[NUM_OF_TIDS];
+ struct aggr_info *aggr_info;
+};
+
+struct aggr_info {
+ struct aggr_info_conn *aggr_conn;
+ struct sk_buff_head rx_amsdu_freeq;
+};
+
+struct ath6kl_wep_key {
+ u8 key_index;
+ u8 key_len;
+ u8 key[64];
+};
+
+#define ATH6KL_KEY_SEQ_LEN 8
+
+struct ath6kl_key {
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_len;
+ u8 seq[ATH6KL_KEY_SEQ_LEN];
+ u8 seq_len;
+ u32 cipher;
+};
+
+struct ath6kl_node_mapping {
+ u8 mac_addr[ETH_ALEN];
+ u8 ep_id;
+ u8 tx_pend;
+};
+
+struct ath6kl_cookie {
+ struct sk_buff *skb;
+ u32 map_no;
+ struct htc_packet htc_pkt;
+ struct ath6kl_cookie *arc_list_next;
+};
+
+struct ath6kl_mgmt_buff {
+ struct list_head list;
+ u32 freq;
+ u32 wait;
+ u32 id;
+ bool no_cck;
+ size_t len;
+ u8 buf[0];
+};
+
+struct ath6kl_sta {
+ u16 sta_flags;
+ u8 mac[ETH_ALEN];
+ u8 aid;
+ u8 keymgmt;
+ u8 ucipher;
+ u8 auth;
+ u8 wpa_ie[ATH6KL_MAX_IE];
+ struct sk_buff_head psq;
+
+ /* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */
+ spinlock_t psq_lock;
+
+ struct list_head mgmt_psq;
+ size_t mgmt_psq_len;
+ u8 apsd_info;
+ struct sk_buff_head apsdq;
+ struct aggr_info_conn *aggr_conn;
+};
+
+struct ath6kl_version {
+ u32 target_ver;
+ u32 wlan_ver;
+ u32 abi_ver;
+};
+
+struct ath6kl_bmi {
+ u32 cmd_credits;
+ bool done_sent;
+ u8 *cmd_buf;
+ u32 max_data_size;
+ u32 max_cmd_size;
+};
+
+struct target_stats {
+ u64 tx_pkt;
+ u64 tx_byte;
+ u64 tx_ucast_pkt;
+ u64 tx_ucast_byte;
+ u64 tx_mcast_pkt;
+ u64 tx_mcast_byte;
+ u64 tx_bcast_pkt;
+ u64 tx_bcast_byte;
+ u64 tx_rts_success_cnt;
+ u64 tx_pkt_per_ac[4];
+
+ u64 tx_err;
+ u64 tx_fail_cnt;
+ u64 tx_retry_cnt;
+ u64 tx_mult_retry_cnt;
+ u64 tx_rts_fail_cnt;
+
+ u64 rx_pkt;
+ u64 rx_byte;
+ u64 rx_ucast_pkt;
+ u64 rx_ucast_byte;
+ u64 rx_mcast_pkt;
+ u64 rx_mcast_byte;
+ u64 rx_bcast_pkt;
+ u64 rx_bcast_byte;
+ u64 rx_frgment_pkt;
+
+ u64 rx_err;
+ u64 rx_crc_err;
+ u64 rx_key_cache_miss;
+ u64 rx_decrypt_err;
+ u64 rx_dupl_frame;
+
+ u64 tkip_local_mic_fail;
+ u64 tkip_cnter_measures_invoked;
+ u64 tkip_replays;
+ u64 tkip_fmt_err;
+ u64 ccmp_fmt_err;
+ u64 ccmp_replays;
+
+ u64 pwr_save_fail_cnt;
+
+ u64 cs_bmiss_cnt;
+ u64 cs_low_rssi_cnt;
+ u64 cs_connect_cnt;
+ u64 cs_discon_cnt;
+
+ s32 tx_ucast_rate;
+ s32 rx_ucast_rate;
+
+ u32 lq_val;
+
+ u32 wow_pkt_dropped;
+ u16 wow_evt_discarded;
+
+ s16 noise_floor_calib;
+ s16 cs_rssi;
+ s16 cs_ave_beacon_rssi;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+ u8 cs_snr;
+
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+
+ u32 arp_received;
+ u32 arp_matched;
+ u32 arp_replied;
+};
+
+struct ath6kl_mbox_info {
+ u32 htc_addr;
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+
+ u32 block_size;
+
+ u32 gmbox_addr;
+
+ u32 gmbox_sz;
+};
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP. For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+
+#define ATH6KL_KEYBUF_SIZE 16
+#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
+
+#define ATH6KL_KEY_XMIT 0x01
+#define ATH6KL_KEY_RECV 0x02
+#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
+
+/* Initial group key for AP mode */
+struct ath6kl_req_key {
+ bool valid;
+ u8 key_index;
+ int key_type;
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_len;
+};
+
+enum ath6kl_hif_type {
+ ATH6KL_HIF_TYPE_SDIO,
+ ATH6KL_HIF_TYPE_USB,
+};
+
+enum ath6kl_htc_type {
+ ATH6KL_HTC_TYPE_MBOX,
+ ATH6KL_HTC_TYPE_PIPE,
+};
+
+/* Max number of filters that hw supports */
+#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
+struct ath6kl_mc_filter {
+ struct list_head list;
+ char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
+};
+
+struct ath6kl_htcap {
+ bool ht_enable;
+ u8 ampdu_factor;
+ unsigned short cap_info;
+};
+
+/*
+ * Driver's maximum limit. Note that some firmware versions support only
+ * one vif; the runtime (current) limit must be checked from ar->vif_max.
+ */
+#define ATH6KL_VIF_MAX 3
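+
+/*
+ * A minimal sketch of the runtime check implied by the comment above
+ * (illustrative only): interface creation is expected to compare
+ * against ar->vif_max rather than ATH6KL_VIF_MAX, e.g.
+ *
+ *	if (ar->num_vif >= ar->vif_max)
+ *		return ERR_PTR(-EINVAL);
+ */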
+
+/* vif flags info */
+enum ath6kl_vif_state {
+ CONNECTED,
+ CONNECT_PEND,
+ WMM_ENABLED,
+ NETQ_STOPPED,
+ DTIM_EXPIRED,
+ CLEAR_BSSFILTER_ON_BEACON,
+ DTIM_PERIOD_AVAIL,
+ WLAN_ENABLED,
+ STATS_UPDATE_PEND,
+ HOST_SLEEP_MODE_CMD_PROCESSED,
+ NETDEV_MCAST_ALL_ON,
+ NETDEV_MCAST_ALL_OFF,
+ SCHED_SCANNING,
+};
+
+struct ath6kl_vif {
+ struct list_head list;
+ struct wireless_dev wdev;
+ struct net_device *ndev;
+ struct ath6kl *ar;
+ /* Lock to protect vif specific net_stats and flags */
+ spinlock_t if_lock;
+ u8 fw_vif_idx;
+ unsigned long flags;
+ int ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto;
+ u8 prwise_crypto_len;
+ u8 grp_crypto;
+ u8 grp_crypto_len;
+ u8 def_txkey_index;
+ u8 next_mode;
+ u8 nw_type;
+ u8 bssid[ETH_ALEN];
+ u8 req_bssid[ETH_ALEN];
+ u16 ch_hint;
+ u16 bss_ch;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ struct aggr_info *aggr_cntxt;
+ struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
+
+ struct timer_list disconnect_timer;
+ struct timer_list sched_scan_timer;
+
+ struct cfg80211_scan_request *scan_req;
+ enum sme_state sme_state;
+ int reconnect_flag;
+ u32 last_roc_id;
+ u32 last_cancel_roc_id;
+ u32 send_action_id;
+ bool probe_req_report;
+ u16 assoc_bss_beacon_int;
+ u16 listen_intvl_t;
+ u16 bmiss_time_t;
+ u32 txe_intvl;
+ u16 bg_scan_period;
+ u8 assoc_bss_dtim_period;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+ struct wmi_connect_cmd profile;
+ u16 rsn_capab;
+
+ struct list_head mc_filter;
+};
+
+static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev)
+{
+ return container_of(wdev, struct ath6kl_vif, wdev);
+}
+
+#define WOW_LIST_ID 0
+#define WOW_HOST_REQ_DELAY 500 /* ms */
+
+#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+ WMI_ENABLED,
+ WMI_READY,
+ WMI_CTRL_EP_FULL,
+ TESTMODE,
+ DESTROY_IN_PROGRESS,
+ SKIP_SCAN,
+ ROAM_TBL_PEND,
+ FIRST_BOOT,
+ RECOVERY_CLEANUP,
+};
+
+enum ath6kl_state {
+ ATH6KL_STATE_OFF,
+ ATH6KL_STATE_ON,
+ ATH6KL_STATE_SUSPENDING,
+ ATH6KL_STATE_RESUMING,
+ ATH6KL_STATE_DEEPSLEEP,
+ ATH6KL_STATE_CUTPOWER,
+ ATH6KL_STATE_WOW,
+ ATH6KL_STATE_RECOVERY,
+};
+
+/* Fw error recovery */
+#define ATH6KL_HB_RESP_MISS_THRES 5
+
+enum ath6kl_fw_err {
+ ATH6KL_FW_ASSERT,
+ ATH6KL_FW_HB_RESP_FAILURE,
+ ATH6KL_FW_EP_FULL,
+};
+
+struct ath6kl {
+ struct device *dev;
+ struct wiphy *wiphy;
+
+ enum ath6kl_state state;
+ unsigned int testmode;
+
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ const struct ath6kl_htc_ops *htc_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ enum ath6kl_hif_type hif_type;
+ void *hif_priv;
+ struct list_head vif_list;
+ /* Lock to avoid race in vif_list entries among add/del/traverse */
+ spinlock_t list_lock;
+ u8 num_vif;
+ unsigned int vif_max;
+ u8 max_norm_iface;
+ u8 avail_idx_map;
+
+ /*
+ * Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie()
+ * calls, tx_pending and total_tx_data_pend.
+ */
+ spinlock_t lock;
+
+ struct semaphore sem;
+ u8 lrssi_roam_threshold;
+ struct ath6kl_version version;
+ u32 target_type;
+ u8 tx_pwr;
+ struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
+ u8 ibss_ps_enable;
+ bool ibss_if_active;
+ u8 node_num;
+ u8 next_ep_id;
+ struct ath6kl_cookie *cookie_list;
+ u32 cookie_count;
+ enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
+ bool ac_stream_active[WMM_NUM_AC];
+ u8 ac_stream_pri_map[WMM_NUM_AC];
+ u8 hiac_stream_active_pri;
+ u8 ep2ac_map[ENDPOINT_MAX];
+ enum htc_endpoint_id ctrl_ep;
+ struct ath6kl_htc_credit_info credit_state_info;
+ u32 connect_ctrl_flags;
+ u32 user_key_ctrl;
+ u8 usr_bss_filter;
+ struct ath6kl_sta sta_list[AP_MAX_NUM_STA];
+ u8 sta_list_index;
+ struct ath6kl_req_key ap_mode_bkey;
+ struct sk_buff_head mcastpsq;
+ u32 want_ch_switch;
+ u16 last_ch;
+
+ /*
+ * FIXME: protects access to mcastpsq but is actually useless as
+ * all skb_queue_*() functions provide serialisation themselves
+ */
+ spinlock_t mcastpsq_lock;
+
+ u8 intra_bss;
+ struct wmi_ap_mode_stat ap_stats;
+ u8 ap_country_code[3];
+ struct list_head amsdu_rx_buffer_queue;
+ u8 rx_meta_ver;
+ enum wlan_low_pwr_state wlan_pwr_state;
+ u8 mac_addr[ETH_ALEN];
+#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
+ struct {
+ void *rx_report;
+ size_t rx_report_len;
+ } tm;
+
+ struct ath6kl_hw {
+ u32 id;
+ const char *name;
+ u32 dataset_patch_addr;
+ u32 app_load_addr;
+ u32 app_start_override_addr;
+ u32 board_ext_data_addr;
+ u32 reserved_ram_size;
+ u32 board_addr;
+ u32 refclk_hz;
+ u32 uarttx_pin;
+ u32 testscript_addr;
+ enum wmi_phy_cap cap;
+
+ u32 flags;
+
+ struct ath6kl_hw_fw {
+ const char *dir;
+ const char *otp;
+ const char *fw;
+ const char *tcmd;
+ const char *patch;
+ const char *utf;
+ const char *testscript;
+ } fw;
+
+ const char *fw_board;
+ const char *fw_default_board;
+ } hw;
+
+ u16 conf_flags;
+ u16 suspend_mode;
+ u16 wow_suspend_mode;
+ wait_queue_head_t event_wq;
+ struct ath6kl_mbox_info mbox_info;
+
+ struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
+ unsigned long flag;
+
+ u8 *fw_board;
+ size_t fw_board_len;
+
+ u8 *fw_otp;
+ size_t fw_otp_len;
+
+ u8 *fw;
+ size_t fw_len;
+
+ u8 *fw_patch;
+ size_t fw_patch_len;
+
+ u8 *fw_testscript;
+ size_t fw_testscript_len;
+
+ unsigned int fw_api;
+ unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN];
+
+ struct workqueue_struct *ath6kl_wq;
+
+ struct dentry *debugfs_phy;
+
+ bool p2p;
+
+ bool wiphy_registered;
+
+ struct ath6kl_fw_recovery {
+ struct work_struct recovery_work;
+ unsigned long err_reason;
+ unsigned long hb_poll;
+ struct timer_list hb_timer;
+ u32 seq_num;
+ bool hb_pending;
+ u8 hb_misscnt;
+ bool enable;
+ } fw_recovery;
+
+#ifdef CONFIG_ATH6KL_DEBUG
+ struct {
+ struct sk_buff_head fwlog_queue;
+ struct completion fwlog_completion;
+ bool fwlog_open;
+
+ u32 fwlog_mask;
+
+ unsigned int dbgfs_diag_reg;
+ u32 diag_reg_addr_wr;
+ u32 diag_reg_val_wr;
+
+ struct {
+ unsigned int invalid_rate;
+ } war_stats;
+
+ u8 *roam_tbl;
+ unsigned int roam_tbl_len;
+
+ u8 keepalive;
+ u8 disc_timeout;
+ } debug;
+#endif /* CONFIG_ATH6KL_DEBUG */
+};
+
+static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
+{
+ return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
+}
+
+static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
+ u32 item_offset)
+{
+ u32 addr = 0;
+
+ if (ar->target_type == TARGET_TYPE_AR6003)
+ addr = ATH6KL_AR6003_HI_START_ADDR + item_offset;
+ else if (ar->target_type == TARGET_TYPE_AR6004)
+ addr = ATH6KL_AR6004_HI_START_ADDR + item_offset;
+
+ return addr;
+}
+
+int ath6kl_configure_target(struct ath6kl *ar);
+void ath6kl_detect_error(unsigned long ptr);
+void disconnect_timer_handler(unsigned long ptr);
+void init_netdev(struct net_device *dev);
+void ath6kl_cookie_init(struct ath6kl *ar);
+void ath6kl_cookie_cleanup(struct ath6kl *ar);
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
+void ath6kl_tx_complete(struct htc_target *context,
+ struct list_head *packet_queue);
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet);
+void ath6kl_stop_txrx(struct ath6kl *ar);
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
+int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value);
+int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
+int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
+int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
+int ath6kl_read_fwlogs(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
+void ath6kl_tx_data_cleanup(struct ath6kl *ar);
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
+
+struct aggr_info *aggr_init(struct ath6kl_vif *vif);
+void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
+ struct aggr_info_conn *aggr_conn);
+void ath6kl_rx_refill(struct htc_target *target,
+ enum htc_endpoint_id endpoint);
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len);
+void aggr_module_destroy(struct aggr_info *aggr_info);
+void aggr_reset_state(struct aggr_info_conn *aggr_conn);
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
+
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap);
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid);
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
+ u8 *bssid, u16 listen_int,
+ u16 beacon_int, enum network_type net_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
+ u8 keymgmt, u8 ucipher, u8 auth,
+ u8 assoc_req_len, u8 *assoc_info, u8 apsd_info);
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 prot_reason_status);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
+
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
+
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+ u8 win_sz);
+void ath6kl_wakeup_event(void *dev);
+
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
+int ath6kl_init_hw_params(struct ath6kl *ar);
+
+void ath6kl_check_wow_status(struct ath6kl *ar);
+
+void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
+void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
+
+struct ath6kl *ath6kl_core_create(struct device *dev);
+int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
+void ath6kl_core_cleanup(struct ath6kl *ar);
+void ath6kl_core_destroy(struct ath6kl *ar);
+
+/* Fw error recovery */
+void ath6kl_init_hw_restart(struct ath6kl *ar);
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
+void ath6kl_recovery_init(struct ath6kl *ar);
+void ath6kl_recovery_cleanup(struct ath6kl *ar);
+void ath6kl_recovery_suspend(struct ath6kl *ar);
+void ath6kl_recovery_resume(struct ath6kl *ar);
+#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
new file mode 100644
index 000000000..81ba48d29
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -0,0 +1,1863 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/skbuff.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "debug.h"
+#include "target.h"
+
+struct ath6kl_fwlog_slot {
+ __le32 timestamp;
+ __le32 length;
+
+ /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
+ u8 payload[0];
+};
+
+#define ATH6KL_FWLOG_MAX_ENTRIES 20
+
+#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
+
+void ath6kl_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sath6kl: %pV", level, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_printk);
+
+void ath6kl_info(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ath6kl_printk(KERN_INFO, "%pV", &vaf);
+ trace_ath6kl_log_info(&vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_info);
+
+void ath6kl_err(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ath6kl_printk(KERN_ERR, "%pV", &vaf);
+ trace_ath6kl_log_err(&vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_err);
+
+void ath6kl_warn(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ath6kl_printk(KERN_WARNING, "%pV", &vaf);
+ trace_ath6kl_log_warn(&vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_warn);
+
+#ifdef CONFIG_ATH6KL_DEBUG
+
+void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (debug_mask & mask)
+ ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
+
+ trace_ath6kl_log_dbg(mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath6kl_dbg);
+
+void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ if (debug_mask & mask) {
+ if (msg)
+ ath6kl_dbg(mask, "%s\n", msg);
+
+ print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+ }
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath6kl_dbg_dump);
+
+#define REG_OUTPUT_LEN_PER_LINE 25
+#define REGTYPE_STR_LEN 100
+
+struct ath6kl_diag_reg_info {
+ u32 reg_start;
+ u32 reg_end;
+ const char *reg_info;
+};
+
+static const struct ath6kl_diag_reg_info diag_reg[] = {
+ { 0x20000, 0x200fc, "General DMA and Rx registers" },
+ { 0x28000, 0x28900, "MAC PCU register & keycache" },
+ { 0x20800, 0x20a40, "QCU" },
+ { 0x21000, 0x212f0, "DCU" },
+ { 0x4000, 0x42e4, "RTC" },
+ { 0x540000, 0x540000 + (256 * 1024), "RAM" },
+ { 0x29800, 0x2B210, "Base Band" },
+ { 0x1C000, 0x1C748, "Analog" },
+};
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_enable_reg)
+{
+ ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
+
+ if (irq_proc_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "CPU Int status: 0x%x\n",
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Error Int status: 0x%x\n",
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Counter Int status: 0x%x\n",
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Mbox Frame: 0x%x\n",
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Rx Lookahead Valid: 0x%x\n",
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Rx Lookahead 0: 0x%x\n",
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Rx Lookahead 1: 0x%x\n",
+ irq_proc_reg->rx_lkahd[1]);
+
+ if (dev->ar->mbox_info.gmbox_addr != 0) {
+ /*
+ * If the target supports GMBOX hardware, dump some
+ * additional state.
+ */
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ }
+ }
+
+ if (irq_enable_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
+ }
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "<------------------------------->\n");
+}
+
+static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
+{
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "--- endpoint: %d svc_id: 0x%X ---\n",
+ ep_dist->endpoint, ep_dist->svc_id);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
+ ep_dist->dist_flags);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
+ ep_dist->cred_norm);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
+ ep_dist->cred_min);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
+ ep_dist->credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
+ ep_dist->cred_assngd);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
+ ep_dist->seek_cred);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
+ ep_dist->cred_sz);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
+ ep_dist->cred_per_msg);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
+ ep_dist->cred_to_dist);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
+ get_queue_depth(&ep_dist->htc_ep->txq));
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "----------------------------------\n");
+}
+
+/* FIXME: move to htc.c */
+void dump_cred_dist_stats(struct htc_target *target)
+{
+ struct htc_endpoint_credit_dist *ep_list;
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list)
+ dump_cred_dist(ep_list);
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit distribution total %d free %d\n",
+ target->credit_info->total_avail_credits,
+ target->credit_info->cur_free_credits);
+}
+
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+ switch (war) {
+ case ATH6KL_WAR_INVALID_RATE:
+ ar->debug.war_stats.invalid_rate++;
+ break;
+ }
+}
+
+static ssize_t read_file_war_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Workaround stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10u\n",
+ "Invalid rates", ar->debug.war_stats.invalid_rate);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_war_stats = {
+ .read = read_file_war_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
+{
+ struct ath6kl_fwlog_slot *slot;
+ struct sk_buff *skb;
+ size_t slot_len;
+
+ if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
+ return;
+
+ slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE;
+
+ skb = alloc_skb(slot_len, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ slot = (struct ath6kl_fwlog_slot *) skb_put(skb, slot_len);
+ slot->timestamp = cpu_to_le32(jiffies);
+ slot->length = cpu_to_le32(len);
+ memcpy(slot->payload, buf, len);
+
+ /* Need to pad each record to fixed length ATH6KL_FWLOG_PAYLOAD_SIZE */
+ memset(slot->payload + len, 0, ATH6KL_FWLOG_PAYLOAD_SIZE - len);
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+
+ __skb_queue_tail(&ar->debug.fwlog_queue, skb);
+ complete(&ar->debug.fwlog_completion);
+
+ /* drop oldest entries */
+ while (skb_queue_len(&ar->debug.fwlog_queue) >
+ ATH6KL_FWLOG_MAX_ENTRIES) {
+ skb = __skb_dequeue(&ar->debug.fwlog_queue);
+ kfree_skb(skb);
+ }
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ return;
+}
+
+static int ath6kl_fwlog_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+
+ if (ar->debug.fwlog_open)
+ return -EBUSY;
+
+ ar->debug.fwlog_open = true;
+
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int ath6kl_fwlog_release(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+
+ ar->debug.fwlog_open = false;
+
+ return 0;
+}
+
+static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct sk_buff *skb;
+ ssize_t ret_cnt;
+ size_t len = 0;
+ char *buf;
+
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+
+ /* read undelivered logs from firmware */
+ ath6kl_read_fwlogs(ar);
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+
+ while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
+ if (skb->len > count - len) {
+ /* not enough space, put skb back and leave */
+ __skb_queue_head(&ar->debug.fwlog_queue, skb);
+ break;
+ }
+
+
+ memcpy(buf + len, skb->data, skb->len);
+ len += skb->len;
+
+ kfree_skb(skb);
+ }
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ /* FIXME: what to do if len == 0? */
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog = {
+ .open = ath6kl_fwlog_open,
+ .release = ath6kl_fwlog_release,
+ .read = ath6kl_fwlog_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_fwlog_block_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct sk_buff *skb;
+ ssize_t ret_cnt;
+ size_t len = 0, not_copied;
+ char *buf;
+ int ret;
+
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+
+ if (skb_queue_len(&ar->debug.fwlog_queue) == 0) {
+ /* we must init under queue lock */
+ init_completion(&ar->debug.fwlog_completion);
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ ret = wait_for_completion_interruptible(
+ &ar->debug.fwlog_completion);
+ if (ret == -ERESTARTSYS) {
+ vfree(buf);
+ return ret;
+ }
+
+ spin_lock(&ar->debug.fwlog_queue.lock);
+ }
+
+ while ((skb = __skb_dequeue(&ar->debug.fwlog_queue))) {
+ if (skb->len > count - len) {
+ /* not enough space, put skb back and leave */
+ __skb_queue_head(&ar->debug.fwlog_queue, skb);
+ break;
+ }
+
+
+ memcpy(buf + len, skb->data, skb->len);
+ len += skb->len;
+
+ kfree_skb(skb);
+ }
+
+ spin_unlock(&ar->debug.fwlog_queue.lock);
+
+ /* FIXME: what to do if len == 0? */
+
+ not_copied = copy_to_user(user_buf, buf, len);
+ if (not_copied != 0) {
+ ret_cnt = -EFAULT;
+ goto out;
+ }
+
+ *ppos = *ppos + len;
+
+ ret_cnt = len;
+
+out:
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog_block = {
+ .open = ath6kl_fwlog_open,
+ .release = ath6kl_fwlog_release,
+ .read = ath6kl_fwlog_block_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_fwlog_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi,
+ ATH6KL_FWLOG_VALID_MASK,
+ ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_fwlog_mask = {
+ .open = simple_open,
+ .read = ath6kl_fwlog_mask_read,
+ .write = ath6kl_fwlog_mask_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ struct target_stats *tgt_stats;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int i;
+ long left;
+ ssize_t ret_cnt;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ tgt_stats = &vif->target_stats;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (down_interruptible(&ar->sem)) {
+ kfree(buf);
+ return -EBUSY;
+ }
+
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
+
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
+ up(&ar->sem);
+ kfree(buf);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &vif->flags), WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left <= 0) {
+ kfree(buf);
+ return -ETIMEDOUT;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Tx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->tx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->tx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->tx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->tx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts success cnt", tgt_stats->tx_rts_success_cnt);
+ for (i = 0; i < 4; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%18s %d %10llu\n", "PER on ac",
+ i, tgt_stats->tx_pkt_per_ac[i]);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->tx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fail count", tgt_stats->tx_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Retry count", tgt_stats->tx_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Multi retry cnt", tgt_stats->tx_mult_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts fail cnt", tgt_stats->tx_rts_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n",
+ "TKIP counter measure used",
+ tgt_stats->tkip_cnter_measures_invoked);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Rx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->rx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Ucast Rate", tgt_stats->rx_ucast_rate);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->rx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->rx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->rx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fragmented pkt", tgt_stats->rx_frgment_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CRC Err", tgt_stats->rx_crc_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Key chache miss", tgt_stats->rx_key_cache_miss);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Decrypt Err", tgt_stats->rx_decrypt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Duplicate frame", tgt_stats->rx_dupl_frame);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Tkip Mic failure", tgt_stats->tkip_local_mic_fail);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "TKIP format err", tgt_stats->tkip_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CCMP format Err", tgt_stats->ccmp_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n",
+ "CCMP Replay Err", tgt_stats->ccmp_replays);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Misc Target stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Beacon Miss count", tgt_stats->cs_bmiss_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num Connects", tgt_stats->cs_connect_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num disconnects", tgt_stats->cs_discon_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "ARP pkt received", tgt_stats->arp_received);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "ARP pkt matched", tgt_stats->arp_matched);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "ARP pkt replied", tgt_stats->arp_replied);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define print_credit_info(fmt_str, ep_list_field) \
+ (len += scnprintf(buf + len, buf_len - len, fmt_str, \
+ ep_list->ep_list_field))
+#define CREDIT_INFO_DISPLAY_STRING_LEN 200
+#define CREDIT_INFO_LEN 128
+
+static ssize_t read_file_credit_dist_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ struct htc_endpoint_credit_dist *ep_list;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = CREDIT_INFO_DISPLAY_STRING_LEN +
+ get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Total Avail Credits: ",
+ target->credit_info->total_avail_credits);
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Free credits :",
+ target->credit_info->cur_free_credits);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
+ " Seek_cred Cred_sz Cred_per_msg Cred_to_dist"
+ " qdepth\n");
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list) {
+ print_credit_info(" %2d", endpoint);
+ print_credit_info("%10x", dist_flags);
+ print_credit_info("%8d", cred_norm);
+ print_credit_info("%9d", cred_min);
+ print_credit_info("%9d", credits);
+ print_credit_info("%10d", cred_assngd);
+ print_credit_info("%13d", seek_cred);
+ print_credit_info("%12d", cred_sz);
+ print_credit_info("%9d", cred_per_msg);
+ print_credit_info("%14d", cred_to_dist);
+ len += scnprintf(buf + len, buf_len - len, "%12d\n",
+ get_queue_depth(&ep_list->htc_ep->txq));
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_credit_dist_stats = {
+ .read = read_file_credit_dist_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+ unsigned int buf_len, unsigned int len,
+ int offset, const char *name)
+{
+ int i;
+ struct htc_endpoint_stats *ep_st;
+ u32 *counter;
+
+ len += scnprintf(buf + len, buf_len - len, "%s:", name);
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ counter = ((u32 *) ep_st) + (offset / 4);
+ len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+ (25 + ENDPOINT_MAX * 11);
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+#define EPSTAT(name) \
+ do { \
+ len = print_endpoint_stat(target, buf, buf_len, len, \
+ offsetof(struct htc_endpoint_stats, \
+ name), \
+ #name); \
+ } while (0)
+
+ EPSTAT(cred_low_indicate);
+ EPSTAT(tx_issued);
+ EPSTAT(tx_pkt_bundled);
+ EPSTAT(tx_bundles);
+ EPSTAT(tx_dropped);
+ EPSTAT(tx_cred_rpt);
+ EPSTAT(cred_rpt_from_rx);
+ EPSTAT(cred_rpt_from_other);
+ EPSTAT(cred_rpt_ep0);
+ EPSTAT(cred_from_rx);
+ EPSTAT(cred_from_other);
+ EPSTAT(cred_from_ep0);
+ EPSTAT(cred_cosumd);
+ EPSTAT(cred_retnd);
+ EPSTAT(rx_pkts);
+ EPSTAT(rx_lkahds);
+ EPSTAT(rx_bundl);
+ EPSTAT(rx_bundle_lkahd);
+ EPSTAT(rx_bundle_from_hdr);
+ EPSTAT(rx_alloc_thresh_hit);
+ EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ int ret, i;
+ u32 val;
+ struct htc_endpoint_stats *ep_st;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+ if (val == 0) {
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ memset(ep_st, 0, sizeof(*ep_st));
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+ .open = simple_open,
+ .read = ath6kl_endpoint_stats_read,
+ .write = ath6kl_endpoint_stats_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static unsigned long ath6kl_get_num_reg(void)
+{
+ int i;
+ unsigned long n_reg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++)
+ n_reg = n_reg +
+ (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1;
+
+ return n_reg;
+}
+
+static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ if (reg_addr >= diag_reg[i].reg_start &&
+ reg_addr <= diag_reg[i].reg_end)
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len = 0;
+
+ if (ar->debug.dbgfs_diag_reg)
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n",
+ ar->debug.dbgfs_diag_reg);
+ else
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "All diag registers\n");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regread_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ unsigned long reg_addr;
+
+ if (kstrtoul_from_user(user_buf, count, 0, &reg_addr))
+ return -EINVAL;
+
+ if ((reg_addr % 4) != 0)
+ return -EINVAL;
+
+ if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ ar->debug.dbgfs_diag_reg = reg_addr;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_read = {
+ .read = ath6kl_regread_read,
+ .write = ath6kl_regread_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath6kl_regdump_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+ u8 *buf;
+ unsigned long int reg_len;
+ unsigned int len = 0, n_reg;
+ u32 addr;
+ __le32 reg_val;
+ int i, status;
+
+ /* Dump all the registers if no register is specified */
+ if (!ar->debug.dbgfs_diag_reg)
+ n_reg = ath6kl_get_num_reg();
+ else
+ n_reg = 1;
+
+ reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE;
+ if (n_reg > 1)
+ reg_len += REGTYPE_STR_LEN;
+
+ buf = vmalloc(reg_len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (n_reg == 1) {
+ addr = ar->debug.dbgfs_diag_reg;
+
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val));
+ goto done;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ len += scnprintf(buf + len, reg_len - len,
+ "%s\n", diag_reg[i].reg_info);
+ for (addr = diag_reg[i].reg_start;
+ addr <= diag_reg[i].reg_end; addr += 4) {
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n",
+ addr, le32_to_cpu(reg_val));
+ }
+ }
+
+done:
+ file->private_data = buf;
+ return 0;
+
+fail_reg_read:
+ ath6kl_warn("Unable to read memory:%u\n", addr);
+ vfree(buf);
+ return -EIO;
+}
+
+static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath6kl_regdump_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_reg_dump = {
+ .open = ath6kl_regdump_open,
+ .read = ath6kl_regdump_read,
+ .release = ath6kl_regdump_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_lrssi_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ unsigned long lrssi_roam_threshold;
+
+ if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
+ return -EINVAL;
+
+ ar->lrssi_roam_threshold = lrssi_roam_threshold;
+
+ ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+
+ return count;
+}
+
+static ssize_t ath6kl_lrssi_roam_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_lrssi_roam_threshold = {
+ .read = ath6kl_lrssi_roam_read,
+ .write = ath6kl_lrssi_roam_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_regwrite_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n",
+ ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regwrite_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_addr, reg_val;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, "=");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_addr))
+ return -EINVAL;
+
+ if (!ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ if (kstrtou32(sptr, 0, &reg_val))
+ return -EINVAL;
+
+ ar->debug.diag_reg_addr_wr = reg_addr;
+ ar->debug.diag_reg_val_wr = reg_val;
+
+ if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr,
+ cpu_to_le32(ar->debug.diag_reg_val_wr)))
+ return -EIO;
+
+ return count;
+}
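+
+/*
+ * For reference (derived from the parsing above, illustrative only):
+ * the write expects "<reg_addr>=<reg_val>", e.g. "0x20000=0x1", where
+ * reg_addr must fall inside one of the ranges listed in diag_reg[].
+ */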
+
+static const struct file_operations fops_diag_reg_write = {
+ .read = ath6kl_regwrite_read,
+ .write = ath6kl_regwrite_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len)
+{
+ const struct wmi_target_roam_tbl *tbl;
+ u16 num_entries;
+
+ if (len < sizeof(*tbl))
+ return -EINVAL;
+
+ tbl = (const struct wmi_target_roam_tbl *) buf;
+ num_entries = le16_to_cpu(tbl->num_entries);
+ if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+ len)
+ return -EINVAL;
+
+ if (ar->debug.roam_tbl == NULL ||
+ ar->debug.roam_tbl_len < (unsigned int) len) {
+ kfree(ar->debug.roam_tbl);
+ ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+ }
+
+ memcpy(ar->debug.roam_tbl, buf, len);
+ ar->debug.roam_tbl_len = len;
+
+ if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+ clear_bit(ROAM_TBL_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+
+ return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ long left;
+ struct wmi_target_roam_tbl *tbl;
+ u16 num_entries, i;
+ char *buf;
+ unsigned int len, buf_len;
+ ssize_t ret_cnt;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(ROAM_TBL_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+ if (ret) {
+ up(&ar->sem);
+ return ret;
+ }
+
+ left = wait_event_interruptible_timeout(
+ ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+ up(&ar->sem);
+
+ if (left <= 0)
+ return -ETIMEDOUT;
+
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+
+ tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+ num_entries = le16_to_cpu(tbl->num_entries);
+
+ buf_len = 100 + num_entries * 100;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "roam_mode=%u\n\n"
+ "# roam_util bssid rssi rssidt last_rssi util bias\n",
+ le16_to_cpu(tbl->roam_mode));
+
+ for (i = 0; i < num_entries; i++) {
+ struct wmi_bss_roam_info *info = &tbl->info[i];
+ len += scnprintf(buf + len, buf_len - len,
+ "%d %pM %d %d %d %d %d\n",
+ a_sle32_to_cpu(info->roam_util), info->bssid,
+ info->rssi, info->rssidt, info->last_rssi,
+ info->util, info->bias);
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+ .read = ath6kl_roam_table_read,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ u8 bssid[ETH_ALEN];
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ if (!mac_pton(buf, bssid))
+ return -EINVAL;
+
+ ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_force_roam = {
+ .write = ath6kl_force_roam_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ enum wmi_roam_mode mode;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strcasecmp(buf, "default") == 0)
+ mode = WMI_DEFAULT_ROAM_MODE;
+ else if (strcasecmp(buf, "bssbias") == 0)
+ mode = WMI_HOST_BIAS_ROAM_MODE;
+ else if (strcasecmp(buf, "lock") == 0)
+ mode = WMI_LOCK_BSS_MODE;
+ else
+ return -EINVAL;
+
+ ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+ .write = ath6kl_roam_mode_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+ ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_keepalive = {
+ .open = simple_open,
+ .read = ath6kl_keepalive_read,
+ .write = ath6kl_keepalive_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+ ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+ .open = simple_open,
+ .read = ath6kl_disconnect_timeout_read,
+ .write = ath6kl_disconnect_timeout_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[200];
+ ssize_t len;
+ char *sptr, *token;
+ struct wmi_create_pstream_cmd pstream;
+ u32 val32;
+ u16 val16;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.user_pri))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_direc))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.inactivity_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.suspension_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.service_start_time = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.tsid))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.nominal_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.max_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.mean_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.peak_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_burst_size = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.delay_bound = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_phy_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.sba = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.medium_time = cpu_to_le32(val32);
+
+ pstream.nominal_phy = le32_to_cpu(pstream.min_phy_rate) / 1000000;
+
+ ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+ return count;
+}
+
+static const struct file_operations fops_create_qos = {
+ .write = ath6kl_create_qos_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
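+
+/*
+ * Illustrative sketch, not part of the driver: a userspace tool would feed
+ * create_qos all 21 fields above as a single space-separated line, in the
+ * order they are parsed (user_pri ... medium_time). The debugfs mount
+ * point, phy name and field values below are assumptions for illustration;
+ * error handling is omitted (needs <fcntl.h> and <unistd.h>):
+ *
+ *   int fd = open("/sys/kernel/debug/ieee80211/phy0/ath6kl/create_qos",
+ *                 O_WRONLY);
+ *   const char cmd[] = "6 1 3 1 0 20 20 0 0 0 5 1500 1500 80000 "
+ *                      "80000 80000 0 0 6000000 8192 0";
+ *   write(fd, cmd, sizeof(cmd) - 1);   /* returns count on success */
+ *   close(fd);
+ */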
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[100];
+ ssize_t len;
+ char *sptr, *token;
+ u8 traffic_class;
+ u8 tsid;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &tsid))
+ return -EINVAL;
+
+ ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+ traffic_class, tsid);
+
+ return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+ .write = ath6kl_delete_qos_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ u16 bgscan_int;
+ char buf[32];
+ ssize_t len;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtou16(buf, 0, &bgscan_int))
+ return -EINVAL;
+
+ if (bgscan_int == 0)
+ bgscan_int = 0xffff;
+
+ vif->bg_scan_period = bgscan_int;
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+ 0, 0, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+ .write = ath6kl_bgscan_int_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ u16 listen_interval;
+ char buf[32];
+ ssize_t len;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtou16(buf, 0, &listen_interval))
+ return -EINVAL;
+
+ if ((listen_interval < 15) || (listen_interval > 3000))
+ return -EINVAL;
+
+ vif->listen_intvl_t = listen_interval;
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->listen_intvl_t, 0);
+
+ return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[32];
+ int len;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", vif->listen_intvl_t);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+ .read = ath6kl_listen_int_read,
+ .write = ath6kl_listen_int_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[100];
+ unsigned int len = 0;
+ char *sptr, *token;
+ u16 idle_period, ps_poll_num, dtim,
+ tx_wakeup, num_tx;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &idle_period))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &ps_poll_num))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &dtim))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &tx_wakeup))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &num_tx))
+ return -EINVAL;
+
+ ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+ dtim, tx_wakeup, num_tx, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_power_params = {
+ .write = ath6kl_power_params_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
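+
+/*
+ * Illustrative sketch, not part of the driver: power_params expects the
+ * five fields above on one space-separated line, in the order
+ * "idle_period ps_poll_num dtim tx_wakeup num_tx". The path and values
+ * are assumptions for illustration (needs <fcntl.h>, <stdio.h>,
+ * <unistd.h>):
+ *
+ *   int fd = open("/sys/kernel/debug/ieee80211/phy0/ath6kl/power_params",
+ *                 O_WRONLY);
+ *   dprintf(fd, "200 1 3 1 1");
+ *   close(fd);
+ */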
+
+void ath6kl_debug_init(struct ath6kl *ar)
+{
+ skb_queue_head_init(&ar->debug.fwlog_queue);
+ init_completion(&ar->debug.fwlog_completion);
+
+ /*
+ * Actually we are lying here but don't know how to read the mask
+ * value from the firmware.
+ */
+ ar->debug.fwlog_mask = 0;
+}
+
+/*
+ * Initialisation needs to happen in two stages as fwlog events can come
+ * before cfg80211 is initialised, and debugfs depends on cfg80211
+ * initialisation.
+ */
+int ath6kl_debug_init_fs(struct ath6kl *ar)
+{
+ ar->debugfs_phy = debugfs_create_dir("ath6kl",
+ ar->wiphy->debugfsdir);
+ if (!ar->debugfs_phy)
+ return -ENOMEM;
+
+ debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_tgt_stats);
+
+ if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO)
+ debugfs_create_file("credit_dist_stats", S_IRUSR,
+ ar->debugfs_phy, ar,
+ &fops_credit_dist_stats);
+
+ debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_endpoint_stats);
+
+ debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog);
+
+ debugfs_create_file("fwlog_block", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog_block);
+
+ debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
+ ar, &fops_fwlog_mask);
+
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_diag_reg_read);
+
+ debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_reg_dump);
+
+ debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_lrssi_roam_threshold);
+
+ debugfs_create_file("reg_write", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_diag_reg_write);
+
+ debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_war_stats);
+
+ debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_roam_table);
+
+ debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_force_roam);
+
+ debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_roam_mode);
+
+ debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_keepalive);
+
+ debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+ debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_create_qos);
+
+ debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_delete_qos);
+
+ debugfs_create_file("bgscan_interval", S_IWUSR,
+ ar->debugfs_phy, ar, &fops_bgscan_int);
+
+ debugfs_create_file("listen_interval", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_listen_int);
+
+ debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_power_params);
+
+ return 0;
+}
+
+void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+ skb_queue_purge(&ar->debug.fwlog_queue);
+ complete(&ar->debug.fwlog_completion);
+ kfree(ar->debug.roam_tbl);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
new file mode 100644
index 000000000..19106ed28
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include "hif.h"
+#include "trace.h"
+
+enum ATH6K_DEBUG_MASK {
+ ATH6KL_DBG_CREDIT = BIT(0),
+ /* hole */
+ ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
+ ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
+ ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
+ ATH6KL_DBG_HTC = BIT(5),
+ ATH6KL_DBG_HIF = BIT(6),
+ ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
+ /* hole */
+ /* hole */
+ ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
+ ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
+ ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
+ ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */
+ ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx frames */
+ ATH6KL_DBG_AGGR = BIT(15), /* aggregation */
+ ATH6KL_DBG_SDIO = BIT(16),
+ ATH6KL_DBG_SDIO_DUMP = BIT(17),
+ ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
+ ATH6KL_DBG_WMI_DUMP = BIT(19),
+ ATH6KL_DBG_SUSPEND = BIT(20),
+ ATH6KL_DBG_USB = BIT(21),
+ ATH6KL_DBG_USB_BULK = BIT(22),
+ ATH6KL_DBG_RECOVERY = BIT(23),
+ ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
+};
+
+extern unsigned int debug_mask;
+__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) void ath6kl_info(const char *fmt, ...);
+__printf(1, 2) void ath6kl_err(const char *fmt, ...);
+__printf(1, 2) void ath6kl_warn(const char *fmt, ...);
+
+enum ath6kl_war {
+ ATH6KL_WAR_INVALID_RATE,
+};
+
+#ifdef CONFIG_ATH6KL_DEBUG
+
+void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
+void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg);
+void dump_cred_dist_stats(struct htc_target *target);
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
+void ath6kl_debug_init(struct ath6kl *ar);
+int ath6kl_debug_init_fs(struct ath6kl *ar);
+void ath6kl_debug_cleanup(struct ath6kl *ar);
+
+#else
+static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
+ const char *fmt, ...)
+{
+}
+
+static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+
+static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg)
+{
+}
+
+static inline void dump_cred_dist_stats(struct htc_target *target)
+{
+}
+
+static inline void ath6kl_debug_fwlog_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+}
+
+static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+}
+
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+ u8 timeout)
+{
+}
+
+static inline void ath6kl_debug_init(struct ath6kl *ar)
+{
+}
+
+static inline int ath6kl_debug_init_fs(struct ath6kl *ar)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+}
+
+#endif
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
new file mode 100644
index 000000000..8c9e72d52
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_OPS_H
+#define HIF_OPS_H
+
+#include "hif.h"
+#include "debug.h"
+
+static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+ (request & HIF_WRITE) ? "write" : "read",
+ addr, buf, len, request);
+
+ return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
+}
+
+static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+ address, buffer, length, request);
+
+ return ar->hif_ops->write_async(ar, address, buffer, length,
+ request, packet);
+}
+static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
+ return ar->hif_ops->irq_enable(ar);
+}
+
+static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
+ return ar->hif_ops->irq_disable(ar);
+}
+
+static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
+{
+ return ar->hif_ops->scatter_req_get(ar);
+}
+
+static inline void hif_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ return ar->hif_ops->scatter_req_add(ar, s_req);
+}
+
+static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar)
+{
+ return ar->hif_ops->enable_scatter(ar);
+}
+
+static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ return ar->hif_ops->scat_req_rw(ar, scat_req);
+}
+
+static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
+{
+ return ar->hif_ops->cleanup_scatter(ar);
+}
+
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+ struct cfg80211_wowlan *wow)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+ return ar->hif_ops->suspend(ar, wow);
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
+ u32 *value)
+{
+ return ar->hif_ops->diag_read32(ar, address, value);
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 value)
+{
+ return ar->hif_ops->diag_write32(ar, address, value);
+}
+
+static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_read(ar, buf, len);
+}
+
+static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_write(ar, buf, len);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+ return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+ return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+ return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+ ar->hif_ops->stop(ar);
+}
+
+static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
+ u8 pipe, struct sk_buff *hdr_buf,
+ struct sk_buff *buf)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");
+
+ return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
+}
+
+static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
+
+ ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
+}
+
+static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
+ u16 service_id, u8 *ul_pipe,
+ u8 *dl_pipe)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe map service\n");
+
+ return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
+}
+
+static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
+ u8 pipe)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");
+
+ return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
new file mode 100644
index 000000000..18c070850
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "hif.h"
+
+#include <linux/export.h>
+
+#include "core.h"
+#include "target.h"
+#include "hif-ops.h"
+#include "debug.h"
+#include "trace.h"
+
+#define MAILBOX_FOR_BLOCK_SIZE 1
+
+#define ATH6KL_TIME_QUANTUM 10 /* in ms */
+
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+ bool from_dma)
+{
+ u8 *buf;
+ int i;
+
+ buf = req->virt_dma_buf;
+
+ for (i = 0; i < req->scat_entries; i++) {
+ if (from_dma)
+ memcpy(req->scat_list[i].buf, buf,
+ req->scat_list[i].len);
+ else
+ memcpy(buf, req->scat_list[i].buf,
+ req->scat_list[i].len);
+
+ buf += req->scat_list[i].len;
+ }
+
+ return 0;
+}
+
+int ath6kl_hif_rw_comp_handler(void *context, int status)
+{
+ struct htc_packet *packet = context;
+
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
+ packet, status);
+
+ packet->status = status;
+ packet->completion(packet->context, packet);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
+
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
+
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
+{
+ __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i, address, regdump_addr = 0;
+ int ret;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(ar->target_type, address);
+
+ /* read RAM location through diagnostic window */
+ ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+ if (ret || !regdump_addr) {
+ ath6kl_warn("failed to get ptr to register dump area: %d\n",
+ ret);
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+ regdump_addr);
+ regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+ /* fetch register dump data */
+ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ if (ret) {
+ ath6kl_warn("failed to get register dump: %d\n", ret);
+ return;
+ }
+
+ ath6kl_info("crash dump:\n");
+ ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+ ar->wiphy->fw_version);
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
+
+ for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+ ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+ i,
+ le32_to_cpu(regdump_val[i]),
+ le32_to_cpu(regdump_val[i + 1]),
+ le32_to_cpu(regdump_val[i + 2]),
+ le32_to_cpu(regdump_val[i + 3]));
+ }
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int ret;
+
+ ath6kl_warn("firmware crashed\n");
+
+ /*
+ * read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
+
+ ath6kl_hif_dump_fw_crash(dev->ar);
+ ath6kl_read_fwlogs(dev->ar);
+ ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
+
+ return ret;
+}
+
+/* mailbox recv message polling */
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+ int timeout)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0, i;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
+ /* this is the standard HIF way, load the reg table */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("failed to read reg table\n");
+ return status;
+ }
+
+ /* check for MBOX data and valid lookahead */
+ if (dev->irq_proc_reg.host_int_status & htc_mbox) {
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ /*
+ * Mailbox has a message and the look ahead
+ * is valid.
+ */
+ rg = &dev->irq_proc_reg;
+ *lk_ahd =
+ le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ break;
+ }
+ }
+
+ /* delay a little */
+ mdelay(ATH6KL_TIME_QUANTUM);
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
+ }
+
+ if (i == 0) {
+ ath6kl_err("timeout waiting for recv message\n");
+ status = -ETIME;
+ /* check if the target asserted */
+ if (dev->irq_proc_reg.counter_int_status &
+ ATH6KL_TARGET_DEBUG_INTR_MASK)
+ /*
+ * Target failure handler will be called in case of
+ * an assert.
+ */
+ ath6kl_hif_proc_dbg_intr(dev);
+ }
+
+ return status;
+}
+
+/*
+ * Disable packet reception (used in case the host runs out of buffers)
+ * using the interrupt enable registers through the host I/F
+ */
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+ enable_rx ? "enable" : "disable");
+
+ /* take the lock to protect interrupt enable shadows */
+ spin_lock_bh(&dev->lock);
+
+ if (enable_rx)
+ dev->irq_en_reg.int_status_en |=
+ SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+ else
+ dev->irq_en_reg.int_status_en &=
+ ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en,
+ sizeof(struct ath6kl_irq_enable_reg),
+ HIF_WR_SYNC_BYTE_INC);
+
+ return status;
+}
+
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read)
+{
+ int status = 0;
+
+ if (read) {
+ scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
+ scat_req->addr = dev->ar->mbox_info.htc_addr;
+ } else {
+ scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
+
+ scat_req->addr =
+ (scat_req->len > HIF_MBOX_WIDTH) ?
+ dev->ar->mbox_info.htc_ext_addr :
+ dev->ar->mbox_info.htc_addr;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
+ scat_req->scat_entries, scat_req->len,
+ scat_req->addr, !read ? "async" : "sync",
+ (read) ? "rd" : "wr");
+
+ if (!read && scat_req->virt_scat) {
+ status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
+ if (status) {
+ scat_req->status = status;
+ scat_req->complete(dev->ar->htc_target, scat_req);
+ return 0;
+ }
+ }
+
+ status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
+
+ if (read) {
+ /* in sync mode, we can touch the scatter request */
+ scat_req->status = status;
+ if (!status && scat_req->virt_scat)
+ scat_req->status =
+ ath6kl_hif_cp_scat_dma_buf(scat_req, true);
+ }
+
+ return status;
+}
+
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
+{
+ u8 counter_int_status;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
+
+ counter_int_status = dev->irq_proc_reg.counter_int_status &
+ dev->irq_en_reg.cntr_int_status_en;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
+ counter_int_status);
+
+ /*
+ * NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters, we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
+ return ath6kl_hif_proc_dbg_intr(dev);
+
+ return 0;
+}
+
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 error_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
+
+ error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
+ if (!error_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
+ error_int_status);
+
+ if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
+
+ if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
+ ath6kl_err("rx underflow\n");
+
+ if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
+ ath6kl_err("tx overflow\n");
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.error_int_status &= ~error_int_status;
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = error_int_status;
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ WARN_ON(status);
+
+ return status;
+}
+
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 cpu_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
+
+ cpu_int_status = dev->irq_proc_reg.cpu_int_status &
+ dev->irq_en_reg.cpu_int_status_en;
+ if (!cpu_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+ cpu_int_status);
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
+
+ /*
+ * Set up the register transfer buffer to hit the register 4 times;
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * be a multiple of 4 bytes.
+ */
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = cpu_int_status;
+ /* the remaining bytes are set to zero, which has no effect */
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ WARN_ON(status);
+
+ return status;
+}
+
+/* process pending interrupts synchronously */
+static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0;
+ u8 host_int_status = 0;
+ u32 lk_ahd = 0;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
+
+ /*
+ * NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ /*
+ * Process pending interrupts only when int_status_en is non-zero;
+ * otherwise this may result in an unnecessary bus transaction, and
+ * the target may be unresponsive at that time.
+ */
+ if (dev->irq_en_reg.int_status_en) {
+ /*
+ * Read the first 28 bytes of the HTC register table. This
+ * will yield us the value of different int status
+ * registers and the lookahead registers.
+ *
+ * length = sizeof(int_status) + sizeof(cpu_int_status)
+ * + sizeof(error_int_status) +
+ * sizeof(counter_int_status) +
+ * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
+ * + sizeof(hole) + sizeof(rx_lkahd) +
+ * sizeof(int_status_en) +
+ * sizeof(cpu_int_status_en) +
+ * sizeof(err_int_status_en) +
+ * sizeof(cntr_int_status_en);
+ */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+ if (status)
+ goto out;
+
+ ath6kl_dump_registers(dev, &dev->irq_proc_reg,
+ &dev->irq_en_reg);
+ trace_ath6kl_sdio_irq(&dev->irq_en_reg,
+ sizeof(dev->irq_en_reg));
+
+ /* Update only those registers that are enabled */
+ host_int_status = dev->irq_proc_reg.host_int_status &
+ dev->irq_en_reg.int_status_en;
+
+ /* Look at mbox status */
+ if (host_int_status & htc_mbox) {
+ /*
+ * Mask out the pending mbox value; we use the lookahead
+ * as the real flag for mbox processing.
+ */
+ host_int_status &= ~htc_mbox;
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ rg = &dev->irq_proc_reg;
+ lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ if (!lk_ahd)
+ ath6kl_err("lookAhead is zero!\n");
+ }
+ }
+ }
+
+ if (!host_int_status && !lk_ahd) {
+ *done = true;
+ goto out;
+ }
+
+ if (lk_ahd) {
+ int fetched = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
+ /*
+ * Mailbox Interrupt, the HTC layer may issue async
+ * requests to empty the mailbox. When emptying the recv
+ * mailbox we use the async handler above called from the
+ * completion routine of the callers read request. This can
+ * improve performance by reducing context switching when
+ * we rapidly pull packets.
+ */
+ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
+ lk_ahd, &fetched);
+ if (status)
+ goto out;
+
+ if (!fetched)
+ /*
+ * HTC could not pull any messages out due to lack
+ * of resources.
+ */
+ dev->htc_cnxt->chk_irq_status_cnt = 0;
+ }
+
+ /* now handle the rest of them */
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) for other interrupts: 0x%x\n",
+ host_int_status);
+
+ if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
+ /* CPU Interrupt */
+ status = ath6kl_hif_proc_cpu_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
+ /* Error Interrupt */
+ status = ath6kl_hif_proc_err_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
+ /* Counter Interrupt */
+ status = ath6kl_hif_proc_counter_intr(dev);
+
+out:
+ /*
+ * An optimization to bypass reading the IRQ status registers
+ * unnecessarily, which can re-wake the target: if the upper layers
+ * determine that we are in a low-throughput mode, we can rely on
+ * taking another interrupt rather than re-checking the status
+ * registers, which can re-wake the target.
+ *
+ * NOTE: host interfaces that detect pending mbox messages at the
+ * HIF level cannot use this optimization due to possible side
+ * effects; SPI requires the host to drain all messages from the
+ * mailbox before exiting the ISR routine.
+ */
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "bypassing irq status re-check, forcing done\n");
+
+ if (!dev->htc_cnxt->chk_irq_status_cnt)
+ *done = true;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "proc_pending_irqs: (done:%d, status=%d)\n", *done, status);
+
+ return status;
+}
+
+/* interrupt handler, kicks off all interrupt processing */
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
+{
+ struct ath6kl_device *dev = ar->htc_target->dev;
+ unsigned long timeout;
+ int status = 0;
+ bool done = false;
+
+ /*
+ * Reset counter used to flag a re-scan of IRQ status registers on
+ * the target.
+ */
+ dev->htc_cnxt->chk_irq_status_cnt = 0;
+
+ /*
+ * IRQ processing is synchronous, interrupt status registers can be
+ * re-read.
+ */
+ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !done) {
+ status = proc_pending_irqs(dev, &done);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler);
+
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status;
+
+ spin_lock_bh(&dev->lock);
+
+ /* Enable all but ATH6KL CPU interrupts */
+ dev->irq_en_reg.int_status_en =
+ SM(INT_STATUS_ENABLE_ERROR, 0x01) |
+ SM(INT_STATUS_ENABLE_CPU, 0x01) |
+ SM(INT_STATUS_ENABLE_COUNTER, 0x01);
+
+ /*
+ * NOTE: There are some cases where the HIF can detect pending
+ * mbox messages, but that capability is disabled for now.
+ */
+ dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ /* Set up the CPU Interrupt status Register */
+ dev->irq_en_reg.cpu_int_status_en = 0;
+
+ /* Set up the Error Interrupt status Register */
+ dev->irq_en_reg.err_int_status_en =
+ SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
+ SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
+
+ /*
+ * Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
+ ATH6KL_TARGET_DEBUG_INTR_MASK);
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+
+ if (status)
+ ath6kl_err("failed to update interrupt ctl reg err: %d\n",
+ status);
+
+ return status;
+}
+
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+
+ spin_lock_bh(&dev->lock);
+ /* Disable all interrupts */
+ dev->irq_en_reg.int_status_en = 0;
+ dev->irq_en_reg.cpu_int_status_en = 0;
+ dev->irq_en_reg.err_int_status_en = 0;
+ dev->irq_en_reg.cntr_int_status_en = 0;
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+ spin_unlock_bh(&dev->lock);
+
+ return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+}
+
+/* enable device interrupts */
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
+{
+ int status = 0;
+
+ /*
+ * Make sure interrupts are disabled before unmasking at the HIF
+ * layer. The rationale here is that between device insertion
+ * (where we clear the interrupts the first time) and when HTC
+ * is finally ready to handle interrupts, other software can perform
+ * target "soft" resets. The ATH6KL interrupt enables reset back to an
+ * "enabled" state when this happens.
+ */
+ ath6kl_hif_disable_intrs(dev);
+
+ /* unmask the host controller interrupts */
+ ath6kl_hif_irq_enable(dev->ar);
+ status = ath6kl_hif_enable_intrs(dev);
+
+ return status;
+}
+
+/* disable all device interrupts */
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
+{
+ /*
+ * Mask the interrupt at the HIF layer to avoid any stray interrupt
+ * taken while we zero out our shadow registers in
+ * ath6kl_hif_disable_intrs().
+ */
+ ath6kl_hif_irq_disable(dev->ar);
+
+ return ath6kl_hif_disable_intrs(dev);
+}
+
+int ath6kl_hif_setup(struct ath6kl_device *dev)
+{
+ int status = 0;
+
+ spin_lock_init(&dev->lock);
+
+ /*
+ * NOTE: we actually get the block size of a mailbox other than 0,
+ * for SDIO the block size on mailbox 0 is artificially set to 1.
+ * So we use the block size that is set for the other 3 mailboxes.
+ */
+ dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
+
+ /* must be a power of 2 */
+ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
+ WARN_ON(1);
+ status = -EINVAL;
+ goto fail_setup;
+ }
+
+ /* assemble mask, used for padding to a block */
+ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
+
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
+ dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
+
+ status = ath6kl_hif_disable_intrs(dev);
+
+fail_setup:
+ return status;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 000000000..dc6bd8cd9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_H
+#define HIF_H
+
+#include "common.h"
+#include "core.h"
+
+#include <linux/scatterlist.h>
+
+#define BUS_REQUEST_MAX_NUM 64
+#define HIF_MBOX_BLOCK_SIZE 128
+#define HIF_MBOX0_BLOCK_SIZE 1
+
+#define HIF_DMA_BUFFER_SIZE (32 * 1024)
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS 2
+
+#define MAX_SCATTER_REQUESTS 4
+#define MAX_SCATTER_ENTRIES_PER_REQ 16
+#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
+
+#define MANUFACTURER_ID_AR6003_BASE 0x300
+#define MANUFACTURER_ID_AR6004_BASE 0x400
+ /* SDIO manufacturer ID and Codes */
+#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
+#define MANUFACTURER_CODE 0x271 /* Atheros */
+
+/* Mailbox address in SDIO address space */
+#define HIF_MBOX_BASE_ADDR 0x800
+#define HIF_MBOX_WIDTH 0x800
+
+#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
+
+/* version 1 of the chip has only a 12K extended mbox range */
+#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
+#define HIF_MBOX0_EXT_WIDTH (12*1024)
+
+/* GMBOX addresses */
+#define HIF_GMBOX_BASE_ADDR 0x7000
+#define HIF_GMBOX_WIDTH 0x4000
+
+/* interrupt mode register */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
+
+struct bus_request {
+ struct list_head list;
+
+ /* request data */
+ u32 address;
+
+ u8 *buffer;
+ u32 length;
+ u32 request;
+ struct htc_packet *packet;
+ int status;
+
+ /* this is a scatter request */
+ struct hif_scatter_req *scat_req;
+};
+
+/* direction of transfer (read/write) */
+#define HIF_READ 0x00000001
+#define HIF_WRITE 0x00000002
+#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
+
+/*
+ * emode - This indicates whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
+ * implemented using the asynchronous mode, allowing the bus
+ * driver to indicate the completion of an operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS 0x00000010
+#define HIF_ASYNCHRONOUS 0x00000020
+#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/*
+ * dmode - An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of the latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * ATH6KL interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS 0x00000040
+#define HIF_BLOCK_BASIS 0x00000080
+#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * amode - This indicates if the address has to be incremented on ATH6KL
+ * after every read/write operation (HIF_FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS 0x00000100
+#define HIF_INCREMENTAL_ADDRESS 0x00000200
+#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_FIX \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_INC \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_ASYNC_BLOCK_FIX \
+ (HIF_READ | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_SYNC_BLOCK_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+struct hif_scatter_item {
+ u8 *buf;
+ int len;
+ struct htc_packet *packet;
+};
+
+struct hif_scatter_req {
+ struct list_head list;
+ /* address for the read/write operation */
+ u32 addr;
+
+ /* request flags */
+ u32 req;
+
+ /* total length of entire transfer */
+ u32 len;
+
+ bool virt_scat;
+
+ void (*complete) (struct htc_target *, struct hif_scatter_req *);
+ int status;
+ int scat_entries;
+
+ struct bus_request *busrequest;
+ struct scatterlist *sgentries;
+
+ /* bounce buffer for upper layers to copy to/from */
+ u8 *virt_dma_buf;
+
+ u32 scat_q_depth;
+
+ struct hif_scatter_item scat_list[0];
+};
+
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+ /* protects irq_proc_reg and irq_en_reg below */
+ spinlock_t lock;
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ struct htc_target *htc_cnxt;
+ struct ath6kl *ar;
+};
+
+struct ath6kl_hif_ops {
+ int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request);
+ int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request, struct htc_packet *packet);
+
+ void (*irq_enable)(struct ath6kl *ar);
+ void (*irq_disable)(struct ath6kl *ar);
+
+ struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
+ void (*scatter_req_add)(struct ath6kl *ar,
+ struct hif_scatter_req *s_req);
+ int (*enable_scatter)(struct ath6kl *ar);
+ int (*scat_req_rw) (struct ath6kl *ar,
+ struct hif_scatter_req *scat_req);
+ void (*cleanup_scatter)(struct ath6kl *ar);
+ int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+ int (*resume)(struct ath6kl *ar);
+ int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+ int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
+ int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*power_on)(struct ath6kl *ar);
+ int (*power_off)(struct ath6kl *ar);
+ void (*stop)(struct ath6kl *ar);
+ int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
+ struct sk_buff *buf);
+ void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
+ int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
+ u8 *pipe_dl);
+ u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
+};
+
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h
new file mode 100644
index 000000000..2d4eed55c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_OPS_H
+#define HTC_OPS_H
+
+#include "htc.h"
+#include "debug.h"
+
+static inline void *ath6kl_htc_create(struct ath6kl *ar)
+{
+ return ar->htc_ops->create(ar);
+}
+
+static inline int ath6kl_htc_wait_target(struct htc_target *target)
+{
+ return target->dev->ar->htc_ops->wait_target(target);
+}
+
+static inline int ath6kl_htc_start(struct htc_target *target)
+{
+ return target->dev->ar->htc_ops->start(target);
+}
+
+static inline int ath6kl_htc_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *req,
+ struct htc_service_connect_resp *resp)
+{
+ return target->dev->ar->htc_ops->conn_service(target, req, resp);
+}
+
+static inline int ath6kl_htc_tx(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ return target->dev->ar->htc_ops->tx(target, packet);
+}
+
+static inline void ath6kl_htc_stop(struct htc_target *target)
+{
+ return target->dev->ar->htc_ops->stop(target);
+}
+
+static inline void ath6kl_htc_cleanup(struct htc_target *target)
+{
+ return target->dev->ar->htc_ops->cleanup(target);
+}
+
+static inline void ath6kl_htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ u16 tag)
+{
+ return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
+}
+
+static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
+{
+ return target->dev->ar->htc_ops->flush_rx_buf(target);
+}
+
+static inline void ath6kl_htc_activity_changed(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ bool active)
+{
+ return target->dev->ar->htc_ops->activity_changed(target, endpoint,
+ active);
+}
+
+static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
+ enum htc_endpoint_id endpoint)
+{
+ return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
+}
+
+static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pktq)
+{
+ return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
+}
+
+static inline int ath6kl_htc_credit_setup(struct htc_target *target,
+ struct ath6kl_htc_credit_info *info)
+{
+ return target->dev->ar->htc_ops->credit_setup(target, info);
+}
+
+static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
+ struct sk_buff *skb)
+{
+ ar->htc_ops->tx_complete(ar, skb);
+}
+
+
+static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
+ struct sk_buff *skb, u8 pipe)
+{
+ ar->htc_ops->rx_complete(ar, skb, pipe);
+}
+
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
new file mode 100644
index 000000000..14cab1403
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include "common.h"
+
+/* frame header flags */
+
+/* send direction */
+#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
+#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
+#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2)
+
+/* receive direction */
+#define HTC_FLG_RX_UNUSED (1 << 0)
+#define HTC_FLG_RX_TRAILER (1 << 1)
+/* Bundle count mask and shift */
+#define HTC_FLG_RX_BNDL_CNT (0xF0)
+#define HTC_FLG_RX_BNDL_CNT_S 4
+
+#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
+#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
+
+/* HTC control message IDs */
+
+#define HTC_MSG_READY_ID 1
+#define HTC_MSG_CONN_SVC_ID 2
+#define HTC_MSG_CONN_SVC_RESP_ID 3
+#define HTC_MSG_SETUP_COMPLETE_ID 4
+#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
+
+#define HTC_MAX_CTRL_MSG_LEN 256
+
+#define HTC_VERSION_2P0 0x00
+#define HTC_VERSION_2P1 0x01
+
+#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
+
+#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
+#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
+#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
+#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
+#define HTC_CONN_FLGS_THRESH_MASK 0x3
+/* disable credit flow control on a specific service */
+#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+
+/* no resources (i.e. no more endpoints) */
+#define HTC_SERVICE_NO_RESOURCES 3
+
+/* specific service is not allowing any more endpoints */
+#define HTC_SERVICE_NO_MORE_EP 4
+
+/* report record IDs */
+#define HTC_RECORD_NULL 0
+#define HTC_RECORD_CREDITS 1
+#define HTC_RECORD_LOOKAHEAD 2
+#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
+
+#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
+#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_MAX_SERVICES 5
+
+#define WMM_NUM_AC 4
+
+/* reserved and used to flush ALL packets */
+#define HTC_TX_PACKET_TAG_ALL 0
+#define HTC_SERVICE_TX_PACKET_TAG 1
+#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
+
+/* more packets on this endpoint are being fetched */
+#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
+
+/* TODO.. for BMI */
+#define ENDPOINT1 0
+/* TODO -remove me, but we have to fix BMI first */
+#define HTC_MAILBOX_NUM_MAX 4
+
+/* enable send bundle padding for this endpoint */
+#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
+#define HTC_EP_ACTIVE ((u32) (1u << 31))
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
+#define HTC_TARGET_RESPONSE_POLL_WAIT 10
+#define HTC_TARGET_RESPONSE_POLL_COUNT 200
+#define HTC_TARGET_DEBUG_INTR_MASK 0x01
+#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
+
+#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
+
+/* packet flags */
+
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
+
+#define NUM_CONTROL_BUFFERS 8
+#define NUM_CONTROL_TX_BUFFERS 2
+#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
+
+#define HTC_RECV_WAIT_BUFFERS (1 << 0)
+#define HTC_OP_STATE_STOPPING (1 << 0)
+#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1)
+
+/*
+ * The frame header length and message formats defined herein were selected
+ * to accommodate optimal alignment for target processing. This reduces
+ * code size and improves performance. Any changes to the header length may
+ * alter the alignment and cause exceptions on the target. When adding to
+ * the message structures, ensure that fields are properly aligned.
+ */
+
+/* HTC frame header
+ *
+ * NOTE: do not remove or re-arrange the fields, these are minimally
+ * required to take advantage of 4-byte lookaheads in some hardware
+ * implementations.
+ */
+struct htc_frame_hdr {
+ u8 eid;
+ u8 flags;
+
+ /* length of data (including trailer) that follows the header */
+ __le16 payld_len;
+
+ /* end of 4-byte lookahead */
+
+ u8 ctrl[2];
+} __packed;
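+/*
+ * eid, flags and payld_len form the 4-byte lookahead word; ctrl[2]
+ * completes the 6-byte header.
+ */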
+
+/* HTC ready message */
+struct htc_ready_msg {
+ __le16 msg_id;
+ __le16 cred_cnt;
+ __le16 cred_sz;
+ u8 max_ep;
+ u8 pad;
+} __packed;
+
+/* extended HTC ready message */
+struct htc_ready_ext_msg {
+ struct htc_ready_msg ver2_0_info;
+ u8 htc_ver;
+ u8 msg_per_htc_bndl;
+} __packed;
+
+/* connect service */
+struct htc_conn_service_msg {
+ __le16 msg_id;
+ __le16 svc_id;
+ __le16 conn_flags;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response */
+struct htc_conn_service_resp {
+ __le16 msg_id;
+ __le16 svc_id;
+ u8 status;
+ u8 eid;
+ __le16 max_msg_sz;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_setup_comp_msg {
+ __le16 msg_id;
+} __packed;
+
+/* extended setup completion message */
+struct htc_setup_comp_ext_msg {
+ __le16 msg_id;
+ __le32 flags;
+ u8 msg_per_rxbndl;
+ u8 Rsvd[3];
+} __packed;
+
+struct htc_record_hdr {
+ u8 rec_id;
+ u8 len;
+} __packed;
+
+struct htc_credit_report {
+ u8 eid;
+ u8 credits;
+} __packed;
+
+/*
+ * NOTE: The lk_ahd array is guarded by pre_valid and post_valid
+ * guard bytes. The pre_valid byte must equal the bitwise inverse
+ * of the post_valid byte.
+ */
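+/*
+ * e.g. pre_valid 0xAA is only valid with post_valid 0x55,
+ * since 0xAA == (~0x55 & 0xFF).
+ */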
+struct htc_lookahead_report {
+ u8 pre_valid;
+ u8 lk_ahd[4];
+ u8 post_valid;
+} __packed;
+
+struct htc_bundle_lkahd_rpt {
+ u8 lk_ahd[4];
+} __packed;
+
+/* Current service IDs */
+
+enum htc_service_grp_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_TEST_GROUP = 254,
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+/* ------ endpoint IDs ------ */
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT_0 = 0,
+ ENDPOINT_1 = 1,
+ ENDPOINT_2 = 2,
+ ENDPOINT_3,
+ ENDPOINT_4,
+ ENDPOINT_5,
+ ENDPOINT_6,
+ ENDPOINT_7,
+ ENDPOINT_8,
+ ENDPOINT_MAX,
+};
+
+struct htc_tx_packet_info {
+ u16 tag;
+ int cred_used;
+ u8 flags;
+ int seqno;
+};
+
+struct htc_rx_packet_info {
+ u32 exp_hdr;
+ u32 rx_flags;
+ u32 indicat_flags;
+};
+
+struct htc_target;
+
+/* wrapper around endpoint-specific packets */
+struct htc_packet {
+ struct list_head list;
+
+ /* caller's per packet specific context */
+ void *pkt_cntxt;
+
+ /*
+ * the true buffer start; the caller can store the real
+ * buffer start here. In receive callbacks, the HTC layer
+ * sets buf to the start of the payload past the header.
+ * This field allows the caller to reset buf when it recycles
+ * receive packets back to HTC.
+ */
+ u8 *buf_start;
+
+ /*
+ * Pointer to the start of the buffer. In the transmit
+ * direction this points to the start of the payload. In the
+ * receive direction, however, the buffer when queued up
+ * points to the start of the HTC header but when returned
+ * to the caller points to the start of the payload
+ */
+ u8 *buf;
+ u32 buf_len;
+
+ /* actual length of payload */
+ u32 act_len;
+
+ /* endpoint that this packet was sent/recv'd from */
+ enum htc_endpoint_id endpoint;
+
+ /* completion status */
+
+ int status;
+ union {
+ struct htc_tx_packet_info tx;
+ struct htc_rx_packet_info rx;
+ } info;
+
+ void (*completion) (struct htc_target *, struct htc_packet *);
+ struct htc_target *context;
+
+ /*
+ * optimization for network-oriented data: the HTC packet
+ * can pass the network buffer corresponding to the HTC packet;
+ * lower layers may optimize the transfer knowing this is
+ * a network buffer
+ */
+ struct sk_buff *skb;
+};
+
+enum htc_send_full_action {
+ HTC_SEND_FULL_KEEP = 0,
+ HTC_SEND_FULL_DROP = 1,
+};
+
+struct htc_ep_callbacks {
+ void (*tx_complete) (struct htc_target *, struct htc_packet *);
+ void (*rx) (struct htc_target *, struct htc_packet *);
+ void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
+ enum htc_send_full_action (*tx_full) (struct htc_target *,
+ struct htc_packet *);
+ struct htc_packet *(*rx_allocthresh) (struct htc_target *,
+ enum htc_endpoint_id, int);
+ void (*tx_comp_multi) (struct htc_target *, struct list_head *);
+ int rx_alloc_thresh;
+ int rx_refill_thresh;
+};
+
+/* service connection information */
+struct htc_service_connect_req {
+ u16 svc_id;
+ u16 conn_flags;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ u32 flags;
+ unsigned int max_rxmsg_sz;
+};
+
+/* service connection response information */
+struct htc_service_connect_resp {
+ u8 buf_len;
+ u8 act_len;
+ enum htc_endpoint_id endpoint;
+ unsigned int len_max;
+ u8 resp_code;
+};
+
+/* endpoint distribution structure */
+struct htc_endpoint_credit_dist {
+ struct list_head list;
+
+ /* Service ID (set by HTC) */
+ u16 svc_id;
+
+ /* endpoint for this distribution struct (set by HTC) */
+ enum htc_endpoint_id endpoint;
+
+ u32 dist_flags;
+
+ /*
+ * credits for normal operation, anything above this
+ * indicates the endpoint is over-subscribed.
+ */
+ int cred_norm;
+
+ /* floor for credit distribution */
+ int cred_min;
+
+ int cred_assngd;
+
+ /* current credits available */
+ int credits;
+
+ /*
+ * pending credits to distribute on this endpoint; this
+ * is set by HTC when credit reports arrive. The credit
+ * distribution function sets this to zero when it distributes
+ * the credits.
+ */
+ int cred_to_dist;
+
+ /*
+ * the number of credits that the current pending TX packet needs
+ * to transmit. This is set by HTC when the endpoint needs credits in
+ * order to transmit.
+ */
+ int seek_cred;
+
+ /* size in bytes of each credit */
+ int cred_sz;
+
+ /* credits required for a maximum sized message */
+ int cred_per_msg;
+
+ /* reserved for HTC use */
+ struct htc_endpoint *htc_ep;
+
+ /*
+ * current depth of TX queue, i.e. messages waiting for credits.
+ * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
+ * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
+ * that has non-zero credits to recover.
+ */
+ int txq_depth;
+};
+
+/*
+ * credit distribution code that is passed into the distribution
+ * function; there are mandatory and optional codes that must be handled
+ */
+enum htc_credit_dist_reason {
+ HTC_CREDIT_DIST_SEND_COMPLETE = 0,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,
+ HTC_CREDIT_DIST_SEEK_CREDITS,
+};
+
+struct ath6kl_htc_credit_info {
+ int total_avail_credits;
+ int cur_free_credits;
+
+ /* list of lowest priority endpoints */
+ struct list_head lowestpri_ep_dist;
+};
+
+/* endpoint statistics */
+struct htc_endpoint_stats {
+ /*
+ * number of times the host set the credit-low flag in a send
+ * message on this endpoint
+ */
+ u32 cred_low_indicate;
+
+ u32 tx_issued;
+ u32 tx_pkt_bundled;
+ u32 tx_bundles;
+ u32 tx_dropped;
+
+ /* running count of total credit reports received for this endpoint */
+ u32 tx_cred_rpt;
+
+ /* credit reports received from this endpoint's RX packets */
+ u32 cred_rpt_from_rx;
+
+ /* credit reports received from RX packets of other endpoints */
+ u32 cred_rpt_from_other;
+
+ /* credit reports received from endpoint 0 RX packets */
+ u32 cred_rpt_ep0;
+
+ /* count of credits received via Rx packets on this endpoint */
+ u32 cred_from_rx;
+
+ /* count of credits received via another endpoint */
+ u32 cred_from_other;
+
+ /* count of credits received via endpoint 0 */
+ u32 cred_from_ep0;
+
+ /* count of consumed credits */
+ u32 cred_cosumd;
+
+ /* count of credits returned */
+ u32 cred_retnd;
+
+ u32 rx_pkts;
+
+ /* count of lookahead records found in Rx msg */
+ u32 rx_lkahds;
+
+ /* count of recv packets received in a bundle */
+ u32 rx_bundl;
+
+ /* count of number of bundled lookaheads */
+ u32 rx_bundle_lkahd;
+
+ /* count of the number of bundle indications from the HTC header */
+ u32 rx_bundle_from_hdr;
+
+ /* the number of times the recv allocation threshold was hit */
+ u32 rx_alloc_thresh_hit;
+
+ /* total payload bytes counted when the recv allocation threshold is hit */
+ u32 rxalloc_thresh_byte;
+};
+
+struct htc_endpoint {
+ enum htc_endpoint_id eid;
+ u16 svc_id;
+ struct list_head txq;
+ struct list_head rx_bufq;
+ struct htc_endpoint_credit_dist cred_dist;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ int len_max;
+ int tx_proc_cnt;
+ int rx_proc_cnt;
+ struct htc_target *target;
+ u8 seqno;
+ u32 conn_flags;
+ struct htc_endpoint_stats ep_st;
+ u16 tx_drop_packet_threshold;
+
+ struct {
+ u8 pipeid_ul;
+ u8 pipeid_dl;
+ struct list_head tx_lookup_queue;
+ bool tx_credit_flow_enabled;
+ } pipe;
+};
+
+struct htc_control_buffer {
+ struct htc_packet packet;
+ u8 *buf;
+};
+
+struct htc_pipe_txcredit_alloc {
+ u16 service_id;
+ u8 credit_alloc;
+};
+
+enum htc_send_queue_result {
+ HTC_SEND_QUEUE_OK = 0, /* packet was queued */
+ HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
+};
+
+struct ath6kl_htc_ops {
+ void* (*create)(struct ath6kl *ar);
+ int (*wait_target)(struct htc_target *target);
+ int (*start)(struct htc_target *target);
+ int (*conn_service)(struct htc_target *target,
+ struct htc_service_connect_req *req,
+ struct htc_service_connect_resp *resp);
+ int (*tx)(struct htc_target *target, struct htc_packet *packet);
+ void (*stop)(struct htc_target *target);
+ void (*cleanup)(struct htc_target *target);
+ void (*flush_txep)(struct htc_target *target,
+ enum htc_endpoint_id endpoint, u16 tag);
+ void (*flush_rx_buf)(struct htc_target *target);
+ void (*activity_changed)(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ bool active);
+ int (*get_rxbuf_num)(struct htc_target *target,
+ enum htc_endpoint_id endpoint);
+ int (*add_rxbuf_multiple)(struct htc_target *target,
+ struct list_head *pktq);
+ int (*credit_setup)(struct htc_target *target,
+ struct ath6kl_htc_credit_info *cred_info);
+ int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
+ int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
+};
+
+struct ath6kl_device;
+
+/* our HTC target state */
+struct htc_target {
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+ /* contains struct htc_endpoint_credit_dist */
+ struct list_head cred_dist_list;
+
+ struct list_head free_ctrl_txbuf;
+ struct list_head free_ctrl_rxbuf;
+ struct ath6kl_htc_credit_info *credit_info;
+ int tgt_creds;
+ unsigned int tgt_cred_sz;
+
+ /* protects free_ctrl_txbuf and free_ctrl_rxbuf */
+ spinlock_t htc_lock;
+
+ /* FIXME: does this protect rx_bufq and endpoint structures or what? */
+ spinlock_t rx_lock;
+
+ /* protects endpoint->txq */
+ spinlock_t tx_lock;
+
+ struct ath6kl_device *dev;
+ u32 htc_flags;
+ u32 rx_st_flags;
+ enum htc_endpoint_id ep_waiting;
+ u8 htc_tgt_ver;
+
+ /* max messages per bundle for HTC */
+ int msg_per_bndl_max;
+
+ u32 tx_bndl_mask;
+ int rx_bndl_enable;
+ int max_rx_bndl_sz;
+ int max_tx_bndl_sz;
+
+ u32 block_sz;
+ u32 block_mask;
+
+ int max_scat_entries;
+ int max_xfer_szper_scatreq;
+
+ int chk_irq_status_cnt;
+
+ /* counts consecutive Tx packets sent without bundling, per AC */
+ u32 ac_tx_count[WMM_NUM_AC];
+
+ struct {
+ struct htc_packet *htc_packet_pool;
+ u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
+ int ctrl_response_len;
+ bool ctrl_response_valid;
+ struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
+ } pipe;
+};
+
+int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
+ u32 msg_look_ahead, int *n_pkts);
+
+static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned int len,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->act_len = len;
+ packet->endpoint = eid;
+ packet->info.tx.tag = tag;
+}
+
+static inline void htc_rxpkt_reset(struct htc_packet *packet)
+{
+ packet->buf = packet->buf_start;
+ packet->act_len = 0;
+}
+
+static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned long len,
+ enum htc_endpoint_id eid)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->buf_start = buf;
+ packet->buf_len = len;
+ packet->endpoint = eid;
+}
+
+static inline int get_queue_depth(struct list_head *queue)
+{
+ struct list_head *tmp_list;
+ int depth = 0;
+
+ list_for_each(tmp_list, queue)
+ depth++;
+
+ return depth;
+}
+
+void ath6kl_htc_pipe_attach(struct ath6kl *ar);
+void ath6kl_htc_mbox_attach(struct ath6kl *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
new file mode 100644
index 000000000..e481f14b9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -0,0 +1,2935 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "trace.h"
+
+#include <asm/unaligned.h>
+
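+/*
+ * Round a transfer length up to the device block boundary; e.g. with a
+ * hypothetical 128-byte block size (block_mask 0x7f), a 100-byte message
+ * is padded up to 128 bytes.
+ */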
+#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+
+static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
+static void ath6kl_htc_mbox_stop(struct htc_target *target);
+static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pkt_queue);
+static void ath6kl_htc_set_credit_dist(struct htc_target *target,
+ struct ath6kl_htc_credit_info *cred_info,
+ u16 svc_pri_order[], int len);
+
+/* threshold to re-enable Tx bundling for an AC */
+#define TX_RESUME_BUNDLE_THRESHOLD 1500
+
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int credits)
+{
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+ ep_dist->endpoint, credits);
+
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+ if (tot_credits > 4) {
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_credit_deposit(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_credit_deposit(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+ * Control service is always marked active, it
+ * never goes inactive EVER.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+
+ /*
+ * Streams have to be created (explicit | implicit) for all
+ * kinds of traffic. BE endpoints are also inactive in the
+ * beginning. When BE traffic starts it creates implicit
+ * streams that redistribute credits.
+ *
+ * Note: all other endpoints have minimums set but are
+ * initially given NO credits. Credits will be distributed
+ * as traffic activity demands.
+ */
+ }
+
+ /*
+ * The ath6kl_credit_seek() function uses list_for_each_entry_reverse()
+ * to walk the whole ep list. Therefore assign lowestpri_ep_dist only
+ * after walking the ep_list.
+ */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ } else {
+ /*
+ * For the remaining data endpoints, we assume that
+ * each cred_per_msg is the same. We use a simple
+ * calculation here: we take the remaining credits
+ * and determine how many max messages this can
+ * cover and then set each endpoint's normal value
+ * equal to 3/4 this amount.
+ */
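+ /*
+ * e.g. 20 free credits with cred_per_msg 6:
+ * (20/6)*6 = 18, (18*3)>>2 = 13, max(13, 6) = 13
+ */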
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+ cur_ep_dist->endpoint,
+ cur_ep_dist->svc_id,
+ cur_ep_dist->credits,
+ cur_ep_dist->cred_per_msg,
+ cur_ep_dist->cred_norm,
+ cur_ep_dist->cred_min);
+ }
+}
+
+/* initialize and setup credit distribution */
+static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
+ struct ath6kl_htc_credit_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+ ep_dist->endpoint, limit);
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_list;
+
+ list_for_each_entry(cur_list, epdist_list, list) {
+ if (cur_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_list->cred_to_dist > 0) {
+ cur_list->credits += cur_list->cred_to_dist;
+ cur_list->cred_to_dist = 0;
+
+ if (cur_list->credits > cur_list->cred_assngd)
+ ath6kl_credit_reduce(cred_info,
+ cur_list,
+ cur_list->cred_assngd);
+
+ if (cur_list->credits > cur_list->cred_norm)
+ ath6kl_credit_reduce(cred_info, cur_list,
+ cur_list->cred_norm);
+
+ if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_list->txq_depth == 0)
+ ath6kl_credit_reduce(cred_info,
+ cur_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+ if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm of:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+ /*
+ * We don't have enough in the free pool, so try taking away from
+ * lower priority services. The rules for taking away credits:
+ *
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never
+ * starve an endpoint completely)
+ * 3. Only take what you need.
+ */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+ /*
+ * The current one has been allocated more than
+ * its minimum and has enough credits assigned
+ * above its minimum to fulfill our need; take
+ * away just enough to fulfill that need.
+ */
+ ath6kl_credit_reduce(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6kl_credit_reduce(info, curdist_list, 0);
+ else
+ ath6kl_credit_reduce(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked; this
+ * function shall NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6kl_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6kl_credit_redistribute(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
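+/*
+ * Shift the frame down into the headroom just below *buf (by at most 3
+ * bytes) so that it starts on a 4-byte boundary; e.g. a buffer ending in
+ * ...06 is moved to ...04. This assumes the caller left enough headroom.
+ */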
+static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
+{
+ u8 *align_addr;
+
+ if (!IS_ALIGNED((unsigned long) *buf, 4)) {
+ align_addr = PTR_ALIGN(*buf - 4, 4);
+ memmove(align_addr, *buf, len);
+ *buf = align_addr;
+ }
+}
+
+static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
+ int ctrl0, int ctrl1)
+{
+ struct htc_frame_hdr *hdr;
+
+ packet->buf -= HTC_HDR_LENGTH;
+ hdr = (struct htc_frame_hdr *)packet->buf;
+
+ /* Endianness? */
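+ /*
+ * (put_unaligned() does no byte swapping here, so this presumably
+ * relies on a little-endian host)
+ */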
+ put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ hdr->flags = flags;
+ hdr->eid = packet->endpoint;
+ hdr->ctrl[0] = ctrl0;
+ hdr->ctrl[1] = ctrl1;
+}
+
+static void htc_reclaim_txctrl_buf(struct htc_target *target,
+ struct htc_packet *pkt)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static struct htc_packet *htc_get_control_buf(struct htc_target *target,
+ bool tx)
+{
+ struct htc_packet *packet = NULL;
+ struct list_head *buf_list;
+
+ buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
+
+ spin_lock_bh(&target->htc_lock);
+
+ if (list_empty(buf_list)) {
+ spin_unlock_bh(&target->htc_lock);
+ return NULL;
+ }
+
+ packet = list_first_entry(buf_list, struct htc_packet, list);
+ list_del(&packet->list);
+ spin_unlock_bh(&target->htc_lock);
+
+ if (tx)
+ packet->buf = packet->buf_start + HTC_HDR_LENGTH;
+
+ return packet;
+}
+
+static void htc_tx_comp_update(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ packet->completion = NULL;
+ packet->buf += HTC_HDR_LENGTH;
+
+ if (!packet->status)
+ return;
+
+ ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
+ packet->status, packet->endpoint, packet->act_len,
+ packet->info.tx.cred_used);
+
+ /* on failure to submit, reclaim credits for this packet */
+ spin_lock_bh(&target->tx_lock);
+ endpoint->cred_dist.cred_to_dist +=
+ packet->info.tx.cred_used;
+ endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
+
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static void htc_tx_complete(struct htc_endpoint *endpoint,
+ struct list_head *txq)
+{
+ if (list_empty(txq))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx complete ep %d pkts %d\n",
+ endpoint->eid, get_queue_depth(txq));
+
+ ath6kl_tx_complete(endpoint->target, txq);
+}
+
+static void htc_tx_comp_handler(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
+ struct list_head container;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+ packet->info.tx.seqno);
+
+ htc_tx_comp_update(target, endpoint, packet);
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ /* do completion */
+ htc_tx_complete(endpoint, &container);
+}
+
+static void htc_async_tx_scat_complete(struct htc_target *target,
+ struct hif_scatter_req *scat_req)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet;
+ struct list_head tx_compq;
+ int i;
+
+ INIT_LIST_HEAD(&tx_compq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scat complete len %d entries %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (scat_req->status)
+ ath6kl_err("send scatter req failed: %d\n", scat_req->status);
+
+ packet = scat_req->scat_list[0].packet;
+ endpoint = &target->endpoint[packet->endpoint];
+
+ /* walk through the scatter list and process */
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ if (!packet) {
+ WARN_ON(1);
+ return;
+ }
+
+ packet->status = scat_req->status;
+ htc_tx_comp_update(target, endpoint, packet);
+ list_add_tail(&packet->list, &tx_compq);
+ }
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+ /* complete all packets */
+ htc_tx_complete(endpoint, &tx_compq);
+}
+
+static int ath6kl_htc_tx_issue(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ int status;
+ bool sync = false;
+ u32 padded_len, send_len;
+
+ if (!packet->completion)
+ sync = true;
+
+ send_len = packet->act_len + HTC_HDR_LENGTH;
+
+ padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+ send_len, packet->info.tx.seqno, padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
+
+ if (sync) {
+ status = hif_read_write_sync(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_SYNC_BLOCK_INC);
+
+ packet->status = status;
+ packet->buf += HTC_HDR_LENGTH;
+ } else
+ status = hif_write_async(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_ASYNC_BLOCK_INC, packet);
+
+ trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
+
+ return status;
+}
+
+static int htc_check_credits(struct htc_target *target,
+ struct htc_endpoint *ep, u8 *flags,
+ enum htc_endpoint_id eid, unsigned int len,
+ int *req_cred)
+{
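+ /*
+ * e.g. a 3000-byte message with a hypothetical 1536-byte credit size
+ * needs DIV_ROUND_UP(3000, 1536) = 2 credits
+ */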
+ *req_cred = (len > target->tgt_cred_sz) ?
+ DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
+ *req_cred, ep->cred_dist.credits);
+
+ if (ep->cred_dist.credits < *req_cred) {
+ if (eid == ENDPOINT_0)
+ return -EINVAL;
+
+ /* Seek more credits */
+ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
+
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
+
+ ep->cred_dist.seek_cred = 0;
+
+ if (ep->cred_dist.credits < *req_cred) {
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit not found for ep %d\n",
+ eid);
+ return -EINVAL;
+ }
+ }
+
+ ep->cred_dist.credits -= *req_cred;
+ ep->ep_st.cred_cosumd += *req_cred;
+
+ /* When we are getting low on credits, ask for more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ ep->cred_dist.seek_cred =
+ ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
+
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
+
+ /* see if we were successful in getting more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ /* tell the target we need credits ASAP! */
+ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ ep->ep_st.cred_low_indicate += 1;
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit we need credits asap\n");
+ }
+ }
+
+ return 0;
+}
+
+static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int req_cred;
+ u8 flags;
+ struct htc_packet *packet;
+ unsigned int len;
+
+ while (true) {
+ flags = 0;
+
+ if (list_empty(&endpoint->txq))
+ break;
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx got packet 0x%p queue depth %d\n",
+ packet, get_queue_depth(&endpoint->txq));
+
+ len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ if (htc_check_credits(target, endpoint, &flags,
+ packet->endpoint, len, &req_cred))
+ break;
+
+ /* now we can fully move onto caller's queue */
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+ list_move_tail(&packet->list, queue);
+
+ /* save the number of credits this packet consumed */
+ packet->info.tx.cred_used = req_cred;
+
+ /* all TX packets are handled asynchronously */
+ packet->completion = htc_tx_comp_handler;
+ packet->context = target;
+ endpoint->ep_st.tx_issued += 1;
+
+ /* save send flags */
+ packet->info.tx.flags = flags;
+ packet->info.tx.seqno = endpoint->seqno;
+ endpoint->seqno++;
+ }
+}
+
+/* See if the padded tx length falls on a credit boundary */
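+/*
+ * e.g. with a hypothetical 128-byte credit size, a 100-byte transfer gets
+ * 28 bytes of dummy padding so the bundled message consumes a whole credit
+ */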
+static int htc_get_credit_padding(unsigned int cred_sz, int *len,
+ struct htc_endpoint *ep)
+{
+ int rem_cred, cred_pad;
+
+ rem_cred = *len % cred_sz;
+
+ /* No padding needed */
+ if (!rem_cred)
+ return 0;
+
+ if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
+ return -1;
+
+ /*
+ * The transfer consumes a "partial" credit; this
+ * packet cannot be bundled unless we add
+ * additional "dummy" padding (max 255 bytes) to
+ * consume the entire credit.
+ */
+ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
+
+ if ((cred_pad > 0) && (cred_pad <= 255))
+ *len += cred_pad;
+ else
+ /* The amount of padding is too large, send as non-bundled */
+ return -1;
+
+ return cred_pad;
+}
+
+static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct hif_scatter_req *scat_req,
+ int n_scat,
+ struct list_head *queue)
+{
+ struct htc_packet *packet;
+ int i, len, rem_scat, cred_pad;
+ int status = 0;
+ u8 flags;
+
+ rem_scat = target->max_tx_bndl_sz;
+
+ for (i = 0; i < n_scat; i++) {
+ scat_req->scat_list[i].packet = NULL;
+
+ if (list_empty(queue))
+ break;
+
+ packet = list_first_entry(queue, struct htc_packet, list);
+ len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
+ &len, endpoint);
+ if (cred_pad < 0 || rem_scat < len) {
+ status = -ENOSPC;
+ break;
+ }
+
+ rem_scat -= len;
+ /* now remove it from the queue */
+ list_del(&packet->list);
+
+ scat_req->scat_list[i].packet = packet;
+ /* prepare packet and flag message as part of a send bundle */
+ flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
+ ath6kl_htc_tx_prep_pkt(packet, flags,
+ cred_pad, packet->info.tx.seqno);
+ /* Make sure the buffer is 4-byte aligned */
+ ath6kl_htc_tx_buf_align(&packet->buf,
+ packet->act_len + HTC_HDR_LENGTH);
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = len;
+
+ scat_req->len += len;
+ scat_req->scat_entries++;
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+ i, packet, packet->info.tx.seqno, len, rem_scat);
+ }
+
+ /* Roll back scatter setup in case of any failure */
+ if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
+ for (i = scat_req->scat_entries - 1; i >= 0; i--) {
+ packet = scat_req->scat_list[i].packet;
+ if (packet) {
+ packet->buf += HTC_HDR_LENGTH;
+ list_add(&packet->list, queue);
+ }
+ }
+ return -EAGAIN;
+ }
+
+ return status;
+}
+
+/*
+ * Drain a queue and send as bundles. This function may return without
+ * fully draining the queue when:
+ *
+ * 1. scatter resources are exhausted
+ * 2. a message that will consume a partial credit will stop the
+ * bundling process early
+ * 3. we drop below the minimum number of messages for a bundle
+ */
+static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
+ struct list_head *queue,
+ int *sent_bundle, int *n_bundle_pkts)
+{
+ struct htc_target *target = endpoint->target;
+ struct hif_scatter_req *scat_req = NULL;
+ int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
+ struct htc_packet *packet;
+ int status;
+ u32 txb_mask;
+ u8 ac = WMM_NUM_AC;
+
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
+ (WMI_CONTROL_SVC != endpoint->svc_id))
+ ac = target->dev->ar->ep2ac_map[endpoint->eid];
+
+ while (true) {
+ status = 0;
+ n_scat = get_queue_depth(queue);
+ n_scat = min(n_scat, target->msg_per_bndl_max);
+
+ if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
+ /* not enough to bundle */
+ break;
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (!scat_req) {
+ /* no scatter resources */
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx no more scatter resources\n");
+ break;
+ }
+
+ if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
+ if (WMM_AC_BE == ac)
+ /*
+ * BE, BK have priorities and bit
+ * positions reversed
+ */
+ txb_mask = (1 << WMM_AC_BK);
+ else
+ /*
+ * any AC with priority lower than
+ * itself
+ */
+ txb_mask = ((1 << ac) - 1);
+
+ /*
+ * when the scatter request resources drop below a
+ * certain threshold, disable Tx bundling for all
+ * ACs with priority lower than the current requesting
+ * AC. Otherwise re-enable Tx bundling for them.
+ */
+ if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
+ target->tx_bndl_mask &= ~txb_mask;
+ else
+ target->tx_bndl_mask |= txb_mask;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
+ n_scat);
+
+ scat_req->len = 0;
+ scat_req->scat_entries = 0;
+
+ status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
+ scat_req, n_scat,
+ queue);
+ if (status == -EAGAIN) {
+ hif_scatter_req_add(target->dev->ar, scat_req);
+ break;
+ }
+
+ /* send path is always asynchronous */
+ scat_req->complete = htc_async_tx_scat_complete;
+ n_sent_bundle++;
+ tot_pkts_bundle += scat_req->scat_entries;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scatter bytes %d entries %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ trace_ath6kl_htc_tx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+ }
+
+ ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
+
+ if (status)
+ break;
+ }
+
+ *sent_bundle = n_sent_bundle;
+ *n_bundle_pkts = tot_pkts_bundle;
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+ n_sent_bundle);
+
+ return;
+}
+
+static void ath6kl_htc_tx_from_queue(struct htc_target *target,
+ struct htc_endpoint *endpoint)
+{
+ struct list_head txq;
+ struct htc_packet *packet;
+ int bundle_sent;
+ int n_pkts_bundle;
+ u8 ac = WMM_NUM_AC;
+ int status;
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->tx_proc_cnt++;
+ if (endpoint->tx_proc_cnt > 1) {
+ endpoint->tx_proc_cnt--;
+ spin_unlock_bh(&target->tx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
+ return;
+ }
+
+ /*
+ * drain the endpoint TX queue for transmission as long
+ * as we have enough credits.
+ */
+ INIT_LIST_HEAD(&txq);
+
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
+ (WMI_CONTROL_SVC != endpoint->svc_id))
+ ac = target->dev->ar->ep2ac_map[endpoint->eid];
+
+ while (true) {
+ if (list_empty(&endpoint->txq))
+ break;
+
+ ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
+
+ if (list_empty(&txq))
+ break;
+
+ spin_unlock_bh(&target->tx_lock);
+
+ bundle_sent = 0;
+ n_pkts_bundle = 0;
+
+ while (true) {
+ /* try to send a bundle on each pass */
+ if ((target->tx_bndl_mask) &&
+ (get_queue_depth(&txq) >=
+ HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1 = 0, temp2 = 0;
+
+ /* check if bundling is enabled for an AC */
+ if (target->tx_bndl_mask & (1 << ac)) {
+ ath6kl_htc_tx_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
+ }
+
+ if (list_empty(&txq))
+ break;
+
+ packet = list_first_entry(&txq, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
+ 0, packet->info.tx.seqno);
+ status = ath6kl_htc_tx_issue(target, packet);
+
+ if (status) {
+ packet->status = status;
+ packet->completion(packet->context, packet);
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->ep_st.tx_bundles += bundle_sent;
+ endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+
+ /*
+ * if an AC has bundling disabled and a certain number of
+ * consecutive TX packets have gone out without bundling,
+ * re-enable tx bundling for this AC
+ */
+ if (!bundle_sent) {
+ if (!(target->tx_bndl_mask & (1 << ac)) &&
+ (ac < WMM_NUM_AC)) {
+ if (++target->ac_tx_count[ac] >=
+ TX_RESUME_BUNDLE_THRESHOLD) {
+ target->ac_tx_count[ac] = 0;
+ target->tx_bndl_mask |= (1 << ac);
+ }
+ }
+ } else {
+ /* tx bundling will reset the counter */
+ if (ac < WMM_NUM_AC)
+ target->ac_tx_count[ac] = 0;
+ }
+ }
+
+ endpoint->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static bool ath6kl_htc_tx_try(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *tx_pkt)
+{
+ struct htc_ep_callbacks ep_cb;
+ int txq_depth;
+ bool overflow = false;
+
+ ep_cb = endpoint->ep_cb;
+
+ spin_lock_bh(&target->tx_lock);
+ txq_depth = get_queue_depth(&endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txq_depth >= endpoint->max_txq_depth)
+ overflow = true;
+
+ if (overflow)
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx overflow ep %d depth %d max %d\n",
+ endpoint->eid, txq_depth,
+ endpoint->max_txq_depth);
+
+ if (overflow && ep_cb.tx_full) {
+ if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
+ HTC_SEND_FULL_DROP) {
+ endpoint->ep_st.tx_dropped += 1;
+ return false;
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ list_add_tail(&tx_pkt->list, &endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ ath6kl_htc_tx_from_queue(target, endpoint);
+
+ return true;
+}
+
+static void htc_chk_ep_txq(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_endpoint_credit_dist *cred_dist;
+
+ /*
+ * Run through the credit distribution list to see if there are
+ * packets queued. NOTE: no locks need to be taken since the
+ * distribution list is not dynamic (cannot be re-ordered) and we
+ * are not modifying any state.
+ */
+ list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
+ endpoint = cred_dist->htc_ep;
+
+ spin_lock_bh(&target->tx_lock);
+ if (!list_empty(&endpoint->txq)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc creds ep %d credits %d pkts %d\n",
+ cred_dist->endpoint,
+ endpoint->cred_dist.credits,
+ get_queue_depth(&endpoint->txq));
+ spin_unlock_bh(&target->tx_lock);
+ /*
+ * Try to start the stalled queue, this list is
+ * ordered by priority. If there are credits
+ * available the highest priority queue will get a
+ * chance to reclaim credits from lower priority
+ * ones.
+ */
+ ath6kl_htc_tx_from_queue(target, endpoint);
+ spin_lock_bh(&target->tx_lock);
+ }
+ spin_unlock_bh(&target->tx_lock);
+ }
+}
+
+static int htc_setup_tx_complete(struct htc_target *target)
+{
+ struct htc_packet *send_pkt = NULL;
+ int status;
+
+ send_pkt = htc_get_control_buf(target, true);
+
+ if (!send_pkt)
+ return -ENOMEM;
+
+ if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
+ struct htc_setup_comp_ext_msg *setup_comp_ext;
+ u32 flags = 0;
+
+ setup_comp_ext =
+ (struct htc_setup_comp_ext_msg *)send_pkt->buf;
+ memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
+ setup_comp_ext->msg_id =
+ cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (target->msg_per_bndl_max > 0) {
+ /* Indicate HTC bundling to the target */
+ flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
+ setup_comp_ext->msg_per_rxbndl =
+ target->msg_per_bndl_max;
+ }
+
+ memcpy(&setup_comp_ext->flags, &flags,
+ sizeof(setup_comp_ext->flags));
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ } else {
+ struct htc_setup_comp_msg *setup_comp;
+ setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
+ memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
+ setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
+ sizeof(struct htc_setup_comp_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ }
+
+ /* we want synchronous operation */
+ send_pkt->completion = NULL;
+ ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
+ status = ath6kl_htc_tx_issue(target, send_pkt);
+
+ if (send_pkt != NULL)
+ htc_reclaim_txctrl_buf(target, send_pkt);
+
+ return status;
+}
+
+static void ath6kl_htc_set_credit_dist(struct htc_target *target,
+ struct ath6kl_htc_credit_info *credit_info,
+ u16 srvc_pri_order[], int list_len)
+{
+ struct htc_endpoint *endpoint;
+ int i, ep;
+
+ target->credit_info = credit_info;
+
+ list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
+ &target->cred_dist_list);
+
+ for (i = 0; i < list_len; i++) {
+ for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
+ endpoint = &target->endpoint[ep];
+ if (endpoint->svc_id == srvc_pri_order[i]) {
+ list_add_tail(&endpoint->cred_dist.list,
+ &target->cred_dist_list);
+ break;
+ }
+ }
+ if (ep >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return;
+ }
+ }
+}
+
+static int ath6kl_htc_mbox_tx(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint;
+ struct list_head queue;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx ep id %d buf 0x%p len %d\n",
+ packet->endpoint, packet->buf, packet->act_len);
+
+ if (packet->endpoint >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ endpoint = &target->endpoint[packet->endpoint];
+
+ if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
+ packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
+ -ECANCELED : -ENOSPC;
+ INIT_LIST_HEAD(&queue);
+ list_add(&packet->list, &queue);
+ htc_tx_complete(endpoint, &queue);
+ }
+
+ return 0;
+}
+
+/* flush endpoint TX queue */
+static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct list_head discard_q, container;
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+
+ if (!endpoint->svc_id) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* initialize the discard queue */
+ INIT_LIST_HEAD(&discard_q);
+
+ spin_lock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
+ if ((tag == HTC_TX_PACKET_TAG_ALL) ||
+ (tag == packet->info.tx.tag))
+ list_move_tail(&packet->list, &discard_q);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
+ packet, packet->act_len,
+ packet->endpoint, packet->info.tx.tag);
+
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ htc_tx_complete(endpoint, &container);
+ }
+}
+
+static void ath6kl_htc_flush_txep_all(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ dump_cred_dist_stats(target);
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (endpoint->svc_id == 0)
+ /* not in use.. */
+ continue;
+ ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
+ }
+}
+
+static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
+ enum htc_endpoint_id eid,
+ bool active)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+ bool dist = false;
+
+ if (endpoint->svc_id == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ if (active) {
+ if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
+ endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
+ dist = true;
+ }
+ } else {
+ if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
+ endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
+ dist = true;
+ }
+ }
+
+ if (dist) {
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx activity ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
+
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (dist && !active)
+ htc_chk_ep_txq(target);
+}
+
+/* HTC Rx */
+
+static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
+ int n_look_ahds)
+{
+ endpoint->ep_st.rx_pkts++;
+ if (n_look_ahds == 1)
+ endpoint->ep_st.rx_lkahds++;
+ else if (n_look_ahds > 1)
+ endpoint->ep_st.rx_bundle_lkahd++;
+}
+
+static inline bool htc_valid_rx_frame_len(struct htc_target *target,
+ enum htc_endpoint_id eid, int len)
+{
+ return (eid == target->dev->ar->ctrl_ep) ?
+ len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
+}
+
+static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
+{
+ struct list_head queue;
+
+ INIT_LIST_HEAD(&queue);
+ list_add_tail(&packet->list, &queue);
+ return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
+}
+
+static void htc_reclaim_rxbuf(struct htc_target *target,
+ struct htc_packet *packet,
+ struct htc_endpoint *ep)
+{
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
+ htc_rxpkt_reset(packet);
+ packet->status = -ECANCELED;
+ ep->ep_cb.rx(ep->target, packet);
+ } else {
+ htc_rxpkt_reset(packet);
+ htc_add_rxbuf((void *)(target), packet);
+ }
+}
+
+static void reclaim_rx_ctrl_buf(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static int ath6kl_htc_rx_packet(struct htc_target *target,
+ struct htc_packet *packet,
+ u32 rx_len)
+{
+ struct ath6kl_device *dev = target->dev;
+ u32 padded_len;
+ int status;
+
+ padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
+
+ if (padded_len > packet->buf_len) {
+ ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
+ padded_len, rx_len, packet->buf_len);
+ return -ENOMEM;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
+ packet, packet->info.rx.exp_hdr,
+ padded_len, dev->ar->mbox_info.htc_addr);
+
+ status = hif_read_write_sync(dev->ar,
+ dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_RD_SYNC_BLOCK_FIX);
+
+ packet->status = status;
+
+ return status;
+}
+
+/*
+ * optimization for recv packets: we can indicate a
+ * "hint" that there are more single-packets to fetch
+ * on this endpoint.
+ */
+static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
+
+ if (htc_hdr->eid == packet->endpoint) {
+ if (!list_empty(&endpoint->rx_bufq))
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+ }
+}
+
+static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
+{
+ struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
+
+ if (ep_cb.rx_refill_thresh > 0) {
+ spin_lock_bh(&endpoint->target->rx_lock);
+ if (get_queue_depth(&endpoint->rx_bufq)
+ < ep_cb.rx_refill_thresh) {
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ ep_cb.rx_refill(endpoint->target, endpoint->eid);
+ return;
+ }
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ }
+}
+
+/* This function is called with rx_lock held */
+static int ath6kl_htc_rx_setup(struct htc_target *target,
+ struct htc_endpoint *ep,
+ u32 *lk_ahds, struct list_head *queue, int n_msg)
+{
+ struct htc_packet *packet;
+ /* FIXME: type of lk_ahds can't be right */
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
+ struct htc_ep_callbacks ep_cb;
+ int status = 0, j, full_len;
+ bool no_recycle;
+
+ full_len = CALC_TXRX_PADDED_LEN(target,
+ le16_to_cpu(htc_hdr->payld_len) +
+ sizeof(*htc_hdr));
+
+ if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
+ ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
+ htc_hdr->eid, htc_hdr->flags,
+ le16_to_cpu(htc_hdr->payld_len));
+ return -EINVAL;
+ }
+
+ ep_cb = ep->ep_cb;
+ for (j = 0; j < n_msg; j++) {
+ /*
+ * Reset flag; any packets allocated using the
+ * rx_alloc() API cannot be recycled on
+ * cleanup; they must be explicitly returned.
+ */
+ no_recycle = false;
+
+ if (ep_cb.rx_allocthresh &&
+ (full_len > ep_cb.rx_alloc_thresh)) {
+ ep->ep_st.rx_alloc_thresh_hit += 1;
+ ep->ep_st.rxalloc_thresh_byte +=
+ le16_to_cpu(htc_hdr->payld_len);
+
+ spin_unlock_bh(&target->rx_lock);
+ no_recycle = true;
+
+ packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
+ full_len);
+ spin_lock_bh(&target->rx_lock);
+ } else {
+ /* refill handler is being used */
+ if (list_empty(&ep->rx_bufq)) {
+ if (ep_cb.rx_refill) {
+ spin_unlock_bh(&target->rx_lock);
+ ep_cb.rx_refill(ep->target, ep->eid);
+ spin_lock_bh(&target->rx_lock);
+ }
+ }
+
+ if (list_empty(&ep->rx_bufq)) {
+ packet = NULL;
+ } else {
+ packet = list_first_entry(&ep->rx_bufq,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ }
+ }
+
+ if (!packet) {
+ target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ep->eid;
+ return -ENOSPC;
+ }
+
+ /* clear flags */
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.indicat_flags = 0;
+ packet->status = 0;
+
+ if (no_recycle)
+ /*
+ * flag that these packets cannot be
+ * recycled, they have to be returned to
+ * the user
+ */
+ packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
+
+ /* Caller needs to free this upon any failure */
+ list_add_tail(&packet->list, queue);
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ status = -ECANCELED;
+ break;
+ }
+
+ if (j) {
+ packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
+ packet->info.rx.exp_hdr = 0xFFFFFFFF;
+ } else
+ /* set expected look ahead */
+ packet->info.rx.exp_hdr = *lk_ahds;
+
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
+ HTC_HDR_LENGTH;
+ }
+
+ return status;
+}
+
+static int ath6kl_htc_rx_alloc(struct htc_target *target,
+ u32 lk_ahds[], int msg,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int status = 0;
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_frame_hdr *htc_hdr;
+ int i, n_msg;
+
+ spin_lock_bh(&target->rx_lock);
+
+ for (i = 0; i < msg; i++) {
+ htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
+
+ if (htc_hdr->eid >= ENDPOINT_MAX) {
+ ath6kl_err("invalid ep in look-ahead: %d\n",
+ htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->eid != endpoint->eid) {
+ ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
+ htc_hdr->eid, endpoint->eid, i);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
+ ath6kl_err("payload len %d exceeds max htc : %d !\n",
+ htc_hdr->payld_len,
+ (u32) HTC_MAX_PAYLOAD_LENGTH);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (endpoint->svc_id == 0) {
+ ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
+ /*
+ * HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
+ HTC_FLG_RX_BNDL_CNT_S;
+
+ /* the count doesn't include the starter frame */
+ n_msg++;
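+ /* e.g. a header bundle count of 3 means this frame plus 3 more, 4 in total */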
+ if (n_msg > target->msg_per_bndl_max) {
+ status = -ENOMEM;
+ break;
+ }
+
+ endpoint->ep_st.rx_bundle_from_hdr += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle pkts %d\n",
+ n_msg);
+ } else
+ /* HTC header only indicates 1 message to fetch */
+ n_msg = 1;
+
+ /* Setup packet buffers for each message */
+ status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
+ queue, n_msg);
+
+ /*
+ * This is due to unavailability of buffers to receive the entire data.
+ * Return no error so that free buffers from the queue can be used
+ * to receive partial data.
+ */
+ if (status == -ENOSPC) {
+ spin_unlock_bh(&target->rx_lock);
+ return 0;
+ }
+
+ if (status)
+ break;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (status) {
+ list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+ }
+
+ return status;
+}
+
+static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
+{
+ if (packets->endpoint != ENDPOINT_0) {
+ WARN_ON(1);
+ return;
+ }
+
+ if (packets->status == -ECANCELED) {
+ reclaim_rx_ctrl_buf(context, packets);
+ return;
+ }
+
+ if (packets->act_len > 0) {
+ ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
+ packets->act_len + HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx unexpected endpoint 0 message", "",
+ packets->buf - HTC_HDR_LENGTH,
+ packets->act_len + HTC_HDR_LENGTH);
+ }
+
+ htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
+}
+
+static void htc_proc_cred_rpt(struct htc_target *target,
+ struct htc_credit_report *rpt,
+ int n_entries,
+ enum htc_endpoint_id from_ep)
+{
+ struct htc_endpoint *endpoint;
+ int tot_credits = 0, i;
+ bool dist = false;
+
+ spin_lock_bh(&target->tx_lock);
+
+ for (i = 0; i < n_entries; i++, rpt++) {
+ if (rpt->eid >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ spin_unlock_bh(&target->tx_lock);
+ return;
+ }
+
+ endpoint = &target->endpoint[rpt->eid];
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit report ep %d credits %d\n",
+ rpt->eid, rpt->credits);
+
+ endpoint->ep_st.tx_cred_rpt += 1;
+ endpoint->ep_st.cred_retnd += rpt->credits;
+
+ if (from_ep == rpt->eid) {
+ /*
+ * This credit report arrived on the same endpoint
+ * indicating it arrived in an RX packet.
+ */
+ endpoint->ep_st.cred_from_rx += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_rx += 1;
+ } else if (from_ep == ENDPOINT_0) {
+ /* credit arrived on endpoint 0 as a NULL message */
+ endpoint->ep_st.cred_from_ep0 += rpt->credits;
+ endpoint->ep_st.cred_rpt_ep0 += 1;
+ } else {
+ endpoint->ep_st.cred_from_other += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_other += 1;
+ }
+
+ if (rpt->eid == ENDPOINT_0)
+ /* always give endpoint 0 credits back */
+ endpoint->cred_dist.credits += rpt->credits;
+ else {
+ endpoint->cred_dist.cred_to_dist += rpt->credits;
+ dist = true;
+ }
+
+ /*
+ * Refresh tx depth for the distribution function that will
+ * recover these credits. NOTE: this is only valid when
+ * there are credits to recover!
+ */
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ tot_credits += rpt->credits;
+ }
+
+ if (dist) {
+ /*
+ * This was a credit return based on a completed send
+ * operation. Note: this is done with the lock held.
+ */
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (tot_credits)
+ htc_chk_ep_txq(target);
+}
+
+static int htc_parse_trailer(struct htc_target *target,
+ struct htc_record_hdr *record,
+ u8 *record_buf, u32 *next_lk_ahds,
+ enum htc_endpoint_id endpoint,
+ int *n_lk_ahds)
+{
+ struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
+ struct htc_lookahead_report *lk_ahd;
+ int len;
+
+ switch (record->rec_id) {
+ case HTC_RECORD_CREDITS:
+ len = record->len / sizeof(struct htc_credit_report);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ htc_proc_cred_rpt(target,
+ (struct htc_credit_report *) record_buf,
+ len, endpoint);
+ break;
+ case HTC_RECORD_LOOKAHEAD:
+ len = record->len / sizeof(*lk_ahd);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ lk_ahd = (struct htc_lookahead_report *) record_buf;
+ if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
+ next_lk_ahds) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
+ lk_ahd->pre_valid, lk_ahd->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx next look ahead",
+ "", next_lk_ahds, 4);
+
+ *n_lk_ahds = 1;
+ }
+ break;
+ case HTC_RECORD_LOOKAHEAD_BUNDLE:
+ len = record->len / sizeof(*bundle_lkahd_rpt);
+ if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (next_lk_ahds) {
+ int i;
+
+ bundle_lkahd_rpt =
+ (struct htc_bundle_lkahd_rpt *) record_buf;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
+ "", record_buf, record->len);
+
+ for (i = 0; i < len; i++) {
+ memcpy((u8 *)&next_lk_ahds[i],
+ bundle_lkahd_rpt->lk_ahd, 4);
+ bundle_lkahd_rpt++;
+ }
+
+ *n_lk_ahds = i;
+ }
+ break;
+ default:
+ ath6kl_err("unhandled record: id:%d len:%d\n",
+ record->rec_id, record->len);
+ break;
+ }
+
+ return 0;
+}
+
+static int htc_proc_trailer(struct htc_target *target,
+ u8 *buf, int len, u32 *next_lk_ahds,
+ int *n_lk_ahds, enum htc_endpoint_id endpoint)
+{
+ struct htc_record_hdr *record;
+ int orig_len;
+ int status;
+ u8 *record_buf;
+ u8 *orig_buf;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
+
+ orig_buf = buf;
+ orig_len = len;
+ status = 0;
+
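+ /*
+ * A trailer is a sequence of records: each record is a
+ * byte-aligned htc_record_hdr { rec_id, len } immediately
+ * followed by len bytes of record payload. Walk the buffer
+ * record by record until it is consumed.
+ */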
+ while (len > 0) {
+ if (len < sizeof(struct htc_record_hdr)) {
+ status = -ENOMEM;
+ break;
+ }
+ /* these are byte aligned structs */
+ record = (struct htc_record_hdr *) buf;
+ len -= sizeof(struct htc_record_hdr);
+ buf += sizeof(struct htc_record_hdr);
+
+ if (record->len > len) {
+ ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
+ record->len, record->rec_id, len);
+ status = -ENOMEM;
+ break;
+ }
+ record_buf = buf;
+
+ status = htc_parse_trailer(target, record, record_buf,
+ next_lk_ahds, endpoint, n_lk_ahds);
+
+ if (status)
+ break;
+
+ /* advance buffer past this record for next time around */
+ buf += record->len;
+ len -= record->len;
+ }
+
+ if (status)
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
+ "", orig_buf, orig_len);
+
+ return status;
+}
+
+static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
+ struct htc_packet *packet,
+ u32 *next_lkahds, int *n_lkahds)
+{
+ int status = 0;
+ u16 payload_len;
+ u32 lk_ahd;
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
+
+ if (n_lkahds != NULL)
+ *n_lkahds = 0;
+
+ /*
+ * NOTE: we cannot assume the alignment of buf, so we use the safe
+ * macros to retrieve 16 bit fields.
+ */
+ payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
+
+ memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
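+ /*
+ * The 4 byte lookahead reported by the target is simply the
+ * first four bytes of the HTC frame header, so it can be
+ * compared directly against the expected header captured when
+ * the packet was set up.
+ */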
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
+ /*
+ * Refresh the expected header and the actual length as it
+ * was unknown when this packet was grabbed as part of the
+ * bundle.
+ */
+ packet->info.rx.exp_hdr = lk_ahd;
+ packet->act_len = payload_len + HTC_HDR_LENGTH;
+
+ /* validate the actual header that was refreshed */
+ if (packet->act_len > packet->buf_len) {
+ ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
+ payload_len, lk_ahd);
+ /*
+ * Limit this to the max buffer size just so some of
+ * the buffer can be printed out.
+ */
+ packet->act_len = min(packet->act_len, packet->buf_len);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->endpoint != htc_hdr->eid) {
+ ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
+ htc_hdr->eid, packet->endpoint);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+ }
+
+ if (lk_ahd != packet->info.rx.exp_hdr) {
+ ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
+ __func__, packet, packet->info.rx.rx_flags);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
+ "", &packet->info.rx.exp_hdr, 4);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
+ "", (u8 *)&lk_ahd, sizeof(lk_ahd));
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
+ if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
+ htc_hdr->ctrl[0] > payload_len) {
+ ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
+ __func__, payload_len, htc_hdr->ctrl[0]);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+ next_lkahds = NULL;
+ n_lkahds = NULL;
+ }
+
+ status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
+ + payload_len - htc_hdr->ctrl[0],
+ htc_hdr->ctrl[0], next_lkahds,
+ n_lkahds, packet->endpoint);
+
+ if (status)
+ goto fail_rx;
+
+ packet->act_len -= htc_hdr->ctrl[0];
+ }
+
+ packet->buf += HTC_HDR_LENGTH;
+ packet->act_len -= HTC_HDR_LENGTH;
+
+fail_rx:
+ if (status)
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+ "", packet->buf, packet->act_len);
+
+ return status;
+}
+
+static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx complete ep %d packet 0x%p\n",
+ endpoint->eid, packet);
+
+ endpoint->ep_cb.rx(endpoint->target, packet);
+}
+
+static int ath6kl_htc_rx_bundle(struct htc_target *target,
+ struct list_head *rxq,
+ struct list_head *sync_compq,
+ int *n_pkt_fetched, bool part_bundle)
+{
+ struct hif_scatter_req *scat_req;
+ struct htc_packet *packet;
+ int rem_space = target->max_rx_bndl_sz;
+ int n_scat_pkt, status = 0, i, len;
+
+ n_scat_pkt = get_queue_depth(rxq);
+ n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
+
+ if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
+ /*
+ * We were forced to split this bundle receive operation;
+ * all packets in this partial bundle must have their
+ * lookaheads ignored.
+ */
+ part_bundle = true;
+
+ /*
+ * This would only happen if the target ignored our max
+ * bundle limit.
+ */
+ ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
+ __func__, get_queue_depth(rxq), n_scat_pkt);
+ }
+
+ len = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle depth %d pkts %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (scat_req == NULL)
+ goto fail_rx_pkt;
+
+ for (i = 0; i < n_scat_pkt; i++) {
+ int pad_len;
+
+ packet = list_first_entry(rxq, struct htc_packet, list);
+ list_del(&packet->list);
+
+ pad_len = CALC_TXRX_PADDED_LEN(target,
+ packet->act_len);
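+ /*
+ * pad_len rounds the packet length up to the device block
+ * size so that each scatter entry stays block aligned.
+ */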
+
+ if ((rem_space - pad_len) < 0) {
+ list_add(&packet->list, rxq);
+ break;
+ }
+
+ rem_space -= pad_len;
+
+ if (part_bundle || (i < (n_scat_pkt - 1)))
+ /*
+ * Packets 0..n-1 cannot be checked for lookaheads
+ * since we are fetching a bundle; the last packet,
+ * however, can have its lookahead used.
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* NOTE: 1 HTC packet per scatter entry */
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = pad_len;
+
+ packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+ list_add_tail(&packet->list, sync_compq);
+
+ WARN_ON(!scat_req->scat_list[i].len);
+ len += scat_req->scat_list[i].len;
+ }
+
+ scat_req->len = len;
+ scat_req->scat_entries = i;
+
+ status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
+
+ if (!status)
+ *n_pkt_fetched = i;
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+fail_rx_pkt:
+
+ return status;
+}
+
+static int ath6kl_htc_rx_process_packets(struct htc_target *target,
+ struct list_head *comp_pktq,
+ u32 lk_ahds[],
+ int *n_lk_ahd)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_endpoint *ep;
+ int status = 0;
+
+ list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
+ ep = &target->endpoint[packet->endpoint];
+
+ trace_ath6kl_htc_rx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+
+ /* process header for each of the recv packet */
+ status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
+ n_lk_ahd);
+ if (status)
+ return status;
+
+ list_del(&packet->list);
+
+ if (list_empty(comp_pktq)) {
+ /*
+ * Last packet's more packet flag is set
+ * based on the lookahead.
+ */
+ if (*n_lk_ahd > 0)
+ ath6kl_htc_rx_set_indicate(lk_ahds[0],
+ ep, packet);
+ } else
+ /*
+ * Packets in a bundle automatically have
+ * this flag set.
+ */
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+
+ ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
+ ep->ep_st.rx_bundl += 1;
+
+ ath6kl_htc_rx_complete(ep, packet);
+ }
+
+ return status;
+}
+
+static int ath6kl_htc_rx_fetch(struct htc_target *target,
+ struct list_head *rx_pktq,
+ struct list_head *comp_pktq)
+{
+ int fetched_pkts;
+ bool part_bundle = false;
+ int status = 0;
+ struct list_head tmp_rxq;
+ struct htc_packet *packet, *tmp_pkt;
+
+ /* now go fetch the list of HTC packets */
+ while (!list_empty(rx_pktq)) {
+ fetched_pkts = 0;
+
+ INIT_LIST_HEAD(&tmp_rxq);
+
+ if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
+ /*
+ * There are enough packets to attempt a
+ * bundle transfer and recv bundling is
+ * allowed.
+ */
+ status = ath6kl_htc_rx_bundle(target, rx_pktq,
+ &tmp_rxq,
+ &fetched_pkts,
+ part_bundle);
+ if (status)
+ goto fail_rx;
+
+ if (!list_empty(rx_pktq))
+ part_bundle = true;
+
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
+ }
+
+ if (!fetched_pkts) {
+ packet = list_first_entry(rx_pktq, struct htc_packet,
+ list);
+
+ /* fully synchronous */
+ packet->completion = NULL;
+
+ if (!list_is_singular(rx_pktq))
+ /*
+ * look_aheads in all packets
+ * except the last one in the
+ * bundle must be ignored
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* go fetch the packet */
+ status = ath6kl_htc_rx_packet(target, packet,
+ packet->act_len);
+
+ list_move_tail(&packet->list, &tmp_rxq);
+
+ if (status)
+ goto fail_rx;
+
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
+ }
+ }
+
+ return 0;
+
+fail_rx:
+
+ /*
+ * Clean up any packets we allocated but didn't actually
+ * use to fetch any packets.
+ */
+
+ list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
+ return status;
+}
+
+int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
+ u32 msg_look_ahead, int *num_pkts)
+{
+ struct htc_packet *packets, *tmp_pkt;
+ struct htc_endpoint *endpoint;
+ struct list_head rx_pktq, comp_pktq;
+ int status = 0;
+ u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int num_look_ahead = 1;
+ enum htc_endpoint_id id;
+ int n_fetched = 0;
+
+ INIT_LIST_HEAD(&comp_pktq);
+ *num_pkts = 0;
+
+ /*
+ * On first entry copy the look_aheads into our temp array for
+ * processing
+ */
+ look_aheads[0] = msg_look_ahead;
+
+ while (true) {
+ /*
+ * First lookahead sets the expected endpoint IDs for all
+ * packets in a bundle.
+ */
+ id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
+
+ if (id >= ENDPOINT_MAX) {
+ ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
+ id);
+ status = -ENOMEM;
+ break;
+ }
+
+ endpoint = &target->endpoint[id];
+
+ INIT_LIST_HEAD(&rx_pktq);
+ INIT_LIST_HEAD(&comp_pktq);
+
+ /*
+ * Try to allocate as many HTC RX packets as indicated by
+ * the look_aheads.
+ */
+ status = ath6kl_htc_rx_alloc(target, look_aheads,
+ num_look_ahead, endpoint,
+ &rx_pktq);
+ if (status)
+ break;
+
+ if (get_queue_depth(&rx_pktq) >= 2)
+ /*
+ * A recv bundle was detected, force an IRQ status
+ * re-check
+ */
+ target->chk_irq_status_cnt = 1;
+
+ n_fetched += get_queue_depth(&rx_pktq);
+
+ num_look_ahead = 0;
+
+ status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
+
+ if (!status)
+ ath6kl_htc_rx_chk_water_mark(endpoint);
+
+ /* Process fetched packets */
+ status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
+ look_aheads,
+ &num_look_ahead);
+
+ if (!num_look_ahead || status)
+ break;
+
+ /*
+ * For SYNCH processing, if we get here, we are running
+ * through the loop again due to a detected lookahead. Set
+ * a flag that we should re-check IRQ status registers
+ * before leaving IRQ processing; this can net better
+ * performance in high-throughput situations.
+ */
+ target->chk_irq_status_cnt = 1;
+ }
+
+ if (status) {
+ ath6kl_err("failed to get pending recv messages: %d\n",
+ status);
+
+ /* cleanup any packets in sync completion queue */
+ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
+ list_del(&packets->list);
+ htc_reclaim_rxbuf(target, packets,
+ &target->endpoint[packets->endpoint]);
+ }
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
+ ath6kl_hif_rx_control(target->dev, false);
+ }
+ }
+
+ /*
+ * Before leaving, check to see if host ran out of buffers and
+ * needs to stop the receiver.
+ */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
+ ath6kl_hif_rx_control(target->dev, false);
+ }
+ *num_pkts = n_fetched;
+
+ return status;
+}
+
+/*
+ * Synchronously wait for a control message from the target.
+ * This function is used at initialization time ONLY. At init, messages
+ * on ENDPOINT 0 are expected.
+ */
+static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_frame_hdr *htc_hdr;
+ u32 look_ahead;
+
+ if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
+ HTC_TARGET_RESPONSE_TIMEOUT))
+ return NULL;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
+
+ htc_hdr = (struct htc_frame_hdr *)&look_ahead;
+
+ if (htc_hdr->eid != ENDPOINT_0)
+ return NULL;
+
+ packet = htc_get_control_buf(target, false);
+
+ if (!packet)
+ return NULL;
+
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.exp_hdr = look_ahead;
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
+
+ if (packet->act_len > packet->buf_len)
+ goto fail_ctrl_rx;
+
+ /* we want synchronous operation */
+ packet->completion = NULL;
+
+ /* get the message from the device, this will block */
+ if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
+ goto fail_ctrl_rx;
+
+ trace_ath6kl_htc_rx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+
+ /* process receive header */
+ packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
+
+ if (packet->status) {
+ ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
+ packet->status);
+ goto fail_ctrl_rx;
+ }
+
+ return packet;
+
+fail_ctrl_rx:
+ if (packet != NULL) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return NULL;
+}
+
+static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pkt_queue)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *first_pkt;
+ bool rx_unblock = false;
+ int status = 0, depth;
+
+ if (list_empty(pkt_queue))
+ return -ENOMEM;
+
+ first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
+
+ if (first_pkt->endpoint >= ENDPOINT_MAX)
+ return status;
+
+ depth = get_queue_depth(pkt_queue);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx add multiple ep id %d cnt %d len %d\n",
+ first_pkt->endpoint, depth, first_pkt->buf_len);
+
+ endpoint = &target->endpoint[first_pkt->endpoint];
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ struct htc_packet *packet, *tmp_pkt;
+
+ /* walk through queue and mark each one canceled */
+ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_htc_rx_complete(endpoint, packet);
+ }
+
+ return status;
+ }
+
+ spin_lock_bh(&target->rx_lock);
+
+ list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
+
+ /* check if we are blocked waiting for a new buffer */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ if (target->ep_waiting == first_pkt->endpoint) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx blocked on ep %d, unblocking\n",
+ target->ep_waiting);
+ target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ENDPOINT_MAX;
+ rx_unblock = true;
+ }
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
+ /* TODO : implement a buffer threshold count? */
+ ath6kl_hif_rx_control(target->dev, true);
+
+ return status;
+}
+
+static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet, *tmp_pkt;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (!endpoint->svc_id)
+ /* not in use.. */
+ continue;
+
+ spin_lock_bh(&target->rx_lock);
+ list_for_each_entry_safe(packet, tmp_pkt,
+ &endpoint->rx_bufq, list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&target->rx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx flush pkt 0x%p len %d ep %d\n",
+ packet, packet->buf_len,
+ packet->endpoint);
+ /*
+ * packets in rx_bufq of endpoint 0 have originally
+ * been queued from target->free_ctrl_rxbuf where
+ * packet and packet->buf_start are allocated
+ * separately using kmalloc(). For other endpoint
+ * rx_bufq, it is allocated as skb where packet is
+ * skb->head. Take care of this difference while freeing
+ * the memory.
+ */
+ if (packet->endpoint == ENDPOINT_0) {
+ kfree(packet->buf_start);
+ kfree(packet);
+ } else {
+ dev_kfree_skb(packet->pkt_cntxt);
+ }
+ spin_lock_bh(&target->rx_lock);
+ }
+ spin_unlock_bh(&target->rx_lock);
+ }
+}
+
+static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *conn_req,
+ struct htc_service_connect_resp *conn_resp)
+{
+ struct htc_packet *rx_pkt = NULL;
+ struct htc_packet *tx_pkt = NULL;
+ struct htc_conn_service_resp *resp_msg;
+ struct htc_conn_service_msg *conn_msg;
+ struct htc_endpoint *endpoint;
+ enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
+ unsigned int max_msg_sz = 0;
+ int status = 0;
+ u16 msg_id;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc connect service target 0x%p service id 0x%x\n",
+ target, conn_req->svc_id);
+
+ if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
+ /* special case for pseudo control service */
+ assigned_ep = ENDPOINT_0;
+ max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
+ } else {
+ /* allocate a packet to send to the target */
+ tx_pkt = htc_get_control_buf(target, true);
+
+ if (!tx_pkt)
+ return -ENOMEM;
+
+ conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
+ memset(conn_msg, 0, sizeof(*conn_msg));
+ conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
+ conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
+ conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
+
+ set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
+ sizeof(*conn_msg) + conn_msg->svc_meta_len,
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ /* we want synchronous operation */
+ tx_pkt->completion = NULL;
+ ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
+ status = ath6kl_htc_tx_issue(target, tx_pkt);
+
+ if (status)
+ goto fail_tx;
+
+ /* wait for response */
+ rx_pkt = htc_wait_for_ctrl_msg(target);
+
+ if (!rx_pkt) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
+ msg_id = le16_to_cpu(resp_msg->msg_id);
+
+ if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
+ (rx_pkt->act_len < sizeof(*resp_msg))) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ conn_resp->resp_code = resp_msg->status;
+ /* check response status */
+ if (resp_msg->status != HTC_SERVICE_SUCCESS) {
+ ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
+ resp_msg->svc_id, resp_msg->status);
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
+ max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
+ }
+
+ if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
+ assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ endpoint = &target->endpoint[assigned_ep];
+ endpoint->eid = assigned_ep;
+ if (endpoint->svc_id) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ /* return assigned endpoint to caller */
+ conn_resp->endpoint = assigned_ep;
+ conn_resp->len_max = max_msg_sz;
+
+ /* setup the endpoint */
+
+ /* this marks the endpoint in use */
+ endpoint->svc_id = conn_req->svc_id;
+
+ endpoint->max_txq_depth = conn_req->max_txq_depth;
+ endpoint->len_max = max_msg_sz;
+ endpoint->ep_cb = conn_req->ep_cb;
+ endpoint->cred_dist.svc_id = conn_req->svc_id;
+ endpoint->cred_dist.htc_ep = endpoint;
+ endpoint->cred_dist.endpoint = assigned_ep;
+ endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
+
+ switch (endpoint->svc_id) {
+ case WMI_DATA_BK_SVC:
+ endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
+ break;
+ default:
+ endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+ break;
+ }
+
+ if (conn_req->max_rxmsg_sz) {
+ /*
+ * Override the cred_per_msg calculation; this optimizes
+ * the credit-low indications since the host will actually
+ * issue smaller messages in the send path.
+ */
+ if (conn_req->max_rxmsg_sz > max_msg_sz) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+ endpoint->cred_dist.cred_per_msg =
+ conn_req->max_rxmsg_sz / target->tgt_cred_sz;
+ } else
+ endpoint->cred_dist.cred_per_msg =
+ max_msg_sz / target->tgt_cred_sz;
+
+ if (!endpoint->cred_dist.cred_per_msg)
+ endpoint->cred_dist.cred_per_msg = 1;
+
+ /* save local connection flags */
+ endpoint->conn_flags = conn_req->flags;
+
+fail_tx:
+ if (tx_pkt)
+ htc_reclaim_txctrl_buf(target, tx_pkt);
+
+ if (rx_pkt) {
+ htc_rxpkt_reset(rx_pkt);
+ reclaim_rx_ctrl_buf(target, rx_pkt);
+ }
+
+ return status;
+}
+
+static void reset_ep_state(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
+ endpoint->svc_id = 0;
+ endpoint->len_max = 0;
+ endpoint->max_txq_depth = 0;
+ memset(&endpoint->ep_st, 0,
+ sizeof(endpoint->ep_st));
+ INIT_LIST_HEAD(&endpoint->rx_bufq);
+ INIT_LIST_HEAD(&endpoint->txq);
+ endpoint->target = target;
+ }
+
+ /* reset distribution list */
+ /* FIXME: free existing entries */
+ INIT_LIST_HEAD(&target->cred_dist_list);
+}
+
+static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
+ enum htc_endpoint_id endpoint)
+{
+ int num;
+
+ spin_lock_bh(&target->rx_lock);
+ num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
+ spin_unlock_bh(&target->rx_lock);
+ return num;
+}
+
+static void htc_setup_msg_bndl(struct htc_target *target)
+{
+ /* limit what HTC can handle */
+ target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
+ target->msg_per_bndl_max);
+
+ if (ath6kl_hif_enable_scatter(target->dev->ar)) {
+ target->msg_per_bndl_max = 0;
+ return;
+ }
+
+ /* limit the bundle to what the device layer can handle */
+ target->msg_per_bndl_max = min(target->max_scat_entries,
+ target->msg_per_bndl_max);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc bundling allowed msg_per_bndl_max %d\n",
+ target->msg_per_bndl_max);
+
+ /* Max rx bundle size is limited by the max tx bundle size */
+ target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
+ /* Max tx bundle size is limited by the extended mbox address range */
+ target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
+ target->max_xfer_szper_scatreq);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
+ target->max_rx_bndl_sz, target->max_tx_bndl_sz);
+
+ if (target->max_tx_bndl_sz)
+ /* tx_bndl_mask is enabled per AC, each has 1 bit */
+ target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
+
+ if (target->max_rx_bndl_sz)
+ target->rx_bndl_enable = true;
+
+ if ((target->tgt_cred_sz % target->block_sz) != 0) {
+ ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
+ target->tgt_cred_sz);
+
+ /*
+ * Disallow send bundling since the credit size is
+ * not aligned to a block size; the I/O block
+ * padding will spill into the next credit buffer,
+ * which is fatal.
+ */
+ target->tx_bndl_mask = 0;
+ }
+}
+
+static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_ready_ext_msg *rdy_msg;
+ struct htc_service_connect_req connect;
+ struct htc_service_connect_resp resp;
+ int status;
+
+ /* we should be getting 1 control message that the target is ready */
+ packet = htc_wait_for_ctrl_msg(target);
+
+ if (!packet)
+ return -ENOMEM;
+
+ /* we controlled the buffer creation so it's properly aligned */
+ rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
+
+ if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
+ (packet->act_len < sizeof(struct htc_ready_msg))) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
+ target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc target ready credits %d size %d\n",
+ target->tgt_creds, target->tgt_cred_sz);
+
+ /* check if this is an extended ready message */
+ if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
+ /* this is an extended message */
+ target->htc_tgt_ver = rdy_msg->htc_ver;
+ target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
+ } else {
+ /* legacy */
+ target->htc_tgt_ver = HTC_VERSION_2P0;
+ target->msg_per_bndl_max = 0;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
+ (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+ target->htc_tgt_ver);
+
+ if (target->msg_per_bndl_max > 0)
+ htc_setup_msg_bndl(target);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&connect, 0, sizeof(connect));
+ memset(&resp, 0, sizeof(resp));
+ connect.ep_cb.rx = htc_ctrl_rx;
+ connect.ep_cb.rx_refill = NULL;
+ connect.ep_cb.tx_full = NULL;
+ connect.max_txq_depth = NUM_CONTROL_BUFFERS;
+ connect.svc_id = HTC_CTRL_RSVD_SVC;
+
+ /* connect fake service */
+ status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
+
+ if (status)
+ /*
+ * FIXME: this call doesn't make sense, the caller should
+ * call ath6kl_htc_mbox_cleanup() when it wants to remove htc
+ */
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+fail_wait_target:
+ if (packet) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return status;
+}
+
+/*
+ * Start HTC, enable interrupts and let the target know
+ * host has finished setup.
+ */
+static int ath6kl_htc_mbox_start(struct htc_target *target)
+{
+ struct htc_packet *packet;
+ int status;
+
+ memset(&target->dev->irq_proc_reg, 0,
+ sizeof(target->dev->irq_proc_reg));
+
+ /* Disable interrupts at the chip level */
+ ath6kl_hif_disable_intrs(target->dev);
+
+ target->htc_flags = 0;
+ target->rx_st_flags = 0;
+
+ /* Push control receive buffers into htc control endpoint */
+ while ((packet = htc_get_control_buf(target, false)) != NULL) {
+ status = htc_add_rxbuf(target, packet);
+ if (status)
+ return status;
+ }
+
+ /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
+ ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+ target->tgt_creds);
+
+ dump_cred_dist_stats(target);
+
+ /* Indicate setup completion to the target */
+ status = htc_setup_tx_complete(target);
+
+ if (status)
+ return status;
+
+ /* unmask interrupts */
+ status = ath6kl_hif_unmask_intrs(target->dev);
+
+ if (status)
+ ath6kl_htc_mbox_stop(target);
+
+ return status;
+}
+
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+ u32 block_size, ctrl_bufsz;
+ struct htc_packet *packet;
+ int i;
+
+ reset_ep_state(target);
+
+ block_size = target->dev->ar->mbox_info.block_size;
+
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else {
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+ }
+
+ return 0;
+}
+
+/* htc_stop: stop interrupt reception, and flush all queued buffers */
+static void ath6kl_htc_mbox_stop(struct htc_target *target)
+{
+ spin_lock_bh(&target->htc_lock);
+ target->htc_flags |= HTC_OP_STATE_STOPPING;
+ spin_unlock_bh(&target->htc_lock);
+
+ /*
+ * Masking interrupts is a synchronous operation; when this
+ * function returns, all pending HIF I/O has completed and we
+ * can safely flush the queues.
+ */
+ ath6kl_hif_mask_intrs(target->dev);
+
+ ath6kl_htc_flush_txep_all(target);
+
+ ath6kl_htc_mbox_flush_rx_buf(target);
+
+ ath6kl_htc_reset(target);
+}
+
+static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
+{
+ struct htc_target *target = NULL;
+ int status = 0;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ ath6kl_err("unable to allocate memory\n");
+ return NULL;
+ }
+
+ target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
+ if (!target->dev) {
+ ath6kl_err("unable to allocate memory\n");
+ status = -ENOMEM;
+ goto err_htc_cleanup;
+ }
+
+ spin_lock_init(&target->htc_lock);
+ spin_lock_init(&target->rx_lock);
+ spin_lock_init(&target->tx_lock);
+
+ INIT_LIST_HEAD(&target->free_ctrl_txbuf);
+ INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
+ INIT_LIST_HEAD(&target->cred_dist_list);
+
+ target->dev->ar = ar;
+ target->dev->htc_cnxt = target;
+ target->ep_waiting = ENDPOINT_MAX;
+
+ status = ath6kl_hif_setup(target->dev);
+ if (status)
+ goto err_htc_cleanup;
+
+ status = ath6kl_htc_reset(target);
+ if (status)
+ goto err_htc_cleanup;
+
+ return target;
+
+err_htc_cleanup:
+ ath6kl_htc_mbox_cleanup(target);
+
+ return NULL;
+}
+
+/* cleanup the HTC instance */
+static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
+{
+ struct htc_packet *packet, *tmp_packet;
+
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_txbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_rxbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ kfree(target->dev);
+ kfree(target);
+}
+
+static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
+ .create = ath6kl_htc_mbox_create,
+ .wait_target = ath6kl_htc_mbox_wait_target,
+ .start = ath6kl_htc_mbox_start,
+ .conn_service = ath6kl_htc_mbox_conn_service,
+ .tx = ath6kl_htc_mbox_tx,
+ .stop = ath6kl_htc_mbox_stop,
+ .cleanup = ath6kl_htc_mbox_cleanup,
+ .flush_txep = ath6kl_htc_mbox_flush_txep,
+ .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
+ .activity_changed = ath6kl_htc_mbox_activity_changed,
+ .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
+ .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
+ .credit_setup = ath6kl_htc_mbox_credit_setup,
+};
+
+void ath6kl_htc_mbox_attach(struct ath6kl *ar)
+{
+ ar->htc_ops = &ath6kl_htc_mbox_ops;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
new file mode 100644
index 000000000..ca1a18c86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -0,0 +1,1737 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "debug.h"
+#include "hif-ops.h"
+
+#define HTC_PACKET_CONTAINER_ALLOCATION 32
+#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
+
+static int ath6kl_htc_pipe_tx(struct htc_target *handle,
+ struct htc_packet *packet);
+static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
+
+/* htc pipe tx path */
+static inline void restore_tx_packet(struct htc_packet *packet)
+{
+ if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
+ skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
+ packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
+ }
+}
+
+static void do_send_completion(struct htc_endpoint *ep,
+ struct list_head *queue_to_indicate)
+{
+ struct htc_packet *packet;
+
+ if (list_empty(queue_to_indicate)) {
+ /* nothing to indicate */
+ return;
+ }
+
+ if (ep->ep_cb.tx_comp_multi != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
+ __func__, ep->eid,
+ get_queue_depth(queue_to_indicate));
+ /*
+ * a multiple send complete handler is being used,
+ * pass the queue to the handler
+ */
+ ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
+ /*
+ * all packets are now owned by the callback;
+ * reset the queue to be safe
+ */
+ INIT_LIST_HEAD(queue_to_indicate);
+ } else {
+ /* using legacy EpTxComplete */
+ do {
+ packet = list_first_entry(queue_to_indicate,
+ struct htc_packet, list);
+
+ list_del(&packet->list);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: calling ep %d send complete callback on packet 0x%p\n",
+ __func__, ep->eid, packet);
+ ep->ep_cb.tx_complete(ep->target, packet);
+ } while (!list_empty(queue_to_indicate));
+ }
+}
+
+static void send_packet_completion(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
+ struct list_head container;
+
+ restore_tx_packet(packet);
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+
+ /* do completion */
+ do_send_completion(ep, &container);
+}
+
+static void get_htc_packet_credit_based(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct list_head *queue)
+{
+ int credits_required;
+ int remainder;
+ u8 send_flags;
+ struct htc_packet *packet;
+ unsigned int transfer_len;
+
+ /* NOTE : the TX lock is held when this function is called */
+
+ /* loop until we can grab as many packets out of the queue as we can */
+ while (true) {
+ send_flags = 0;
+ if (list_empty(&ep->txq))
+ break;
+
+ /* get packet at head, but don't remove it */
+ packet = list_first_entry(&ep->txq, struct htc_packet, list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: got head packet:0x%p , queue depth: %d\n",
+ __func__, packet, get_queue_depth(&ep->txq));
+
+ transfer_len = packet->act_len + HTC_HDR_LENGTH;
+
+ if (transfer_len <= target->tgt_cred_sz) {
+ credits_required = 1;
+ } else {
+ /* figure out how many credits this message requires */
+ credits_required = transfer_len / target->tgt_cred_sz;
+ remainder = transfer_len % target->tgt_cred_sz;
+
+ if (remainder)
+ credits_required++;
+ }
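+ /*
+ * For example, with a 1536 byte target credit size a 1600
+ * byte transfer needs two credits (1600 / 1536 = 1 with a
+ * remainder of 64), while anything up to 1536 bytes fits in
+ * one credit.
+ */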
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
+ __func__, credits_required, ep->cred_dist.credits);
+
+ if (ep->eid == ENDPOINT_0) {
+ /*
+ * endpoint 0 is special, it always has a credit and
+ * does not require credit based flow control
+ */
+ credits_required = 0;
+
+ } else {
+ if (ep->cred_dist.credits < credits_required)
+ break;
+
+ ep->cred_dist.credits -= credits_required;
+ ep->ep_st.cred_cosumd += credits_required;
+
+ /* check if we need credits back from the target */
+ if (ep->cred_dist.credits <
+ ep->cred_dist.cred_per_msg) {
+ /* tell the target we need credits ASAP! */
+ send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ ep->ep_st.cred_low_indicate += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: host needs credits\n",
+ __func__);
+ }
+ }
+
+ /* now we can fully dequeue */
+ packet = list_first_entry(&ep->txq, struct htc_packet, list);
+
+ list_del(&packet->list);
+ /* save the number of credits this packet consumed */
+ packet->info.tx.cred_used = credits_required;
+ /* save send flags */
+ packet->info.tx.flags = send_flags;
+ packet->info.tx.seqno = ep->seqno;
+ ep->seqno++;
+ /* queue this packet into the caller's queue */
+ list_add_tail(&packet->list, queue);
+ }
+}
+
+static void get_htc_packet(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct list_head *queue, int resources)
+{
+ struct htc_packet *packet;
+
+ /* NOTE : the TX lock is held when this function is called */
+
+ /* loop until we can grab as many packets out of the queue as we can */
+ while (resources) {
+ if (list_empty(&ep->txq))
+ break;
+
+ packet = list_first_entry(&ep->txq, struct htc_packet, list);
+ list_del(&packet->list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: got packet:0x%p , new queue depth: %d\n",
+ __func__, packet, get_queue_depth(&ep->txq));
+ packet->info.tx.seqno = ep->seqno;
+ packet->info.tx.flags = 0;
+ packet->info.tx.cred_used = 0;
+ ep->seqno++;
+
+ /* queue this packet into the caller's queue */
+ list_add_tail(&packet->list, queue);
+ resources--;
+ }
+}
+
+static int htc_issue_packets(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct list_head *pkt_queue)
+{
+ int status = 0;
+ u16 payload_len;
+ struct sk_buff *skb;
+ struct htc_frame_hdr *htc_hdr;
+ struct htc_packet *packet;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: queue: 0x%p, pkts %d\n", __func__,
+ pkt_queue, get_queue_depth(pkt_queue));
+
+ while (!list_empty(pkt_queue)) {
+ packet = list_first_entry(pkt_queue, struct htc_packet, list);
+ list_del(&packet->list);
+
+ skb = packet->skb;
+ if (!skb) {
+ WARN_ON_ONCE(1);
+ status = -EINVAL;
+ break;
+ }
+
+ payload_len = packet->act_len;
+
+ /* setup HTC frame header */
+ htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
+ sizeof(*htc_hdr));
+ if (!htc_hdr) {
+ WARN_ON_ONCE(1);
+ status = -EINVAL;
+ break;
+ }
+
+ packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
+
+ /* Endianness? */
+ put_unaligned((u16) payload_len, &htc_hdr->payld_len);
+ htc_hdr->flags = packet->info.tx.flags;
+ htc_hdr->eid = (u8) packet->endpoint;
+ htc_hdr->ctrl[0] = 0;
+ htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
+
+ spin_lock_bh(&target->tx_lock);
+
+ /* store in look up queue to match completions */
+ list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
+ ep->ep_st.tx_issued += 1;
+ spin_unlock_bh(&target->tx_lock);
+
+ status = ath6kl_hif_pipe_send(target->dev->ar,
+ ep->pipe.pipeid_ul, NULL, skb);
+
+ if (status != 0) {
+ if (status != -ENOMEM) {
+ /* TODO: if more than 1 endpoint maps to the
+ * same PipeID, it is possible to run out of
+ * resources in the HIF layer.
+ * Don't emit the error
+ */
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: failed status:%d\n",
+ __func__, status);
+ }
+ spin_lock_bh(&target->tx_lock);
+ list_del(&packet->list);
+
+ /* reclaim credits */
+ ep->cred_dist.credits += packet->info.tx.cred_used;
+ spin_unlock_bh(&target->tx_lock);
+
+ /* put it back into the caller's queue */
+ list_add(&packet->list, pkt_queue);
+ break;
+ }
+ }
+
+ if (status != 0) {
+ while (!list_empty(pkt_queue)) {
+ if (status != -ENOMEM) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: failed pkt:0x%p status:%d\n",
+ __func__, packet, status);
+ }
+
+ packet = list_first_entry(pkt_queue,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ packet->status = status;
+ send_packet_completion(target, packet);
+ }
+ }
+
+ return status;
+}
+
+static enum htc_send_queue_result htc_try_send(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct list_head *txq)
+{
+ struct list_head send_queue; /* temp queue to hold packets */
+ struct htc_packet *packet, *tmp_pkt;
+ struct ath6kl *ar = target->dev->ar;
+ enum htc_send_full_action action;
+ int tx_resources, overflow, txqueue_depth, i, good_pkts;
+ u8 pipeid;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
+ __func__, txq,
+ (txq == NULL) ? 0 : get_queue_depth(txq));
+
+ /* init the local send queue */
+ INIT_LIST_HEAD(&send_queue);
+
+ /*
+ * A NULL txq means the caller didn't provide a
+ * queue and just wants us to check the queues
+ * and send.
+ */
+ if (txq != NULL) {
+ if (list_empty(txq)) {
+ /* empty queue */
+ return HTC_SEND_QUEUE_DROP;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ txqueue_depth = get_queue_depth(&ep->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txqueue_depth >= ep->max_txq_depth) {
+ /* we've already overflowed */
+ overflow = get_queue_depth(txq);
+ } else {
+ /* get how much we will overflow by */
+ overflow = txqueue_depth;
+ overflow += get_queue_depth(txq);
+ /* get how much we will overflow the TX queue by */
+ overflow -= ep->max_txq_depth;
+ }
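+ /*
+ * For example: 28 packets already queued, 6 incoming and a
+ * max depth of 32 gives overflow = 28 + 6 - 32 = 2, so four
+ * packets can be queued directly and two are offered to the
+ * tx_full callback below (if one is registered).
+ */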
+
+ /* if overflow is negative or zero, we are okay */
+ if (overflow > 0) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
+ __func__, ep->eid, overflow, txqueue_depth,
+ ep->max_txq_depth);
+ }
+ if ((overflow <= 0) ||
+ (ep->ep_cb.tx_full == NULL)) {
+ /*
+ * all packets will fit or caller did not provide send
+ * full indication handler -- just move all of them
+ * to the local send_queue object
+ */
+ list_splice_tail_init(txq, &send_queue);
+ } else {
+ good_pkts = get_queue_depth(txq) - overflow;
+ if (good_pkts < 0) {
+ WARN_ON_ONCE(1);
+ return HTC_SEND_QUEUE_DROP;
+ }
+
+ /* we have overflowed, and a callback is provided */
+ /* dequeue all non-overflow packets to the sendqueue */
+ for (i = 0; i < good_pkts; i++) {
+ /* pop off caller's queue */
+ packet = list_first_entry(txq,
+ struct htc_packet,
+ list);
+ /* move to local queue */
+ list_move_tail(&packet->list, &send_queue);
+ }
+
+ /*
+ * the caller's queue now has all the packets that won't
+ * fit; walk through the caller's queue and indicate each
+ * one to the send full handler
+ */
+ list_for_each_entry_safe(packet, tmp_pkt,
+ txq, list) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: Indicat overflowed TX pkts: %p\n",
+ __func__, packet);
+ action = ep->ep_cb.tx_full(ep->target, packet);
+ if (action == HTC_SEND_FULL_DROP) {
+ /* callback wants the packet dropped */
+ ep->ep_st.tx_dropped += 1;
+
+ /* leave this one in the caller's queue
+ * for cleanup */
+ } else {
+ /* callback wants to keep this packet,
+ * move from caller's queue to the send
+ * queue */
+ list_move_tail(&packet->list,
+ &send_queue);
+ }
+ }
+
+ if (list_empty(&send_queue)) {
+ /* no packets made it in, caller will cleanup */
+ return HTC_SEND_QUEUE_DROP;
+ }
+ }
+ }
+
+ if (!ep->pipe.tx_credit_flow_enabled) {
+ tx_resources =
+ ath6kl_hif_pipe_get_free_queue_number(ar,
+ ep->pipe.pipeid_ul);
+ } else {
+ tx_resources = 0;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ if (!list_empty(&send_queue)) {
+ /* transfer packets to tail */
+ list_splice_tail_init(&send_queue, &ep->txq);
+ if (!list_empty(&send_queue)) {
+ WARN_ON_ONCE(1);
+ spin_unlock_bh(&target->tx_lock);
+ return HTC_SEND_QUEUE_DROP;
+ }
+ INIT_LIST_HEAD(&send_queue);
+ }
+
+ /* increment tx processing count on entry */
+ ep->tx_proc_cnt++;
+
+ if (ep->tx_proc_cnt > 1) {
+ /*
+ * Another thread or task is draining the TX queues on this
+ * endpoint; that thread will reset the tx processing count
+ * when the queue is drained.
+ */
+ ep->tx_proc_cnt--;
+ spin_unlock_bh(&target->tx_lock);
+ return HTC_SEND_QUEUE_OK;
+ }
+
+ /***** beyond this point only 1 thread may enter ******/
+
+ /*
+ * Now drain the endpoint TX queue for transmission as long as we have
+ * enough transmit resources.
+ */
+ while (true) {
+ if (get_queue_depth(&ep->txq) == 0)
+ break;
+
+ if (ep->pipe.tx_credit_flow_enabled) {
+ /*
+ * The credit based mechanism provides flow control
+ * based on target transmit resource availability;
+ * we assume that the HIF layer will always have
+ * more bus resources than the target has transmit
+ * resources.
+ */
+ get_htc_packet_credit_based(target, ep, &send_queue);
+ } else {
+ /*
+ * Get all packets for this endpoint that we can
+ * for this pass.
+ */
+ get_htc_packet(target, ep, &send_queue, tx_resources);
+ }
+
+ if (get_queue_depth(&send_queue) == 0) {
+ /*
+ * Didn't get any packets, either because we ran out
+ * of resources or because the TX queue was drained.
+ */
+ break;
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ /* send what we can */
+ htc_issue_packets(target, ep, &send_queue);
+
+ if (!ep->pipe.tx_credit_flow_enabled) {
+ pipeid = ep->pipe.pipeid_ul;
+ tx_resources =
+ ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ }
+
+ /* done with this endpoint, we can clear the count */
+ ep->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+
+ return HTC_SEND_QUEUE_OK;
+}
+
+/* htc control packet manipulation */
+static void destroy_htc_txctrl_packet(struct htc_packet *packet)
+{
+ struct sk_buff *skb;
+ skb = packet->skb;
+ dev_kfree_skb(skb);
+ kfree(packet);
+}
+
+static struct htc_packet *build_htc_txctrl_packet(void)
+{
+ struct htc_packet *packet = NULL;
+ struct sk_buff *skb;
+
+ packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
+ if (packet == NULL)
+ return NULL;
+
+ skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
+
+ if (skb == NULL) {
+ kfree(packet);
+ return NULL;
+ }
+ packet->skb = skb;
+
+ return packet;
+}
+
+static void htc_free_txctrl_packet(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ destroy_htc_txctrl_packet(packet);
+}
+
+static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
+{
+ return build_htc_txctrl_packet();
+}
+
+static void htc_txctrl_complete(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ htc_free_txctrl_packet(target, packet);
+}
+
+#define MAX_MESSAGE_SIZE 1536
+
+static int htc_setup_target_buffer_assignments(struct htc_target *target)
+{
+ int status, credits, credit_per_maxmsg, i;
+ struct htc_pipe_txcredit_alloc *entry;
+ unsigned int hif_usbaudioclass = 0;
+
+ credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
+ if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
+ credit_per_maxmsg++;
+
+ /* TODO, this should be configured by the caller! */
+
+ credits = target->tgt_creds;
+ entry = &target->pipe.txcredit_alloc[0];
+
+ status = -ENOMEM;
+
+ /* FIXME: hif_usbaudioclass is always zero */
+ if (hif_usbaudioclass) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: For USB Audio Class- Total:%d\n",
+ __func__, credits);
+ entry++;
+ entry++;
+ /* Set up VO service to have max credits */
+ entry->service_id = WMI_DATA_VO_SVC;
+ entry->credit_alloc = (credits - 6);
+ if (entry->credit_alloc == 0)
+ entry->credit_alloc++;
+
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ entry++;
+ entry->service_id = WMI_CONTROL_SVC;
+ entry->credit_alloc = credit_per_maxmsg;
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ /* leftovers go to best effort */
+ entry++;
+ entry++;
+ entry->service_id = WMI_DATA_BE_SVC;
+ entry->credit_alloc = (u8) credits;
+ status = 0;
+ } else {
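+ /*
+ * Default (non USB audio class) split: VI takes a quarter of
+ * the total, VO a quarter of what remains, control and BK one
+ * max-sized message worth each, and BE gets the leftovers.
+ * For example, 60 credits with credit_per_maxmsg = 1 yields
+ * VI 15, VO 11, control 1, BK 1 and BE 32.
+ */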
+ entry++;
+ entry->service_id = WMI_DATA_VI_SVC;
+ entry->credit_alloc = credits / 4;
+ if (entry->credit_alloc == 0)
+ entry->credit_alloc++;
+
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ entry++;
+ entry->service_id = WMI_DATA_VO_SVC;
+ entry->credit_alloc = credits / 4;
+ if (entry->credit_alloc == 0)
+ entry->credit_alloc++;
+
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ entry++;
+ entry->service_id = WMI_CONTROL_SVC;
+ entry->credit_alloc = credit_per_maxmsg;
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ entry++;
+ entry->service_id = WMI_DATA_BK_SVC;
+ entry->credit_alloc = credit_per_maxmsg;
+ credits -= (int) entry->credit_alloc;
+ if (credits <= 0)
+ return status;
+
+ /* leftovers go to best effort */
+ entry++;
+ entry->service_id = WMI_DATA_BE_SVC;
+ entry->credit_alloc = (u8) credits;
+ status = 0;
+ }
+
+ if (status == 0) {
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ if (target->pipe.txcredit_alloc[i].service_id != 0) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
+ i,
+ target->pipe.txcredit_alloc[i].
+ service_id,
+ target->pipe.txcredit_alloc[i].
+ credit_alloc);
+ }
+ }
+ }
+ return status;
+}
+
+/* process credit reports and call distribution function */
+static void htc_process_credit_report(struct htc_target *target,
+ struct htc_credit_report *rpt,
+ int num_entries,
+ enum htc_endpoint_id from_ep)
+{
+ int total_credits = 0, i;
+ struct htc_endpoint *ep;
+
+ /* lock out TX while we update credits */
+ spin_lock_bh(&target->tx_lock);
+
+ for (i = 0; i < num_entries; i++, rpt++) {
+ if (rpt->eid >= ENDPOINT_MAX) {
+ WARN_ON_ONCE(1);
+ spin_unlock_bh(&target->tx_lock);
+ return;
+ }
+
+ ep = &target->endpoint[rpt->eid];
+ ep->cred_dist.credits += rpt->credits;
+
+ if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
+ spin_unlock_bh(&target->tx_lock);
+ htc_try_send(target, ep, NULL);
+ spin_lock_bh(&target->tx_lock);
+ }
+
+ total_credits += rpt->credits;
+ }
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "Report indicated %d credits to distribute\n",
+ total_credits);
+
+ spin_unlock_bh(&target->tx_lock);
+}
+
+/* flush endpoint TX queue */
+static void htc_flush_tx_endpoint(struct htc_target *target,
+ struct htc_endpoint *ep, u16 tag)
+{
+ struct htc_packet *packet;
+
+ spin_lock_bh(&target->tx_lock);
+ while (get_queue_depth(&ep->txq)) {
+ packet = list_first_entry(&ep->txq, struct htc_packet, list);
+ list_del(&packet->list);
+ packet->status = 0;
+ send_packet_completion(target, packet);
+ }
+ spin_unlock_bh(&target->tx_lock);
+}
+
+/*
+ * In the adapted HIF layer, struct sk_buff pointers are passed between HIF and
+ * HTC. Since the upper layers expect struct htc_packet containers, we take the
+ * completed skb and look up its corresponding HTC packet buffer in a lookup list.
+ * This is extra overhead that can be fixed by re-aligning HIF interfaces with
+ * HTC.
+ */
+static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct sk_buff *skb)
+{
+ struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
+
+ spin_lock_bh(&target->tx_lock);
+
+ /*
+ * iterate from the front of the tx lookup queue;
+ * this lookup should be fast since lower layers complete in order, so
+ * the completed packet should generally be at the head of the list
+ */
+ list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
+ list) {
+ /* check for removal */
+ if (skb == packet->skb) {
+ /* found it */
+ list_del(&packet->list);
+ found_packet = packet;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ return found_packet;
+}
+
+static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
+{
+ struct htc_target *target = ar->htc_target;
+ struct htc_frame_hdr *htc_hdr;
+ struct htc_endpoint *ep;
+ struct htc_packet *packet;
+ u8 ep_id, *netdata;
+ u32 netlen;
+
+ netdata = skb->data;
+ netlen = skb->len;
+
+ htc_hdr = (struct htc_frame_hdr *) netdata;
+
+ ep_id = htc_hdr->eid;
+ ep = &target->endpoint[ep_id];
+
+ packet = htc_lookup_tx_packet(target, ep, skb);
+ if (packet == NULL) {
+ /* may have already been flushed and freed */
+ ath6kl_err("HTC TX lookup failed!\n");
+ } else {
+ /* will be giving this buffer back to upper layers */
+ packet->status = 0;
+ send_packet_completion(target, packet);
+ }
+ skb = NULL;
+
+ if (!ep->pipe.tx_credit_flow_enabled) {
+ /*
+ * note: when using TX credit flow, the re-checking of queues
+ * happens when credits flow back from the target. In the
+ * non-TX credit case, we recheck after the packet completes
+ */
+ htc_try_send(target, ep, NULL);
+ }
+
+ return 0;
+}
+
+static int htc_send_packets_multiple(struct htc_target *target,
+ struct list_head *pkt_queue)
+{
+ struct htc_endpoint *ep;
+ struct htc_packet *packet, *tmp_pkt;
+
+ if (list_empty(pkt_queue))
+ return -EINVAL;
+
+ /* get first packet to find out which ep the packets will go into */
+ packet = list_first_entry(pkt_queue, struct htc_packet, list);
+
+ if (packet->endpoint >= ENDPOINT_MAX) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ ep = &target->endpoint[packet->endpoint];
+
+ htc_try_send(target, ep, pkt_queue);
+
+ /* do completion on any packets that couldn't get in */
+ if (!list_empty(pkt_queue)) {
+ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+ packet->status = -ENOMEM;
+ }
+
+ do_send_completion(ep, pkt_queue);
+ }
+
+ return 0;
+}
+
+/* htc pipe rx path */
+static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
+{
+ struct htc_packet *packet;
+ spin_lock_bh(&target->rx_lock);
+
+ if (target->pipe.htc_packet_pool == NULL) {
+ spin_unlock_bh(&target->rx_lock);
+ return NULL;
+ }
+
+ packet = target->pipe.htc_packet_pool;
+ target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
+
+ spin_unlock_bh(&target->rx_lock);
+
+ packet->list.next = NULL;
+ return packet;
+}
+
+static void free_htc_packet_container(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct list_head *lh;
+
+ spin_lock_bh(&target->rx_lock);
+
+ if (target->pipe.htc_packet_pool == NULL) {
+ target->pipe.htc_packet_pool = packet;
+ packet->list.next = NULL;
+ } else {
+ lh = (struct list_head *) target->pipe.htc_packet_pool;
+ packet->list.next = lh;
+ target->pipe.htc_packet_pool = packet;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+}
+
+static int htc_process_trailer(struct htc_target *target, u8 *buffer,
+ int len, enum htc_endpoint_id from_ep)
+{
+ struct htc_credit_report *report;
+ struct htc_record_hdr *record;
+ u8 *record_buf, *orig_buf;
+ int orig_len, status;
+
+ orig_buf = buffer;
+ orig_len = len;
+ status = 0;
+
+ while (len > 0) {
+ if (len < sizeof(struct htc_record_hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* these are byte aligned structs */
+ record = (struct htc_record_hdr *) buffer;
+ len -= sizeof(struct htc_record_hdr);
+ buffer += sizeof(struct htc_record_hdr);
+
+ if (record->len > len) {
+ /* no room left in buffer for record */
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "invalid length: %d (id:%d) buffer has: %d bytes left\n",
+ record->len, record->rec_id, len);
+ status = -EINVAL;
+ break;
+ }
+
+ /* start of record follows the header */
+ record_buf = buffer;
+
+ switch (record->rec_id) {
+ case HTC_RECORD_CREDITS:
+ if (record->len < sizeof(struct htc_credit_report)) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ report = (struct htc_credit_report *) record_buf;
+ htc_process_credit_report(target, report,
+ record->len / sizeof(*report),
+ from_ep);
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "unhandled record: id:%d length:%d\n",
+ record->rec_id, record->len);
+ break;
+ }
+
+ if (status != 0)
+ break;
+
+ /* advance buffer past this record for next time around */
+ buffer += record->len;
+ len -= record->len;
+ }
+
+ return status;
+}
+
+static void do_recv_completion(struct htc_endpoint *ep,
+ struct list_head *queue_to_indicate)
+{
+ struct htc_packet *packet;
+
+ if (list_empty(queue_to_indicate)) {
+ /* nothing to indicate */
+ return;
+ }
+
+ /* using legacy EpRecv */
+ while (!list_empty(queue_to_indicate)) {
+ packet = list_first_entry(queue_to_indicate,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ ep->ep_cb.rx(ep->target, packet);
+ }
+
+ return;
+}
+
+static void recv_packet_completion(struct htc_target *target,
+ struct htc_endpoint *ep,
+ struct htc_packet *packet)
+{
+ struct list_head container;
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+
+ /* do completion */
+ do_recv_completion(ep, &container);
+}
+
+static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
+ u8 pipeid)
+{
+ struct htc_target *target = ar->htc_target;
+ u8 *netdata, *trailer, hdr_info;
+ struct htc_frame_hdr *htc_hdr;
+ u32 netlen, trailerlen = 0;
+ struct htc_packet *packet;
+ struct htc_endpoint *ep;
+ u16 payload_len;
+ int status = 0;
+
+ /*
+ * ar->htc_target can be NULL due to a race condition that can occur
+ * during driver initialization (we do 'ath6kl_hif_power_on' before
+ * initializing 'ar->htc_target' via 'ath6kl_htc_create').
+ * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
+ * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
+ * Thus the possibility of ar->htc_target being NULL
+ * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+ */
+ if (WARN_ON_ONCE(!target)) {
+ ath6kl_err("Target not yet initialized\n");
+ status = -EINVAL;
+ goto free_skb;
+ }
+
+
+ netdata = skb->data;
+ netlen = skb->len;
+
+ htc_hdr = (struct htc_frame_hdr *) netdata;
+
+ if (htc_hdr->eid >= ENDPOINT_MAX) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "HTC Rx: invalid EndpointID=%d\n",
+ htc_hdr->eid);
+ status = -EINVAL;
+ goto free_skb;
+ }
+ ep = &target->endpoint[htc_hdr->eid];
+
+ payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
+
+ if (netlen < (payload_len + HTC_HDR_LENGTH)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "HTC Rx: insufficient length, got:%d expected =%u\n",
+ netlen, payload_len + HTC_HDR_LENGTH);
+ status = -EINVAL;
+ goto free_skb;
+ }
+
+ /* get flags to check for trailer */
+ hdr_info = htc_hdr->flags;
+ if (hdr_info & HTC_FLG_RX_TRAILER) {
+ /* extract the trailer length */
+ hdr_info = htc_hdr->ctrl[0];
+ if ((hdr_info < sizeof(struct htc_record_hdr)) ||
+ (hdr_info > payload_len)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "invalid header: payloadlen should be %d, CB[0]: %d\n",
+ payload_len, hdr_info);
+ status = -EINVAL;
+ goto free_skb;
+ }
+
+ trailerlen = hdr_info;
+ /* process trailer after hdr/apps payload */
+ trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
+ payload_len - hdr_info;
+ status = htc_process_trailer(target, trailer, hdr_info,
+ htc_hdr->eid);
+ if (status != 0)
+ goto free_skb;
+ }
+
+ if (((int) payload_len - (int) trailerlen) <= 0) {
+ /* zero length packet with trailer, just drop these */
+ goto free_skb;
+ }
+
+ if (htc_hdr->eid == ENDPOINT_0) {
+ /* handle HTC control message */
+ if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
+ /*
+ * fatal: target should not send unsolicited
+ * messages on endpoint 0
+ */
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "HTC ignores Rx Ctrl after setup complete\n");
+ status = -EINVAL;
+ goto free_skb;
+ }
+
+ /* remove HTC header */
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ netdata = skb->data;
+ netlen = skb->len;
+
+ spin_lock_bh(&target->rx_lock);
+
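+ /* stash the control response; htc_wait_recv_ctrl_message() polls for it */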
+ target->pipe.ctrl_response_valid = true;
+ target->pipe.ctrl_response_len = min_t(int, netlen,
+ HTC_MAX_CTRL_MSG_LEN);
+ memcpy(target->pipe.ctrl_response_buf, netdata,
+ target->pipe.ctrl_response_len);
+
+ spin_unlock_bh(&target->rx_lock);
+
+ dev_kfree_skb(skb);
+ skb = NULL;
+
+ goto free_skb;
+ }
+
+ /*
+ * TODO: the message-based HIF architecture allocates net bufs
+ * for recv packets; since it bridges that HIF to upper layers,
+ * which expect HTC packets, we form the packets here
+ */
+ packet = alloc_htc_packet_container(target);
+ if (packet == NULL) {
+ status = -ENOMEM;
+ goto free_skb;
+ }
+
+ packet->status = 0;
+ packet->endpoint = htc_hdr->eid;
+ packet->pkt_cntxt = skb;
+
+ /* TODO: for backwards compatibility */
+ packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
+ packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
+
+ /*
+ * TODO: this is a hack because the driver layer will set the
+ * actual len of the skb again which will just double the len
+ */
+ skb_trim(skb, 0);
+
+ recv_packet_completion(target, ep, packet);
+
+ /* recover the packet container */
+ free_htc_packet_container(target, packet);
+ skb = NULL;
+
+free_skb:
+ dev_kfree_skb(skb);
+
+ return status;
+}
+
+static void htc_flush_rx_queue(struct htc_target *target,
+ struct htc_endpoint *ep)
+{
+ struct list_head container;
+ struct htc_packet *packet;
+
+ spin_lock_bh(&target->rx_lock);
+
+ while (1) {
+ if (list_empty(&ep->rx_bufq))
+ break;
+
+ packet = list_first_entry(&ep->rx_bufq,
+ struct htc_packet, list);
+ list_del(&packet->list);
+
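+ /* drop rx_lock while completing the packet back to the caller */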
+ spin_unlock_bh(&target->rx_lock);
+ packet->status = -ECANCELED;
+ packet->act_len = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "Flushing RX packet:0x%p, length:%d, ep:%d\n",
+ packet, packet->buf_len,
+ packet->endpoint);
+
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+
+ /* give the packet back */
+ do_recv_completion(ep, &container);
+ spin_lock_bh(&target->rx_lock);
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+}
+
+/* polling routine to wait for a control packet to be received */
+static int htc_wait_recv_ctrl_message(struct htc_target *target)
+{
+ int count = HTC_TARGET_RESPONSE_POLL_COUNT;
+
+ while (count > 0) {
+ spin_lock_bh(&target->rx_lock);
+
+ if (target->pipe.ctrl_response_valid) {
+ target->pipe.ctrl_response_valid = false;
+ spin_unlock_bh(&target->rx_lock);
+ break;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ count--;
+
+ msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
+ }
+
+ if (count <= 0) {
+ ath6kl_warn("htc pipe control receive timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void htc_rxctrl_complete(struct htc_target *context,
+ struct htc_packet *packet)
+{
+ struct sk_buff *skb = packet->skb;
+
+ if (packet->endpoint == ENDPOINT_0 &&
+ packet->status == -ECANCELED &&
+ skb != NULL)
+ dev_kfree_skb(skb);
+}
+
+/* htc pipe initialization */
+static void reset_endpoint_states(struct htc_target *target)
+{
+ struct htc_endpoint *ep;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ ep = &target->endpoint[i];
+ ep->svc_id = 0;
+ ep->len_max = 0;
+ ep->max_txq_depth = 0;
+ ep->eid = i;
+ INIT_LIST_HEAD(&ep->txq);
+ INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
+ INIT_LIST_HEAD(&ep->rx_bufq);
+ ep->target = target;
+ ep->pipe.tx_credit_flow_enabled = true;
+ }
+}
+
+/* start HTC, this is called after all services are connected */
+static int htc_config_target_hif_pipe(struct htc_target *target)
+{
+ return 0;
+}
+
+/* htc service functions */
+static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
+{
+ u8 allocation = 0;
+ int i;
+
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ if (target->pipe.txcredit_alloc[i].service_id == service_id)
+ allocation =
+ target->pipe.txcredit_alloc[i].credit_alloc;
+ }
+
+ if (allocation == 0) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "HTC Service TX : 0x%2.2X : allocation is zero!\n",
+ service_id);
+ }
+
+ return allocation;
+}
+
+static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *conn_req,
+ struct htc_service_connect_resp *conn_resp)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct htc_packet *packet = NULL;
+ struct htc_conn_service_resp *resp_msg;
+ struct htc_conn_service_msg *conn_msg;
+ enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
+ bool disable_credit_flowctrl = false;
+ unsigned int max_msg_size = 0;
+ struct htc_endpoint *ep;
+ int length, status = 0;
+ struct sk_buff *skb;
+ u8 tx_alloc;
+ u16 flags;
+
+ if (conn_req->svc_id == 0) {
+ WARN_ON_ONCE(1);
+ status = -EINVAL;
+ goto free_packet;
+ }
+
+ if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
+ /* special case for pseudo control service */
+ assigned_epid = ENDPOINT_0;
+ max_msg_size = HTC_MAX_CTRL_MSG_LEN;
+ tx_alloc = 0;
+
+ } else {
+ tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
+ if (tx_alloc == 0) {
+ status = -ENOMEM;
+ goto free_packet;
+ }
+
+ /* allocate a packet to send to the target */
+ packet = htc_alloc_txctrl_packet(target);
+
+ if (packet == NULL) {
+ WARN_ON_ONCE(1);
+ status = -ENOMEM;
+ goto free_packet;
+ }
+
+ skb = packet->skb;
+ length = sizeof(struct htc_conn_service_msg);
+
+ /* assemble connect service message */
+ conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
+ length);
+ if (conn_msg == NULL) {
+ WARN_ON_ONCE(1);
+ status = -EINVAL;
+ goto free_packet;
+ }
+
+ memset(conn_msg, 0,
+ sizeof(struct htc_conn_service_msg));
+ conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
+ conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
+ conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
+ ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);
+
+ /* tell target desired recv alloc for this ep */
+ flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
+ conn_msg->conn_flags |= cpu_to_le16(flags);
+
+ if (conn_req->conn_flags &
+ HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
+ disable_credit_flowctrl = true;
+ }
+
+ set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
+ length,
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ status = ath6kl_htc_pipe_tx(target, packet);
+
+ /* we don't own it anymore */
+ packet = NULL;
+ if (status != 0)
+ goto free_packet;
+
+ /* wait for response */
+ status = htc_wait_recv_ctrl_message(target);
+ if (status != 0)
+ goto free_packet;
+
+ /* we controlled the buffer creation so it has to be
+ * properly aligned
+ */
+ resp_msg = (struct htc_conn_service_resp *)
+ target->pipe.ctrl_response_buf;
+
+ if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
+ (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
+ /* this message is not valid */
+ WARN_ON_ONCE(1);
+ status = -EINVAL;
+ goto free_packet;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: service 0x%X conn resp: status: %d ep: %d\n",
+ __func__, resp_msg->svc_id, resp_msg->status,
+ resp_msg->eid);
+
+ conn_resp->resp_code = resp_msg->status;
+ /* check response status */
+ if (resp_msg->status != HTC_SERVICE_SUCCESS) {
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "Target failed service 0x%X connect request (status:%d)\n",
+ resp_msg->svc_id, resp_msg->status);
+ status = -EINVAL;
+ goto free_packet;
+ }
+
+ assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
+ max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
+ }
+
+ /* the rest are parameter checks so set the error status */
+ status = -EINVAL;
+
+ if (assigned_epid >= ENDPOINT_MAX) {
+ WARN_ON_ONCE(1);
+ goto free_packet;
+ }
+
+ if (max_msg_size == 0) {
+ WARN_ON_ONCE(1);
+ goto free_packet;
+ }
+
+ ep = &target->endpoint[assigned_epid];
+ ep->eid = assigned_epid;
+ if (ep->svc_id != 0) {
+ /* endpoint already in use! */
+ WARN_ON_ONCE(1);
+ goto free_packet;
+ }
+
+ /* return assigned endpoint to caller */
+ conn_resp->endpoint = assigned_epid;
+ conn_resp->len_max = max_msg_size;
+
+ /* setup the endpoint */
+ ep->svc_id = conn_req->svc_id; /* this marks ep in use */
+ ep->max_txq_depth = conn_req->max_txq_depth;
+ ep->len_max = max_msg_size;
+ ep->cred_dist.credits = tx_alloc;
+ ep->cred_dist.cred_sz = target->tgt_cred_sz;
+ ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
+ if (max_msg_size % target->tgt_cred_sz)
+ ep->cred_dist.cred_per_msg++;
+
+ /* copy all the callbacks */
+ ep->ep_cb = conn_req->ep_cb;
+
+ /* initialize tx_drop_packet_threshold */
+ ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+
+ status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
+ &ep->pipe.pipeid_ul,
+ &ep->pipe.pipeid_dl);
+ if (status != 0)
+ goto free_packet;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
+ ep->svc_id, ep->pipe.pipeid_ul,
+ ep->pipe.pipeid_dl, ep->eid);
+
+ if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
+ ep->pipe.tx_credit_flow_enabled = false;
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "SVC: 0x%4.4X ep:%d TX flow control off\n",
+ ep->svc_id, assigned_epid);
+ }
+
+free_packet:
+ if (packet != NULL)
+ htc_free_txctrl_packet(target, packet);
+ return status;
+}
+
+/* htc export functions */
+static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
+{
+ int status = 0;
+ struct htc_endpoint *ep = NULL;
+ struct htc_target *target = NULL;
+ struct htc_packet *packet;
+ int i;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (target == NULL) {
+ ath6kl_err("htc create unable to allocate memory\n");
+ status = -ENOMEM;
+ goto fail_htc_create;
+ }
+
+ spin_lock_init(&target->htc_lock);
+ spin_lock_init(&target->rx_lock);
+ spin_lock_init(&target->tx_lock);
+
+ reset_endpoint_states(target);
+
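+ /* pre-allocate a pool of HTC packet containers */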
+ for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
+ packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
+
+ if (packet != NULL)
+ free_htc_packet_container(target, packet);
+ }
+
+ target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
+ if (!target->dev) {
+ ath6kl_err("unable to allocate memory\n");
+ status = -ENOMEM;
+ goto fail_htc_create;
+ }
+ target->dev->ar = ar;
+ target->dev->htc_cnxt = target;
+
+ /* Get HIF default pipe for HTC message exchange */
+ ep = &target->endpoint[ENDPOINT_0];
+
+ ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
+ &ep->pipe.pipeid_dl);
+
+ return target;
+
+fail_htc_create:
+ if (status != 0) {
+ if (target != NULL)
+ ath6kl_htc_pipe_cleanup(target);
+
+ target = NULL;
+ }
+ return target;
+}
+
+/* cleanup the HTC instance */
+static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
+{
+ struct htc_packet *packet;
+
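+ /* drain and free the packet container pool */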
+ while (true) {
+ packet = alloc_htc_packet_container(target);
+ if (packet == NULL)
+ break;
+ kfree(packet);
+ }
+
+ kfree(target->dev);
+
+ /* kfree our instance */
+ kfree(target);
+}
+
+static int ath6kl_htc_pipe_start(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_setup_comp_ext_msg *setup;
+ struct htc_packet *packet;
+
+ htc_config_target_hif_pipe(target);
+
+ /* allocate a buffer to send */
+ packet = htc_alloc_txctrl_packet(target);
+ if (packet == NULL) {
+ WARN_ON_ONCE(1);
+ return -ENOMEM;
+ }
+
+ skb = packet->skb;
+
+ /* assemble setup complete message */
+ setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
+ sizeof(*setup));
+ memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
+ setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
+
+ set_htc_pkt_info(packet, NULL, (u8 *) setup,
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
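+ /* once setup is marked complete, unsolicited EP0 messages are rejected */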
+ target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
+
+ return ath6kl_htc_pipe_tx(target, packet);
+}
+
+static void ath6kl_htc_pipe_stop(struct htc_target *target)
+{
+ int i;
+ struct htc_endpoint *ep;
+
+ /* cleanup endpoints */
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep = &target->endpoint[i];
+ htc_flush_rx_queue(target, ep);
+ htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
+ }
+
+ reset_endpoint_states(target);
+ target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
+}
+
+static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
+ enum htc_endpoint_id endpoint)
+{
+ int num;
+
+ spin_lock_bh(&target->rx_lock);
+ num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
+ spin_unlock_bh(&target->rx_lock);
+
+ return num;
+}
+
+static int ath6kl_htc_pipe_tx(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct list_head queue;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
+ __func__, packet->endpoint, packet->buf,
+ packet->act_len);
+
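+ /* queue the single packet and reuse the multi-packet send path */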
+ INIT_LIST_HEAD(&queue);
+ list_add_tail(&packet->list, &queue);
+
+ return htc_send_packets_multiple(target, &queue);
+}
+
+static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
+{
+ struct htc_ready_ext_msg *ready_msg;
+ struct htc_service_connect_req connect;
+ struct htc_service_connect_resp resp;
+ int status = 0;
+
+ status = htc_wait_recv_ctrl_message(target);
+
+ if (status != 0)
+ return status;
+
+ if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
+ ath6kl_warn("invalid htc pipe ready msg len: %d\n",
+ target->pipe.ctrl_response_len);
+ return -ECOMM;
+ }
+
+ ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
+
+ if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
+ ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
+ ready_msg->ver2_0_info.msg_id);
+ return -ECOMM;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "Target Ready! : transmit resources : %d size:%d\n",
+ ready_msg->ver2_0_info.cred_cnt,
+ ready_msg->ver2_0_info.cred_sz);
+
+ target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
+ target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
+
+ if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
+ return -ECOMM;
+
+ htc_setup_target_buffer_assignments(target);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&connect, 0, sizeof(connect));
+ memset(&resp, 0, sizeof(resp));
+ connect.ep_cb.tx_complete = htc_txctrl_complete;
+ connect.ep_cb.rx = htc_rxctrl_complete;
+ connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
+ connect.svc_id = HTC_CTRL_RSVD_SVC;
+
+ /* connect fake service */
+ status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
+
+ return status;
+}
+
+static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id endpoint, u16 tag)
+{
+ struct htc_endpoint *ep = &target->endpoint[endpoint];
+
+ if (ep->svc_id == 0) {
+ WARN_ON_ONCE(1);
+ /* not in use.. */
+ return;
+ }
+
+ htc_flush_tx_endpoint(target, ep, tag);
+}
+
+static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pkt_queue)
+{
+ struct htc_packet *packet, *tmp_pkt, *first;
+ struct htc_endpoint *ep;
+ int status = 0;
+
+ if (list_empty(pkt_queue))
+ return -EINVAL;
+
+ first = list_first_entry(pkt_queue, struct htc_packet, list);
+
+ if (first->endpoint >= ENDPOINT_MAX) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
+ __func__, first->endpoint, get_queue_depth(pkt_queue),
+ first->buf_len);
+
+ ep = &target->endpoint[first->endpoint];
+
+ spin_lock_bh(&target->rx_lock);
+
+ /* store receive packets */
+ list_splice_tail_init(pkt_queue, &ep->rx_bufq);
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (status != 0) {
+ /* walk through queue and mark each one canceled */
+ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+ packet->status = -ECANCELED;
+ }
+
+ do_recv_completion(ep, pkt_queue);
+ }
+
+ return status;
+}
+
+static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
+ enum htc_endpoint_id ep,
+ bool active)
+{
+ /* TODO */
+}
+
+static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet, *tmp_pkt;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+
+ spin_lock_bh(&target->rx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt,
+ &endpoint->rx_bufq, list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&target->rx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx flush pkt 0x%p len %d ep %d\n",
+ packet, packet->buf_len,
+ packet->endpoint);
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&target->rx_lock);
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+ }
+}
+
+static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
+ struct ath6kl_htc_credit_info *info)
+{
+ return 0;
+}
+
+static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
+ .create = ath6kl_htc_pipe_create,
+ .wait_target = ath6kl_htc_pipe_wait_target,
+ .start = ath6kl_htc_pipe_start,
+ .conn_service = ath6kl_htc_pipe_conn_service,
+ .tx = ath6kl_htc_pipe_tx,
+ .stop = ath6kl_htc_pipe_stop,
+ .cleanup = ath6kl_htc_pipe_cleanup,
+ .flush_txep = ath6kl_htc_pipe_flush_txep,
+ .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
+ .activity_changed = ath6kl_htc_pipe_activity_changed,
+ .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
+ .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
+ .credit_setup = ath6kl_htc_pipe_credit_setup,
+ .tx_complete = ath6kl_htc_pipe_tx_complete,
+ .rx_complete = ath6kl_htc_pipe_rx_complete,
+};
+
+void ath6kl_htc_pipe_attach(struct ath6kl *ar)
+{
+ ar->htc_ops = &ath6kl_htc_pipe_ops;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
new file mode 100644
index 000000000..ba5087d59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -0,0 +1,1916 @@
+
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "htc-ops.h"
+
+static const struct ath6kl_hw hw_list[] = {
+ {
+ .id = AR6003_HW_2_0_VERSION,
+ .name = "ar6003 hw 2.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x543180,
+ .board_ext_data_addr = 0x57e500,
+ .reserved_ram_size = 6912,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+ .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
+
+ /* hw2.0 needs override address hardcoded */
+ .app_start_override_addr = 0x944C00,
+
+ .fw = {
+ .dir = AR6003_HW_2_0_FW_DIR,
+ .otp = AR6003_HW_2_0_OTP_FILE,
+ .fw = AR6003_HW_2_0_FIRMWARE_FILE,
+ .tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+ .patch = AR6003_HW_2_0_PATCH_FILE,
+ },
+
+ .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6003_HW_2_1_1_VERSION,
+ .name = "ar6003 hw 2.1.1",
+ .dataset_patch_addr = 0x57ff74,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x542330,
+ .reserved_ram_size = 512,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+ .testscript_addr = 0x57ef74,
+ .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
+
+ .fw = {
+ .dir = AR6003_HW_2_1_1_FW_DIR,
+ .otp = AR6003_HW_2_1_1_OTP_FILE,
+ .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
+ .tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+ .patch = AR6003_HW_2_1_1_PATCH_FILE,
+ .utf = AR6003_HW_2_1_1_UTF_FIRMWARE_FILE,
+ .testscript = AR6003_HW_2_1_1_TESTSCRIPT_FILE,
+ },
+
+ .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_0_VERSION,
+ .name = "ar6004 hw 1.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 19456,
+ .board_addr = 0x433900,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 11,
+ .flags = 0,
+
+ .fw = {
+ .dir = AR6004_HW_1_0_FW_DIR,
+ .fw = AR6004_HW_1_0_FIRMWARE_FILE,
+ },
+
+ .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_1_VERSION,
+ .name = "ar6004 hw 1.1",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 11264,
+ .board_addr = 0x43d400,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+ .flags = 0,
+ .fw = {
+ .dir = AR6004_HW_1_1_FW_DIR,
+ .fw = AR6004_HW_1_1_FIRMWARE_FILE,
+ },
+
+ .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_2_VERSION,
+ .name = "ar6004 hw 1.2",
+ .dataset_patch_addr = 0x436ecc,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 9216,
+ .board_addr = 0x435c00,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+ .flags = 0,
+
+ .fw = {
+ .dir = AR6004_HW_1_2_FW_DIR,
+ .fw = AR6004_HW_1_2_FIRMWARE_FILE,
+ },
+ .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_3_VERSION,
+ .name = "ar6004 hw 1.3",
+ .dataset_patch_addr = 0x437860,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 7168,
+ .board_addr = 0x436400,
+ .refclk_hz = 0,
+ .uarttx_pin = 11,
+ .flags = 0,
+
+ .fw = {
+ .dir = AR6004_HW_1_3_FW_DIR,
+ .fw = AR6004_HW_1_3_FIRMWARE_FILE,
+ .tcmd = AR6004_HW_1_3_TCMD_FIRMWARE_FILE,
+ .utf = AR6004_HW_1_3_UTF_FIRMWARE_FILE,
+ .testscript = AR6004_HW_1_3_TESTSCRIPT_FILE,
+ },
+
+ .fw_board = AR6004_HW_1_3_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_3_0_VERSION,
+ .name = "ar6004 hw 3.0",
+ .dataset_patch_addr = 0,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0,
+ .reserved_ram_size = 7168,
+ .board_addr = 0x436400,
+ .testscript_addr = 0,
+ .flags = 0,
+
+ .fw = {
+ .dir = AR6004_HW_3_0_FW_DIR,
+ .fw = AR6004_HW_3_0_FIRMWARE_FILE,
+ .tcmd = AR6004_HW_3_0_TCMD_FIRMWARE_FILE,
+ .utf = AR6004_HW_3_0_UTF_FIRMWARE_FILE,
+ .testscript = AR6004_HW_3_0_TESTSCRIPT_FILE,
+ },
+
+ .fw_board = AR6004_HW_3_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE,
+ },
+};
+
+/*
+ * Include definitions here that can be used to tune the WLAN module
+ * behavior. Different customers can tune the behavior here as per
+ * their needs.
+ */
+
+/*
+ * This configuration item enables/disables keepalive support.
+ * Keepalive support: in the absence of any data traffic to the AP, null
+ * frames are sent to the AP at a periodic interval to keep the association
+ * active. This configuration item defines that periodic interval.
+ * Use a value of zero to disable keepalive support.
+ * Default: 60 seconds
+ */
+#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
+
+/*
+ * This configuration item sets the value of the disconnect timeout.
+ * Firmware delays sending the disconnect event to the host for this
+ * timeout after it gets disconnected from the current AP.
+ * If the firmware successfully roams within the disconnect timeout
+ * it sends a new connect event.
+ */
+#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
+
+
+#define ATH6KL_DATA_OFFSET 64
+struct sk_buff *ath6kl_buf_alloc(int size)
+{
+ struct sk_buff *skb;
+ u16 reserved;
+
+ /* Add cacheline space at the front and back of the buffer */
+ reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
+ sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
+ skb = dev_alloc_skb(size + reserved);
+
+ if (skb)
+ skb_reserve(skb, reserved - L1_CACHE_BYTES);
+ return skb;
+}
+
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
+{
+ vif->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+
+ vif->dot11_auth_mode = OPEN_AUTH;
+ vif->auth_mode = NONE_AUTH;
+ vif->prwise_crypto = NONE_CRYPT;
+ vif->prwise_crypto_len = 0;
+ vif->grp_crypto = NONE_CRYPT;
+ vif->grp_crypto_len = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
+}
+
+static int ath6kl_set_host_app_area(struct ath6kl *ar)
+{
+ u32 address, data;
+ struct host_app_area host_app_area;
+
+ /* Fetch the address of the host_app_area_s
+ * instance in the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
+ address = TARG_VTOP(ar->target_type, address);
+
+ if (ath6kl_diag_read32(ar, address, &data))
+ return -EIO;
+
+ address = TARG_VTOP(ar->target_type, data);
+ host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
+ if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
+ sizeof(struct host_app_area)))
+ return -EIO;
+
+ return 0;
+}
+
+static inline void set_ac2_ep_map(struct ath6kl *ar,
+ u8 ac,
+ enum htc_endpoint_id ep)
+{
+ ar->ac2ep_map[ac] = ep;
+ ar->ep2ac_map[ep] = ac;
+}
+
+/* connect to a service */
+static int ath6kl_connectservice(struct ath6kl *ar,
+ struct htc_service_connect_req *con_req,
+ char *desc)
+{
+ int status;
+ struct htc_service_connect_resp response;
+
+ memset(&response, 0, sizeof(response));
+
+ status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response);
+ if (status) {
+ ath6kl_err("failed to connect to %s service status:%d\n",
+ desc, status);
+ return status;
+ }
+
+ switch (con_req->svc_id) {
+ case WMI_CONTROL_SVC:
+ if (test_bit(WMI_ENABLED, &ar->flag))
+ ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
+ ar->ctrl_ep = response.endpoint;
+ break;
+ case WMI_DATA_BE_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
+ break;
+ case WMI_DATA_BK_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
+ break;
+ case WMI_DATA_VI_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
+ break;
+ case WMI_DATA_VO_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
+ break;
+ default:
+ ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath6kl_init_service_ep(struct ath6kl *ar)
+{
+ struct htc_service_connect_req connect;
+
+ memset(&connect, 0, sizeof(connect));
+
+ /* these fields are the same for all service endpoints */
+ connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
+ connect.ep_cb.rx = ath6kl_rx;
+ connect.ep_cb.rx_refill = ath6kl_rx_refill;
+ connect.ep_cb.tx_full = ath6kl_tx_queue_full;
+
+ /*
+ * Set the max queue depth so that our ath6kl_tx_queue_full handler
+ * gets called.
+ */
+ connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
+ connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
+ if (!connect.ep_cb.rx_refill_thresh)
+ connect.ep_cb.rx_refill_thresh++;
+
+ /* connect to control service */
+ connect.svc_id = WMI_CONTROL_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
+ return -EIO;
+
+ connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
+
+ /*
+ * Limit the HTC message size on the send path: although we can
+ * receive A-MSDU frames of 4K, we will only send ethernet-sized
+ * (802.3) frames on the send path.
+ */
+ connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
+
+ /*
+ * To reduce the amount of committed memory for larger A_MSDU
+ * frames, use the recv-alloc threshold mechanism for larger
+ * packets.
+ */
+ connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
+ connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
+
+ /*
+ * For the remaining data services set the connection flag to
+ * reduce dribbling, if configured to do so.
+ */
+ connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
+ connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
+ connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
+
+ connect.svc_id = WMI_DATA_BE_SVC;
+
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
+ return -EIO;
+
+ /* connect to the background service, map this to WMI LOW_PRI */
+ connect.svc_id = WMI_DATA_BK_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
+ return -EIO;
+
+ /* connect to Video service, map this to HI PRI */
+ connect.svc_id = WMI_DATA_VI_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
+ return -EIO;
+
+ /*
+ * Connect to the VO service; this is currently not mapped to a WMI
+ * priority stream due to historical reasons. WMI originally
+ * defined 3 priorities over 3 mailboxes. We can change this when
+ * WMI is reworked so that priorities are not dependent on
+ * mailboxes.
+ */
+ connect.svc_id = WMI_DATA_VO_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
+ return -EIO;
+
+ return 0;
+}
+
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
+{
+ ath6kl_init_profile_info(vif);
+ vif->def_txkey_index = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ vif->ch_hint = 0;
+}
+
+/*
+ * Set HTC/Mbox operational parameters; this can only be called when the
+ * target is in the BMI phase.
+ */
+static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
+ u8 htc_ctrl_buf)
+{
+ int status;
+ u32 blk_size;
+
+ blk_size = ar->mbox_info.block_size;
+
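+ /* the HTC control buffer count, if any, rides in the upper 16 bits */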
+ if (htc_ctrl_buf)
+ blk_size |= ((u32)htc_ctrl_buf) << 16;
+
+ /* set the host interest area for the block size */
+ status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size);
+ if (status) {
+ ath6kl_err("bmi_write_memory for IO block size failed\n");
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
+ blk_size,
+ ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
+
+ if (mbox_isr_yield_val) {
+ /* set the host interest area for the mbox ISR yield limit */
+ status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit,
+ mbox_isr_yield_val);
+ if (status) {
+ ath6kl_err("bmi_write_memory for yield limit failed\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
+{
+ int ret;
+
+ /*
+ * Configure the device for rx dot11 header rules. "0,0" are the
+ * default values. Required if checksum offload is needed. Set
+ * RxMetaVersion to 2.
+ */
+ ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
+ ar->rx_meta_ver, 0, 0);
+ if (ret) {
+ ath6kl_err("unable to set the rx frame format: %d\n", ret);
+ return ret;
+ }
+
+ if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) {
+ ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
+ IGNORE_PS_FAIL_DURING_SCAN);
+ if (ret) {
+ ath6kl_err("unable to set power save fail event policy: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) {
+ ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
+ WMI_FOLLOW_BARKER_IN_ERP);
+ if (ret) {
+ ath6kl_err("unable to set barker preamble policy: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
+ WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
+ if (ret) {
+ ath6kl_err("unable to set keep alive interval: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
+ WLAN_CONFIG_DISCONNECT_TIMEOUT);
+ if (ret) {
+ ath6kl_err("unable to set disconnect timeout: %d\n", ret);
+ return ret;
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) {
+ ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED);
+ if (ret) {
+ ath6kl_err("unable to set txop bursting: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
+ ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
+ P2P_FLAG_CAPABILITIES_REQ |
+ P2P_FLAG_MACADDR_REQ |
+ P2P_FLAG_HMODEL_REQ);
+ if (ret) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to request P2P capabilities (%d) - assuming P2P not supported\n",
+ ret);
+ ar->p2p = false;
+ }
+ }
+
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
+ /* Enable Probe Request reporting for P2P */
+ ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
+ if (ret) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to enable Probe Request reporting (%d)\n",
+ ret);
+ }
+ }
+
+ return ret;
+}
+
+int ath6kl_configure_target(struct ath6kl *ar)
+{
+ u32 param, ram_reserved_size;
+ u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+ int i, status;
+
+ param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);
+ if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) {
+ ath6kl_err("bmi_write_memory for uart debug failed\n");
+ return -EIO;
+ }
+
+ /*
+ * Note: even though the firmware interface type is
+ * chosen as BSS_STA for all three interfaces, they can
+ * be configured to IBSS/AP as long as the fw submode
+ * remains normal mode (0 - AP, STA and IBSS). But
+ * due to a target assert in firmware only one interface is
+ * configured for now.
+ */
+ fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+ for (i = 0; i < ar->vif_max; i++)
+ fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+ /*
+ * Submodes when fw does not support dynamic interface
+ * switching:
+ * vif[0] - AP/STA/IBSS
+ * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+ * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+ * Otherwise, all the interfaces are initialized to P2P dev.
+ */
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ for (i = 0; i < ar->vif_max; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+ } else {
+ for (i = 0; i < ar->max_norm_iface; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ if (ar->p2p && ar->vif_max == 1)
+ fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
+ }
+
+ if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION) != 0) {
+ ath6kl_err("bmi_write_memory for htc version failed\n");
+ return -EIO;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ param = 0;
+
+ if (ath6kl_bmi_read_hi32(ar, hi_option_flag, &param) != 0) {
+ ath6kl_err("bmi_read_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
+ param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+ param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
+ param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+
+ if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) {
+ ath6kl_err("bmi_write_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");
+
+ /*
+ * Hardcode the address used for the extended board data.
+ * Ideally this should be pre-allocated by the OS at boot time,
+ * but since it is a new feature and board data is loaded
+ * at init time, we have to work around this from the host.
+ * It is difficult to patch the firmware boot code,
+ * but possible in theory.
+ */
+
+ if ((ar->target_type == TARGET_TYPE_AR6003) ||
+ (ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
+ (ar->version.target_ver == AR6004_HW_3_0_VERSION)) {
+ param = ar->hw.board_ext_data_addr;
+ ram_reserved_size = ar->hw.reserved_ram_size;
+
+ if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
+ ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
+ return -EIO;
+ }
+
+ if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
+ ram_reserved_size) != 0) {
+ ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
+ return -EIO;
+ }
+ }
+
+ /* set the block size for the target */
+ if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
+ /* use default number of control buffers */
+ return -EIO;
+
+ /* Configure GPIO AR600x UART */
+ status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin,
+ ar->hw.uarttx_pin);
+ if (status)
+ return status;
+
+ /* Configure target refclk_hz */
+ if (ar->hw.refclk_hz != 0) {
+ status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,
+ ar->hw.refclk_hz);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/* firmware upload */
+static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
+ u8 **fw, size_t *fw_len)
+{
+ const struct firmware *fw_entry;
+ int ret;
+
+ ret = reject_firmware(&fw_entry, filename, ar->dev);
+ if (ret)
+ return ret;
+
+ *fw_len = fw_entry->size;
+ *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+
+ if (*fw == NULL)
+ ret = -ENOMEM;
+
+ release_firmware(fw_entry);
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Check the device tree for a board-id and use it to construct
+ * the pathname to the firmware file. /*(DEBLOBBED)*/
+static bool check_device_tree(struct ath6kl *ar)
+{
+ static const char *board_id_prop = "atheros,board-id";
+ struct device_node *node;
+ char board_filename[64];
+ const char *board_id;
+ int ret;
+
+ for_each_compatible_node(node, NULL, "atheros,ath6kl") {
+ board_id = of_get_property(node, board_id_prop, NULL);
+ if (board_id == NULL) {
+ ath6kl_warn("No \"%s\" property on %s node.\n",
+ board_id_prop, node->name);
+ continue;
+ }
+ snprintf(board_filename, sizeof(board_filename),
+ "/*(DEBLOBBED)*/", ar->hw.fw.dir, board_id);
+
+ ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret) {
+ ath6kl_err("Failed to get DT board file %s: %d\n",
+ board_filename, ret);
+ continue;
+ }
+ return true;
+ }
+ return false;
+}
+#else
+static bool check_device_tree(struct ath6kl *ar)
+{
+ return false;
+}
+#endif /* CONFIG_OF */
+
+static int ath6kl_fetch_board_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ if (ar->fw_board != NULL)
+ return 0;
+
+ if (WARN_ON(ar->hw.fw_board == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw_board;
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret == 0) {
+ /* managed to get proper board file */
+ return 0;
+ }
+
+ if (check_device_tree(ar)) {
+ /* got board file from device tree */
+ return 0;
+ }
+
+ /* there was no proper board file, try to use default instead */
+ ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
+ filename, ret);
+
+ filename = ar->hw.fw_default_board;
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret) {
+ ath6kl_err("Failed to get default board file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ ath6kl_warn("WARNING! No proper board file was not found, instead using a default board file.\n");
+ ath6kl_warn("Most likely your hardware won't work as specified. Install correct board file!\n");
+
+ return 0;
+}
+
+static int ath6kl_fetch_otp_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->fw_otp != NULL)
+ return 0;
+
+ if (ar->hw.fw.otp == NULL) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "no OTP file configured for this hw\n");
+ return 0;
+ }
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.otp);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
+ &ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to get OTP file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_testmode_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->testmode == 0)
+ return 0;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode);
+
+ if (ar->testmode == 2) {
+ if (ar->hw.fw.utf == NULL) {
+ ath6kl_warn("testmode 2 not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.utf);
+ } else {
+ if (ar->hw.fw.tcmd == NULL) {
+ ath6kl_warn("testmode 1 not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.tcmd);
+ }
+
+ set_bit(TESTMODE, &ar->flag);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
+ if (ret) {
+ ath6kl_err("Failed to get testmode %d firmware file %s: %d\n",
+ ar->testmode, filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->fw != NULL)
+ return 0;
+
+ /* FIXME: remove WARN_ON() as we won't support FW API 1 for long */
+ if (WARN_ON(ar->hw.fw.fw == NULL))
+ return -EINVAL;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.fw);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
+ if (ret) {
+ ath6kl_err("Failed to get firmware file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_patch_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->fw_patch != NULL)
+ return 0;
+
+ if (ar->hw.fw.patch == NULL)
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.patch);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_testscript_file(struct ath6kl *ar)
+{
+ char filename[100];
+ int ret;
+
+ if (ar->testmode != 2)
+ return 0;
+
+ if (ar->fw_testscript != NULL)
+ return 0;
+
+ if (ar->hw.fw.testscript == NULL)
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw.fw.dir, ar->hw.fw.testscript);
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript,
+ &ar->fw_testscript_len);
+ if (ret) {
+ ath6kl_err("Failed to get testscript file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
+{
+ int ret;
+
+ ret = ath6kl_fetch_otp_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_fw_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_patch_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_testscript_file(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
+{
+ size_t magic_len, len, ie_len;
+ const struct firmware *fw;
+ struct ath6kl_fw_ie *hdr;
+ char filename[100];
+ const u8 *data;
+ int ret, ie_id, i, index, bit;
+ __le32 *val;
+
+ snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name);
+
+ ret = reject_firmware(&fw, filename, ar->dev);
+ if (ret)
+ return ret;
+
+ data = fw->data;
+ len = fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH6KL_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (memcmp(data, ATH6KL_FIRMWARE_MAGIC, magic_len) != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath6kl_fw_ie)) {
+ /* hdr is unaligned! */
+ hdr = (struct ath6kl_fw_ie *) data;
+
+ ie_id = le32_to_cpup(&hdr->id);
+ ie_len = le32_to_cpup(&hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (ie_id) {
+ case ATH6KL_FW_IE_FW_VERSION:
+ strlcpy(ar->wiphy->fw_version, data,
+ sizeof(ar->wiphy->fw_version));
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found fw version %s\n",
+ ar->wiphy->fw_version);
+ break;
+ case ATH6KL_FW_IE_OTP_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
+ ie_len);
+
+ ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL);
+
+ if (ar->fw_otp == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ar->fw_otp_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_FW_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
+ ie_len);
+
+ /* in testmode we already might have a fw file */
+ if (ar->fw != NULL)
+ break;
+
+ ar->fw = vmalloc(ie_len);
+
+ if (ar->fw == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(ar->fw, data, ie_len);
+ ar->fw_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_PATCH_IMAGE:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n",
+ ie_len);
+
+ ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL);
+
+ if (ar->fw_patch == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ar->fw_patch_len = ie_len;
+ break;
+ case ATH6KL_FW_IE_RESERVED_RAM_SIZE:
+ val = (__le32 *) data;
+ ar->hw.reserved_ram_size = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found reserved ram size ie %d\n",
+ ar->hw.reserved_ram_size);
+ break;
+ case ATH6KL_FW_IE_CAPABILITIES:
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found firmware capabilities ie (%zd B)\n",
+ ie_len);
+
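+ /* each capability is a single bit in the bitmap, LSB first in each byte */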
+ for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ar->fw_capabilities);
+ }
+
+ ath6kl_dbg_dump(ATH6KL_DBG_BOOT, "capabilities", "",
+ ar->fw_capabilities,
+ sizeof(ar->fw_capabilities));
+ break;
+ case ATH6KL_FW_IE_PATCH_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.dataset_patch_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found patch address ie 0x%x\n",
+ ar->hw.dataset_patch_addr);
+ break;
+ case ATH6KL_FW_IE_BOARD_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.board_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found board address ie 0x%x\n",
+ ar->hw.board_addr);
+ break;
+ case ATH6KL_FW_IE_VIF_MAX:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
+ ATH6KL_VIF_MAX);
+
+ if (ar->vif_max > 1 && !ar->p2p)
+ ar->max_norm_iface = 2;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found vif max ie %d\n", ar->vif_max);
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
+ le32_to_cpup(&hdr->id));
+ break;
+ }
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ ret = 0;
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
+{
+ int ret;
+
+ ret = ath6kl_fetch_board_file(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_fetch_testmode_file(ar);
+ if (ret)
+ return ret;
+
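+ /* try the newest firmware API first and fall back one version at a time */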
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API5_FILE);
+ if (ret == 0) {
+ ar->fw_api = 5;
+ goto out;
+ }
+
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
+ if (ret == 0) {
+ ar->fw_api = 4;
+ goto out;
+ }
+
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
+ if (ret == 0) {
+ ar->fw_api = 3;
+ goto out;
+ }
+
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE);
+ if (ret == 0) {
+ ar->fw_api = 2;
+ goto out;
+ }
+
+ ret = ath6kl_fetch_fw_api1(ar);
+ if (ret)
+ return ret;
+
+ ar->fw_api = 1;
+
+out:
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath6kl_upload_board_file(struct ath6kl *ar)
+{
+ u32 board_address, board_ext_address, param;
+ u32 board_data_size, board_ext_data_size;
+ int ret;
+
+ if (WARN_ON(ar->fw_board == NULL))
+ return -ENOENT;
+
+ /*
+ * Determine where in Target RAM to write Board Data.
+ * For AR6004, the host determines the Target RAM address for
+ * writing board data.
+ */
+ if (ar->hw.board_addr != 0) {
+ board_address = ar->hw.board_addr;
+ ath6kl_bmi_write_hi32(ar, hi_board_data,
+ board_address);
+ } else {
+ ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
+ if (ret) {
+ ath6kl_err("Failed to get board file target address.\n");
+ return ret;
+ }
+ }
+
+ /* determine where in target ram to write extended board data */
+ ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
+ if (ret) {
+ ath6kl_err("Failed to get extended board file target address.\n");
+ return ret;
+ }
+
+ if (ar->target_type == TARGET_TYPE_AR6003 &&
+ board_ext_address == 0) {
+ ath6kl_err("Failed to get board file target address.\n");
+ return -EINVAL;
+ }
+
+ switch (ar->target_type) {
+ case TARGET_TYPE_AR6003:
+ board_data_size = AR6003_BOARD_DATA_SZ;
+ board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ;
+ if (ar->fw_board_len > (board_data_size + board_ext_data_size))
+ board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ_V2;
+ break;
+ case TARGET_TYPE_AR6004:
+ board_data_size = AR6004_BOARD_DATA_SZ;
+ board_ext_data_size = AR6004_BOARD_EXT_DATA_SZ;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (board_ext_address &&
+ ar->fw_board_len == (board_data_size + board_ext_data_size)) {
+ /* write extended board data */
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "writing extended board data to 0x%x (%d B)\n",
+ board_ext_address, board_ext_data_size);
+
+ ret = ath6kl_bmi_write(ar, board_ext_address,
+ ar->fw_board + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath6kl_err("Failed to write extended board data: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* record that extended board data is initialized */
+ param = (board_ext_data_size << 16) | 1;
+
+ ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param);
+ }
+
+ if (ar->fw_board_len < board_data_size) {
+ ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing board file to 0x%x (%d B)\n",
+ board_address, board_data_size);
+
+ ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
+ board_data_size);
+
+ if (ret) {
+ ath6kl_err("Board file bmi write failed: %d\n", ret);
+ return ret;
+ }
+
+ /* record the fact that Board Data IS initialized */
+ if ((ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
+ (ar->version.target_ver == AR6004_HW_3_0_VERSION))
+ param = board_data_size;
+ else
+ param = 1;
+
+ ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, param);
+
+ return ret;
+}
+
+static int ath6kl_upload_otp(struct ath6kl *ar)
+{
+ u32 address, param;
+ bool from_hw = false;
+ int ret;
+
+ if (ar->fw_otp == NULL)
+ return 0;
+
+ address = ar->hw.app_load_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing otp to 0x%x (%zd B)\n", address,
+ ar->fw_otp_len);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
+ ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to upload OTP file: %d\n", ret);
+ return ret;
+ }
+
+ /* read firmware start address */
+ ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address);
+
+ if (ret) {
+ ath6kl_err("Failed to read hi_app_start: %d\n", ret);
+ return ret;
+ }
+
+ if (ar->hw.app_start_override_addr == 0) {
+ ar->hw.app_start_override_addr = address;
+ from_hw = true;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+ from_hw ? " (from hw)" : "",
+ ar->hw.app_start_override_addr);
+
+ /* execute the OTP code */
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+ ar->hw.app_start_override_addr);
+ param = 0;
+ ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
+
+ return ret;
+}
+
+static int ath6kl_upload_firmware(struct ath6kl *ar)
+{
+ u32 address;
+ int ret;
+
+ if (WARN_ON(ar->fw == NULL))
+ return 0;
+
+ address = ar->hw.app_load_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing firmware to 0x%x (%zd B)\n",
+ address, ar->fw_len);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
+
+ if (ret) {
+ ath6kl_err("Failed to write firmware: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set the starting address for the firmware.
+ * There is no need to set up the app_start override addr on AR6004.
+ */
+ if (ar->target_type != TARGET_TYPE_AR6004) {
+ address = ar->hw.app_start_override_addr;
+ ath6kl_bmi_set_app_start(ar, address);
+ }
+ return ret;
+}
+
+static int ath6kl_upload_patch(struct ath6kl *ar)
+{
+ u32 address;
+ int ret;
+
+ if (ar->fw_patch == NULL)
+ return 0;
+
+ address = ar->hw.dataset_patch_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing patch to 0x%x (%zd B)\n",
+ address, ar->fw_patch_len);
+
+ ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to write patch file: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address);
+
+ return 0;
+}
+
+static int ath6kl_upload_testscript(struct ath6kl *ar)
+{
+ u32 address;
+ int ret;
+
+ if (ar->testmode != 2)
+ return 0;
+
+ if (ar->fw_testscript == NULL)
+ return 0;
+
+ address = ar->hw.testscript_addr;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n",
+ address, ar->fw_testscript_len);
+
+ ret = ath6kl_bmi_write(ar, address, ar->fw_testscript,
+ ar->fw_testscript_len);
+ if (ret) {
+ ath6kl_err("Failed to write testscript file: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address);
+
+ if ((ar->version.target_ver != AR6004_HW_1_3_VERSION) &&
+ (ar->version.target_ver != AR6004_HW_3_0_VERSION))
+ ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);
+
+ ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1);
+
+ return 0;
+}
+
+static int ath6kl_init_upload(struct ath6kl *ar)
+{
+ u32 param, options, sleep, address;
+ int status = 0;
+
+ if (ar->target_type != TARGET_TYPE_AR6003 &&
+ ar->target_type != TARGET_TYPE_AR6004)
+ return -EINVAL;
+
+ /* temporarily disable system sleep */
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ options = param;
+
+ param |= ATH6KL_OPTION_SLEEP_DISABLE;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ sleep = param;
+
+ param |= SM(SYSTEM_SLEEP_DISABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
+ options, sleep);
+
+ /* program analog PLL register */
+ /* no need to control 40/44MHz clock on AR6004 */
+ if (ar->target_type != TARGET_TYPE_AR6004) {
+ status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
+ 0xF9104001);
+
+ if (status)
+ return status;
+
+ /* Run at 80/88MHz by default */
+ param = SM(CPU_CLOCK_STANDARD, 1);
+
+ address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+ }
+
+ param = 0;
+ address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
+ param = SM(LPO_CAL_ENABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ /* WAR to avoid SDIO CRC err */
+ if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) {
+ ath6kl_err("temporary war to avoid sdio crc error\n");
+
+ param = 0x28;
+ address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ param = 0x20;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+ }
+
+ /* write EEPROM data to Target RAM */
+ status = ath6kl_upload_board_file(ar);
+ if (status)
+ return status;
+
+ /* transfer One time Programmable data */
+ status = ath6kl_upload_otp(ar);
+ if (status)
+ return status;
+
+ /* Download Target firmware */
+ status = ath6kl_upload_firmware(ar);
+ if (status)
+ return status;
+
+ status = ath6kl_upload_patch(ar);
+ if (status)
+ return status;
+
+ /* Download the test script */
+ status = ath6kl_upload_testscript(ar);
+ if (status)
+ return status;
+
+ /* Restore system sleep */
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, sleep);
+ if (status)
+ return status;
+
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ param = options | 0x20;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ return status;
+}
+
+int ath6kl_init_hw_params(struct ath6kl *ar)
+{
+ const struct ath6kl_hw *uninitialized_var(hw);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = &hw_list[i];
+
+ if (hw->id == ar->version.target_ver)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hw_list)) {
+ ath6kl_err("Unsupported hardware version: 0x%x\n",
+ ar->version.target_ver);
+ return -EINVAL;
+ }
+
+ ar->hw = *hw;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
+ ar->version.target_ver, ar->target_type,
+ ar->hw.dataset_patch_addr, ar->hw.app_load_addr);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
+ ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
+ ar->hw.reserved_ram_size);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "refclk_hz %d uarttx_pin %d",
+ ar->hw.refclk_hz, ar->hw.uarttx_pin);
+
+ return 0;
+}
+
+static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
+{
+ switch (type) {
+ case ATH6KL_HIF_TYPE_SDIO:
+ return "sdio";
+ case ATH6KL_HIF_TYPE_USB:
+ return "usb";
+ }
+
+ return NULL;
+}
+
+
+static const struct fw_capa_str_map {
+ int id;
+ const char *name;
+} fw_capa_map[] = {
+ { ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
+ { ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
+ { ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
+ { ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
+ { ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
+ { ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
+ { ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
+ { ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
+ { ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
+ { ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
+ { ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
+ { ATH6KL_FW_CAPABILITY_64BIT_RATES, "64bit-rates" },
+ { ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, "ap-inactivity-mins" },
+ { ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, "map-lp-endpoint" },
+ { ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, "ratetable-mcs15" },
+ { ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, "no-ip-checksum" },
+};
+
+static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
+ if (fw_capa_map[i].id == id)
+ return fw_capa_map[i].name;
+ }
+
+ return "<unknown>";
+}
+
+static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
+{
+ u8 *data = (u8 *) ar->fw_capabilities;
+ size_t trunc_len, len = 0;
+ int i, index, bit;
+ char *trunc = "...";
+
+ for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index >= sizeof(ar->fw_capabilities) * 4)
+ break;
+
+ if (buf_len - len < 4) {
+ ath6kl_warn("firmware capability buffer too small!\n");
+
+ /* add "..." to the end of string */
+ trunc_len = strlen(trunc) + 1;
+ strncpy(buf + buf_len - trunc_len, trunc, trunc_len);
+
+ return;
+ }
+
+ if (data[index] & (1 << bit)) {
+ len += scnprintf(buf + len, buf_len - len, "%s,",
+ ath6kl_init_get_fw_capa_name(i));
+ }
+ }
+
+ /* overwrite the last comma */
+ if (len > 0)
+ len--;
+
+ buf[len] = '\0';
+}
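The loop above treats fw_capabilities as a byte-array bitmap: capability i lives in byte i / 8 at bit i % 8. A minimal userspace sketch of that decomposition, using made-up capability bits rather than the driver's enum:

#include <stdio.h>
#include <stdint.h>

/* Sketch: test bit 'id' in a byte-array bitmap, as the loop above does. */
static int capa_is_set(const uint8_t *bitmap, unsigned int id)
{
	unsigned int index = id / 8;	/* which byte holds the bit */
	unsigned int bit = id % 8;	/* position inside that byte */

	return !!(bitmap[index] & (1 << bit));
}

int main(void)
{
	uint8_t capa[4] = { 0x05, 0x00, 0x80, 0x00 };	/* bits 0, 2 and 23 set */
	unsigned int id;

	for (id = 0; id < 32; id++)
		if (capa_is_set(capa, id))
			printf("capability %u set\n", id);
	return 0;
}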
+
+static int ath6kl_init_hw_reset(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");
+
+ return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
+ cpu_to_le32(RESET_CONTROL_COLD_RST));
+}
+
+static int __ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ long timeleft;
+ int ret, i;
+ char buf[200];
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_power_off;
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_power_off;
+
+ /* Do we need to finish the BMI phase? */
+ ret = ath6kl_bmi_done(ar);
+ if (ret)
+ goto err_power_off;
+
+ /*
+ * The reason we have to wait for the target here is that the
+ * driver layer has to init BMI in order to set the host block
+ * size.
+ */
+ ret = ath6kl_htc_wait_target(ar->htc_target);
+
+ if (ret == -ETIMEDOUT) {
+ /*
+ * Most likely USB target is in odd state after reboot and
+ * needs a reset. A cold reset makes the whole device
+ * disappear from USB bus and initialisation starts from
+ * beginning.
+ */
+ ath6kl_warn("htc wait target timed out, resetting device\n");
+ ath6kl_init_hw_reset(ar);
+ goto err_power_off;
+ } else if (ret) {
+ ath6kl_err("htc wait target failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = ath6kl_init_service_ep(ar);
+ if (ret) {
+ ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
+ goto err_cleanup_scatter;
+ }
+
+ /* setup credit distribution */
+ ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);
+
+ /* start HTC */
+ ret = ath6kl_htc_start(ar->htc_target);
+ if (ret) {
+ /* FIXME: call this */
+ ath6kl_cookie_cleanup(ar);
+ goto err_cleanup_scatter;
+ }
+
+ /* Wait for Wmi event to be ready */
+ timeleft = wait_event_interruptible_timeout(ar->event_wq,
+ test_bit(WMI_READY,
+ &ar->flag),
+ WMI_TIMEOUT);
+ if (timeleft <= 0) {
+ clear_bit(WMI_READY, &ar->flag);
+ ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
+ timeleft);
+ ret = -EIO;
+ goto err_htc_stop;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+ if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+ ath6kl_info("%s %s fw %s api %d%s\n",
+ ar->hw.name,
+ ath6kl_init_get_hif_name(ar->hif_type),
+ ar->wiphy->fw_version,
+ ar->fw_api,
+ test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+ ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
+ ath6kl_info("firmware supports: %s\n", buf);
+ }
+
+ if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
+ ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
+ ATH6KL_ABI_VERSION, ar->version.abi_ver);
+ ret = -EIO;
+ goto err_htc_stop;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
+
+ /* communicate the wmi protocol version to the target */
+ /* FIXME: return error */
+ if ((ath6kl_set_host_app_area(ar)) != 0)
+ ath6kl_err("unable to set the host app area\n");
+
+ for (i = 0; i < ar->vif_max; i++) {
+ ret = ath6kl_target_config_wlan_params(ar, i);
+ if (ret)
+ goto err_htc_stop;
+ }
+
+ return 0;
+
+err_htc_stop:
+ ath6kl_htc_stop(ar->htc_target);
+err_cleanup_scatter:
+ ath6kl_hif_cleanup_scatter(ar);
+err_power_off:
+ ath6kl_hif_power_off(ar);
+
+ return ret;
+}
+
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ int err;
+
+ err = __ath6kl_init_hw_start(ar);
+ if (err)
+ return err;
+ ar->state = ATH6KL_STATE_ON;
+ return 0;
+}
+
+static int __ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+ ath6kl_htc_stop(ar->htc_target);
+
+ ath6kl_hif_stop(ar);
+
+ ath6kl_bmi_reset(ar);
+
+ ret = ath6kl_hif_power_off(ar);
+ if (ret)
+ ath6kl_warn("failed to power off hif: %d\n", ret);
+
+ return 0;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int err;
+
+ err = __ath6kl_init_hw_stop(ar);
+ if (err)
+ return err;
+ ar->state = ATH6KL_STATE_OFF;
+ return 0;
+}
+
+void ath6kl_init_hw_restart(struct ath6kl *ar)
+{
+ clear_bit(WMI_READY, &ar->flag);
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (__ath6kl_init_hw_stop(ar)) {
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n");
+ return;
+ }
+
+ if (__ath6kl_init_hw_start(ar)) {
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n");
+ return;
+ }
+}
+
+void ath6kl_stop_txrx(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif, *tmp_vif;
+ int i;
+
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
+ return;
+ }
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++)
+ aggr_reset_state(ar->sta_list[i].aggr_conn);
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
+ rtnl_lock();
+ ath6kl_cfg80211_vif_cleanup(vif);
+ rtnl_unlock();
+ spin_lock_bh(&ar->list_lock);
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ clear_bit(WMI_READY, &ar->flag);
+
+ if (ar->fw_recovery.enable)
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+
+ /*
+ * After wmi_shutdown all WMI events will be dropped. We
+ * need to cleanup the buffers allocated in AP mode and
+ * give disconnect notification to stack, which usually
+ * happens in the disconnect_event. Simulate the disconnect
+ * event by calling the function directly. Sometimes
+ * disconnect_event will be received when the debug logs
+ * are collected.
+ */
+ ath6kl_wmi_shutdown(ar->wmi);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ ath6kl_htc_stop(ar->htc_target);
+ }
+
+ /*
+ * Try to reset the device if we can. The driver may have been
+ * configured NOT to reset the target during a debug session.
+ */
+ ath6kl_init_hw_reset(ar);
+
+ up(&ar->sem);
+}
+EXPORT_SYMBOL(ath6kl_stop_txrx);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
new file mode 100644
index 000000000..1af3fed5a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -0,0 +1,1313 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "core.h"
+#include "hif-ops.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
+{
+ struct ath6kl *ar = vif->ar;
+ struct ath6kl_sta *conn = NULL;
+ u8 i, max_conn;
+
+ if (is_zero_ether_addr(node_addr))
+ return NULL;
+
+ max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+
+ for (i = 0; i < max_conn; i++) {
+ if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
+ conn = &ar->sta_list[i];
+ break;
+ }
+ }
+
+ return conn;
+}
+
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
+{
+ struct ath6kl_sta *conn = NULL;
+ u8 ctr;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].aid == aid) {
+ conn = &ar->sta_list[ctr];
+ break;
+ }
+ }
+ return conn;
+}
+
+static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
+ u8 *wpaie, size_t ielen, u8 keymgmt,
+ u8 ucipher, u8 auth, u8 apsd_info)
+{
+ struct ath6kl *ar = vif->ar;
+ struct ath6kl_sta *sta;
+ u8 free_slot;
+
+ free_slot = aid - 1;
+
+ sta = &ar->sta_list[free_slot];
+ memcpy(sta->mac, mac, ETH_ALEN);
+ if (ielen <= ATH6KL_MAX_IE)
+ memcpy(sta->wpa_ie, wpaie, ielen);
+ sta->aid = aid;
+ sta->keymgmt = keymgmt;
+ sta->ucipher = ucipher;
+ sta->auth = auth;
+ sta->apsd_info = apsd_info;
+
+ ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
+ ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
+ aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn);
+}
+
+static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
+{
+ struct ath6kl_sta *sta = &ar->sta_list[i];
+ struct ath6kl_mgmt_buff *entry, *tmp;
+
+ /* empty the queued pkts in the PS queue if any */
+ spin_lock_bh(&sta->psq_lock);
+ skb_queue_purge(&sta->psq);
+ skb_queue_purge(&sta->apsdq);
+
+ if (sta->mgmt_psq_len != 0) {
+ list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) {
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&sta->mgmt_psq);
+ sta->mgmt_psq_len = 0;
+ }
+
+ spin_unlock_bh(&sta->psq_lock);
+
+ memset(&ar->ap_stats.sta[sta->aid - 1], 0,
+ sizeof(struct wmi_per_sta_stat));
+ eth_zero_addr(sta->mac);
+ memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
+ sta->aid = 0;
+ sta->sta_flags = 0;
+
+ ar->sta_list_index = ar->sta_list_index & ~(1 << i);
+ aggr_reset_state(sta->aggr_conn);
+}
+
+static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
+{
+ u8 i, removed = 0;
+
+ if (is_zero_ether_addr(mac))
+ return removed;
+
+ if (is_broadcast_ether_addr(mac)) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all station\n");
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ }
+ }
+ } else {
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "deleting station %pM aid=%d reason=%d\n",
+ mac, ar->sta_list[i].aid, reason);
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ break;
+ }
+ }
+ }
+
+ return removed;
+}
+
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
+{
+ struct ath6kl *ar = devt;
+ return ar->ac2ep_map[ac];
+}
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
+{
+ struct ath6kl_cookie *cookie;
+
+ cookie = ar->cookie_list;
+ if (cookie != NULL) {
+ ar->cookie_list = cookie->arc_list_next;
+ ar->cookie_count--;
+ }
+
+ return cookie;
+}
+
+void ath6kl_cookie_init(struct ath6kl *ar)
+{
+ u32 i;
+
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+
+ memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
+
+ for (i = 0; i < MAX_COOKIE_NUM; i++)
+ ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
+}
+
+void ath6kl_cookie_cleanup(struct ath6kl *ar)
+{
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+}
+
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
+{
+ /* Insert first */
+
+ if (!ar || !cookie)
+ return;
+
+ cookie->arc_list_next = ar->cookie_list;
+ ar->cookie_list = cookie;
+ ar->cookie_count++;
+}
+
+/*
+ * Read from the hardware through its diagnostic window. No cooperation
+ * from the firmware is required for this.
+ */
+int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
+{
+ int ret;
+
+ ret = ath6kl_hif_diag_read32(ar, address, value);
+ if (ret) {
+ ath6kl_warn("failed to read32 through diagnose window: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
+{
+ int ret;
+
+ ret = ath6kl_hif_diag_write32(ar, address, value);
+
+ if (ret) {
+ ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n",
+ address, value);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
+{
+ u32 count, *buf = data;
+ int ret;
+
+ if (WARN_ON(length % 4))
+ return -EINVAL;
+
+ for (count = 0; count < length / 4; count++, address += 4) {
+ ret = ath6kl_diag_read32(ar, address, &buf[count]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length)
+{
+ u32 count;
+ __le32 *buf = data;
+ int ret;
+
+ if (WARN_ON(length % 4))
+ return -EINVAL;
+
+ for (count = 0; count < length / 4; count++, address += 4) {
+ ret = ath6kl_diag_write32(ar, address, buf[count]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
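Both diag helpers move data strictly one 32-bit word at a time and reject lengths that are not a multiple of four. A self-contained sketch of the same word loop, with a hypothetical write32 stub standing in for the HIF diagnostic-window op:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Stand-in for a per-word register write (hypothetical, not the HIF op). */
static int write32_stub(uint32_t address, uint32_t value)
{
	printf("write32 0x%08x <- 0x%08x\n", address, value);
	return 0;
}

/* Sketch of the word-at-a-time loop used by ath6kl_diag_write() above. */
static int diag_write_sketch(uint32_t address, const void *data, uint32_t length)
{
	const uint32_t *buf = data;
	uint32_t count;
	int ret;

	if (length % 4)			/* only whole 32-bit words allowed */
		return -EINVAL;

	for (count = 0; count < length / 4; count++, address += 4) {
		ret = write32_stub(address, buf[count]);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	uint32_t payload[2] = { 0x11223344, 0x55667788 };

	return diag_write_sketch(0x1000, payload, sizeof(payload));
}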
+
+int ath6kl_read_fwlogs(struct ath6kl *ar)
+{
+ struct ath6kl_dbglog_hdr debug_hdr;
+ struct ath6kl_dbglog_buf debug_buf;
+ u32 address, length, dropped, firstbuf, debug_hdr_addr;
+ int ret, loop;
+ u8 *buf;
+
+ buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ address = TARG_VTOP(ar->target_type,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbglog_hdr)));
+
+ ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr);
+ if (ret)
+ goto out;
+
+ /* Get the contents of the ring buffer */
+ if (debug_hdr_addr == 0) {
+ ath6kl_warn("Invalid address for debug_hdr_addr\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ address = TARG_VTOP(ar->target_type, debug_hdr_addr);
+ ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+ if (ret)
+ goto out;
+
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_hdr.dbuf_addr));
+ firstbuf = address;
+ dropped = le32_to_cpu(debug_hdr.dropped);
+ ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+ if (ret)
+ goto out;
+
+ loop = 100;
+
+ do {
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_buf.buffer_addr));
+ length = le32_to_cpu(debug_buf.length);
+
+ if (length != 0 && (le32_to_cpu(debug_buf.length) <=
+ le32_to_cpu(debug_buf.bufsize))) {
+ length = ALIGN(length, 4);
+
+ ret = ath6kl_diag_read(ar, address,
+ buf, length);
+ if (ret)
+ goto out;
+
+ ath6kl_debug_fwlog_event(ar, buf, length);
+ }
+
+ address = TARG_VTOP(ar->target_type,
+ le32_to_cpu(debug_buf.next));
+ ret = ath6kl_diag_read(ar, address, &debug_buf,
+ sizeof(debug_buf));
+ if (ret)
+ goto out;
+
+ loop--;
+
+ if (WARN_ON(loop == 0)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } while (address != firstbuf);
+
+out:
+ kfree(buf);
+
+ return ret;
+}
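The fwlog reader follows a circular list of debug buffers and gives up after 100 hops so a corrupted next pointer cannot loop forever. A simplified, in-memory sketch of that traversal pattern (plain structs instead of diag reads):

#include <stdio.h>

struct logbuf {
	struct logbuf *next;	/* circular link, like debug_buf.next */
	int length;
};

/* Walk a circular buffer list once, bailing out after 'max_loops' hops. */
static int walk_ring(struct logbuf *first, int max_loops)
{
	struct logbuf *cur = first;
	int visited = 0;

	do {
		if (cur->length)
			visited++;	/* consume the payload here */

		cur = cur->next;
		if (--max_loops == 0)
			return -1;	/* corrupted ring, give up */
	} while (cur != first);

	return visited;
}

int main(void)
{
	struct logbuf a, b, c;

	a.next = &b; a.length = 8;
	b.next = &c; b.length = 0;
	c.next = &a; c.length = 16;

	printf("buffers with data: %d\n", walk_ring(&a, 100));	/* prints 2 */
	return 0;
}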
+
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
+{
+ u8 index;
+ u8 keyusage;
+
+ for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) {
+ if (vif->wep_key_list[index].key_len) {
+ keyusage = GROUP_USAGE;
+ if (index == vif->def_txkey_index)
+ keyusage |= TX_USAGE;
+
+ ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ index,
+ WEP_CRYPT,
+ keyusage,
+ vif->wep_key_list[index].key_len,
+ NULL, 0,
+ vif->wep_key_list[index].key,
+ KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+}
+
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
+{
+ struct ath6kl *ar = vif->ar;
+ struct ath6kl_req_key *ik;
+ int res;
+ u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
+
+ ik = &ar->ap_mode_bkey;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
+
+ switch (vif->auth_mode) {
+ case NONE_AUTH:
+ if (vif->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(vif);
+ if (!ik->valid || ik->key_type != WAPI_CRYPT)
+ break;
+ /* for WAPI, we need to set the delayed group key, continue: */
+ case WPA_PSK_AUTH:
+ case WPA2_PSK_AUTH:
+ case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
+ if (!ik->valid)
+ break;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey for the initial group key for AP mode\n");
+ memset(key_rsc, 0, sizeof(key_rsc));
+ res = ath6kl_wmi_addkey_cmd(
+ ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+ GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+ ik->key,
+ KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
+ if (res) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey failed: %d\n", res);
+ }
+ break;
+ }
+
+ if (ar->last_ch != channel)
+ /* we actually don't know the phymode, default to HT20 */
+ ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20);
+
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+ set_bit(CONNECTED, &vif->flags);
+ netif_carrier_on(vif->ndev);
+}
+
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
+ u8 keymgmt, u8 ucipher, u8 auth,
+ u8 assoc_req_len, u8 *assoc_info, u8 apsd_info)
+{
+ u8 *ies = NULL, *wpa_ie = NULL, *pos;
+ size_t ies_len = 0;
+ struct station_info sinfo;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+
+ if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *) assoc_info;
+ if (ieee80211_is_assoc_req(mgmt->frame_control) &&
+ assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) +
+ sizeof(mgmt->u.assoc_req)) {
+ ies = mgmt->u.assoc_req.variable;
+ ies_len = assoc_info + assoc_req_len - ies;
+ } else if (ieee80211_is_reassoc_req(mgmt->frame_control) &&
+ assoc_req_len >= sizeof(struct ieee80211_hdr_3addr)
+ + sizeof(mgmt->u.reassoc_req)) {
+ ies = mgmt->u.reassoc_req.variable;
+ ies_len = assoc_info + assoc_req_len - ies;
+ }
+ }
+
+ pos = ies;
+ while (pos && pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (pos[0] == WLAN_EID_RSN)
+ wpa_ie = pos; /* RSN IE */
+ else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) {
+ if (pos[5] == 0x01)
+ wpa_ie = pos; /* WPA IE */
+ else if (pos[5] == 0x04) {
+ wpa_ie = pos; /* WPS IE */
+ break; /* overrides WPA/RSN IE */
+ }
+ } else if (pos[0] == 0x44 && wpa_ie == NULL) {
+ /*
+ * Note: WAPI Parameter Set IE re-uses Element ID that
+ * was officially allocated for BSS AC Access Delay. As
+ * such, we need to be a bit more careful when
+ * parsing the frame. However, the BSS AC Access Delay
+ * element is not supposed to be included in
+ * (Re)Association Request frames, so this should not
+ * cause problems.
+ */
+ wpa_ie = pos; /* WAPI IE */
+ break;
+ }
+ pos += 2 + pos[1];
+ }
+
+ ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie,
+ wpa_ie ? 2 + wpa_ie[1] : 0,
+ keymgmt, ucipher, auth, apsd_info);
+
+ /* send event to application */
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ /* TODO: sinfo.generation */
+
+ sinfo.assoc_req_ies = ies;
+ sinfo.assoc_req_ies_len = ies_len;
+
+ cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
+
+ netif_wake_queue(vif->ndev);
+}
+
+void disconnect_timer_handler(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ ath6kl_init_profile_info(vif);
+ ath6kl_disconnect(vif);
+}
+
+void ath6kl_disconnect(struct ath6kl_vif *vif)
+{
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)) {
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
+ /*
+ * Disconnect command is issued, clear the connect pending
+ * flag. The connected flag will be cleared in
+ * disconnect event notification.
+ */
+ clear_bit(CONNECT_PEND, &vif->flags);
+ }
+}
+
+/* WMI Event handlers */
+
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap)
+{
+ struct ath6kl *ar = devt;
+
+ memcpy(ar->mac_addr, datap, ETH_ALEN);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
+ ar->mac_addr, sw_ver, abi_ver, cap);
+
+ ar->version.wlan_ver = sw_ver;
+ ar->version.abi_ver = abi_ver;
+ ar->hw.cap = cap;
+
+ if (strlen(ar->wiphy->fw_version) == 0) {
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ (ar->version.wlan_ver & 0xf0000000) >> 28,
+ (ar->version.wlan_ver & 0x0f000000) >> 24,
+ (ar->version.wlan_ver & 0x00ff0000) >> 16,
+ (ar->version.wlan_ver & 0x0000ffff));
+ }
+
+ /* indicate to the waiting thread that the ready event was received */
+ set_bit(WMI_READY, &ar->flag);
+ wake_up(&ar->event_wq);
+}
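The wlan_ver word packs four version fields: a 4-bit major, a 4-bit minor, an 8-bit patch and a 16-bit build number. A standalone decoder using the same shifts and masks; the example value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t wlan_ver = 0x340700b2;	/* hypothetical example value */
	char ver[32];

	snprintf(ver, sizeof(ver), "%u.%u.%u.%u",
		 (wlan_ver & 0xf0000000) >> 28,	/* major */
		 (wlan_ver & 0x0f000000) >> 24,	/* minor */
		 (wlan_ver & 0x00ff0000) >> 16,	/* patch */
		 (wlan_ver & 0x0000ffff));	/* build */

	printf("fw %s\n", ver);	/* prints "fw 3.4.7.178" */
	return 0;
}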
+
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
+{
+ struct ath6kl *ar = vif->ar;
+ bool aborted = false;
+
+ if (status != WMI_SCAN_STATUS_SUCCESS)
+ aborted = true;
+
+ ath6kl_cfg80211_scan_complete_event(vif, aborted);
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
+}
+
+static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
+{
+ struct ath6kl *ar = vif->ar;
+
+ vif->profile.ch = cpu_to_le16(channel);
+
+ switch (vif->nw_type) {
+ case AP_NETWORK:
+ /*
+ * reconfigure any saved RSN IE capabilities in the beacon /
+ * probe response to stay in sync with the supplicant.
+ */
+ if (vif->rsn_capab &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities))
+ ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
+ WLAN_EID_RSN, WMI_RSN_IE_CAPB,
+ (const u8 *) &vif->rsn_capab,
+ sizeof(vif->rsn_capab));
+
+ return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
+ &vif->profile);
+ default:
+ ath6kl_err("won't switch channels nw_type=%d\n", vif->nw_type);
+ return -ENOTSUPP;
+ }
+}
+
+static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
+{
+ struct ath6kl_vif *vif;
+ int res = 0;
+
+ if (!ar->want_ch_switch)
+ return;
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
+ res = ath6kl_commit_ch_switch(vif, channel);
+
+ /* if channel switch failed, oh well we tried */
+ ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+
+ if (res)
+ ath6kl_err("channel switch failed nw_type %d res %d\n",
+ vif->nw_type, res);
+ }
+ spin_unlock_bh(&ar->list_lock);
+}
+
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
+ u16 listen_int, u16 beacon_int,
+ enum network_type net_type, u8 beacon_ie_len,
+ u8 assoc_req_len, u8 assoc_resp_len,
+ u8 *assoc_info)
+{
+ struct ath6kl *ar = vif->ar;
+
+ ath6kl_cfg80211_connect_event(vif, channel, bssid,
+ listen_int, beacon_int,
+ net_type, beacon_ie_len,
+ assoc_req_len, assoc_resp_len,
+ assoc_info);
+
+ memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+ vif->bss_ch = channel;
+
+ if (vif->nw_type == INFRA_NETWORK) {
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->listen_intvl_t, 0);
+ ath6kl_check_ch_switch(ar, channel);
+ }
+
+ netif_wake_queue(vif->ndev);
+
+ /* Update connect & link status atomically */
+ spin_lock_bh(&vif->if_lock);
+ set_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+ netif_carrier_on(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
+
+ aggr_reset_state(vif->aggr_cntxt->aggr_conn);
+ vif->reconnect_flag = 0;
+
+ if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ memset(ar->node_map, 0, sizeof(ar->node_map));
+ ar->node_num = 0;
+ ar->next_ep_id = ENDPOINT_2;
+ }
+
+ if (!ar->usr_bss_filter) {
+ set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ CURRENT_BSS_FILTER, 0);
+ }
+}
+
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
+{
+ struct ath6kl_sta *sta;
+ struct ath6kl *ar = vif->ar;
+ u8 tsc[6];
+
+ /*
+ * For the AP case, keyid holds the aid of the STA that sent the
+ * packet with the MIC error. Use this aid to get the MAC and
+ * send it to hostapd.
+ */
+ if (vif->nw_type == AP_NETWORK) {
+ sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
+ if (!sta)
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "ap tkip mic error received from aid=%d\n", keyid);
+
+ memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
+ cfg80211_michael_mic_failure(vif->ndev, sta->mac,
+ NL80211_KEYTYPE_PAIRWISE, keyid,
+ tsc, GFP_KERNEL);
+ } else {
+ ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
+ }
+}
+
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
+{
+ struct wmi_target_stats *tgt_stats =
+ (struct wmi_target_stats *) ptr;
+ struct ath6kl *ar = vif->ar;
+ struct target_stats *stats = &vif->target_stats;
+ struct tkip_ccmp_stats *ccmp_stats;
+ s32 rate;
+ u8 ac;
+
+ if (len < sizeof(*tgt_stats))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");
+
+ stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
+ stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
+ stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
+ stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
+ stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
+ stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
+ stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
+ stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
+ stats->tx_rts_success_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);
+
+ for (ac = 0; ac < WMM_NUM_AC; ac++)
+ stats->tx_pkt_per_ac[ac] +=
+ le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);
+
+ stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
+ stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
+ stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
+ stats->tx_mult_retry_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
+ stats->tx_rts_fail_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
+
+ rate = a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate);
+ stats->tx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate);
+
+ stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
+ stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
+ stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
+ stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
+ stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
+ stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
+ stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
+ stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
+ stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
+ stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
+ stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
+ stats->rx_key_cache_miss +=
+ le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
+ stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
+ stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
+
+ rate = a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate);
+ stats->rx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate);
+
+ ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
+
+ stats->tkip_local_mic_fail +=
+ le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
+ stats->tkip_cnter_measures_invoked +=
+ le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
+ stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);
+
+ stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
+ stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);
+
+ stats->pwr_save_fail_cnt +=
+ le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
+ stats->noise_floor_calib =
+ a_sle32_to_cpu(tgt_stats->noise_floor_calib);
+
+ stats->cs_bmiss_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
+ stats->cs_low_rssi_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
+ stats->cs_connect_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
+ stats->cs_discon_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);
+
+ stats->cs_ave_beacon_rssi =
+ a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);
+
+ stats->cs_last_roam_msec =
+ tgt_stats->cserv_stats.cs_last_roam_msec;
+ stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
+ stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);
+
+ stats->lq_val = le32_to_cpu(tgt_stats->lq_val);
+
+ stats->wow_pkt_dropped +=
+ le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
+ stats->wow_host_pkt_wakeups +=
+ tgt_stats->wow_stats.wow_host_pkt_wakeups;
+ stats->wow_host_evt_wakeups +=
+ tgt_stats->wow_stats.wow_host_evt_wakeups;
+ stats->wow_evt_discarded +=
+ le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
+
+ stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
+ stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
+ stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);
+
+ if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+ clear_bit(STATS_UPDATE_PEND, &vif->flags);
+ wake_up(&ar->event_wq);
+ }
+}
+
+static void ath6kl_add_le32(__le32 *var, __le32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
+}
+
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
+{
+ struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct ath6kl *ar = vif->ar;
+ struct wmi_ap_mode_stat *ap = &ar->ap_stats;
+ struct wmi_per_sta_stat *st_ap, *st_p;
+ u8 ac;
+
+ if (vif->nw_type == AP_NETWORK) {
+ if (len < sizeof(*p))
+ return;
+
+ for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
+ st_ap = &ap->sta[ac];
+ st_p = &p->sta[ac];
+
+ ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
+ ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
+ ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
+ ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
+ ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
+ ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
+ ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
+ ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
+ }
+
+ } else {
+ ath6kl_update_target_stats(vif, ptr, len);
+ }
+}
+
+void ath6kl_wakeup_event(void *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *) dev;
+
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
+{
+ struct ath6kl *ar = (struct ath6kl *) devt;
+
+ ar->tx_pwr = tx_pwr;
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
+{
+ struct ath6kl_sta *conn;
+ struct sk_buff *skb;
+ bool psq_empty = false;
+ struct ath6kl *ar = vif->ar;
+ struct ath6kl_mgmt_buff *mgmt_buf;
+
+ conn = ath6kl_find_sta_by_aid(ar, aid);
+
+ if (!conn)
+ return;
+ /*
+ * Send out a packet queued on ps queue. When the ps queue
+ * becomes empty update the PVB for this station.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ /* TODO: Send out a NULL data frame */
+ return;
+
+ spin_lock_bh(&conn->psq_lock);
+ if (conn->mgmt_psq_len > 0) {
+ mgmt_buf = list_first_entry(&conn->mgmt_psq,
+ struct ath6kl_mgmt_buff, list);
+ list_del(&mgmt_buf->list);
+ conn->mgmt_psq_len--;
+ spin_unlock_bh(&conn->psq_lock);
+
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
+ mgmt_buf->id, mgmt_buf->freq,
+ mgmt_buf->wait, mgmt_buf->buf,
+ mgmt_buf->len, mgmt_buf->no_cck);
+ conn->sta_flags &= ~STA_PS_POLLED;
+ kfree(mgmt_buf);
+ } else {
+ skb = skb_dequeue(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_data_tx(skb, vif->ndev);
+ conn->sta_flags &= ~STA_PS_POLLED;
+ }
+
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
+}
+
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
+{
+ bool mcastq_empty = false;
+ struct sk_buff *skb;
+ struct ath6kl *ar = vif->ar;
+
+ /*
+ * If there are no associated STAs, ignore the DTIM expiry event.
+ * There can be potential race conditions where the last associated
+ * STA may disconnect & before the host could clear the 'Indicate
+ * DTIM' request to the firmware, the firmware would have just
+ * indicated a DTIM expiry event. The race is between 'clear DTIM
+ * expiry cmd' going from the host to the firmware & the DTIM
+ * expiry event happening from the firmware to the host.
+ */
+ if (!ar->sta_list_index)
+ return;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ mcastq_empty = skb_queue_empty(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ if (mcastq_empty)
+ return;
+
+ /* set the STA flag to dtim_expired for the frame to go out */
+ set_bit(DTIM_EXPIRED, &vif->flags);
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ ath6kl_data_tx(skb, vif->ndev);
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ }
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ clear_bit(DTIM_EXPIRED, &vif->flags);
+
+ /* clear the LSB of the BitMapCtl field of the TIM IE */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
+}
+
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
+ u8 assoc_resp_len, u8 *assoc_info,
+ u16 prot_reason_status)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (vif->nw_type == AP_NETWORK) {
+ /* disconnect due to other STA vif switching channels */
+ if (reason == BSS_DISCONNECTED &&
+ prot_reason_status == WMI_AP_REASON_STA_ROAM) {
+ ar->want_ch_switch |= 1 << vif->fw_vif_idx;
+ /* bail back to this channel if STA vif fails connect */
+ ar->last_ch = le16_to_cpu(vif->profile.ch);
+ }
+
+ if (prot_reason_status == WMI_AP_REASON_MAX_STA) {
+ /* send max client reached notification to user space */
+ cfg80211_conn_failed(vif->ndev, bssid,
+ NL80211_CONN_FAIL_MAX_CLIENTS,
+ GFP_KERNEL);
+ }
+
+ if (prot_reason_status == WMI_AP_REASON_ACL) {
+ /* send blocked client notification to user space */
+ cfg80211_conn_failed(vif->ndev, bssid,
+ NL80211_CONN_FAIL_BLOCKED_CLIENT,
+ GFP_KERNEL);
+ }
+
+ if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
+ return;
+
+ /* if no more associated STAs, empty the mcast PS q */
+ if (ar->sta_list_index == 0) {
+ spin_lock_bh(&ar->mcastpsq_lock);
+ skb_queue_purge(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /* clear the LSB of the TIM IE's BitMapCtl field */
+ if (test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ MCAST_AID, 0);
+ }
+
+ if (!is_broadcast_ether_addr(bssid)) {
+ /* send event to application */
+ cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
+ }
+
+ if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ clear_bit(CONNECTED, &vif->flags);
+ }
+ return;
+ }
+
+ ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
+ assoc_resp_len, assoc_info,
+ prot_reason_status);
+
+ aggr_reset_state(vif->aggr_cntxt->aggr_conn);
+
+ del_timer(&vif->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
+
+ /*
+ * If the event is due to a disconnect cmd from the host, only then
+ * will the target stop trying to connect. Under any other
+ * condition, the target keeps trying to connect.
+ */
+ if (reason == DISCONNECT_CMD) {
+ if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
+ } else {
+ set_bit(CONNECT_PEND, &vif->flags);
+ if (((reason == ASSOC_FAILED) &&
+ (prot_reason_status == 0x11)) ||
+ ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
+ (vif->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &vif->flags);
+ return;
+ }
+ }
+
+ /* restart disconnected concurrent vifs waiting for new channel */
+ ath6kl_check_ch_switch(ar, ar->last_ch);
+
+ /* update connect & link status atomically */
+ spin_lock_bh(&vif->if_lock);
+ clear_bit(CONNECTED, &vif->flags);
+ netif_carrier_off(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
+
+ if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+ vif->reconnect_flag = 0;
+
+ if (reason != CSERV_DISCONNECT)
+ ar->user_key_ctrl = 0;
+
+ netif_stop_queue(vif->ndev);
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
+
+ ath6kl_tx_data_cleanup(ar);
+}
+
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+
+ spin_lock_bh(&ar->list_lock);
+ if (list_empty(&ar->vif_list)) {
+ spin_unlock_bh(&ar->list_lock);
+ return NULL;
+ }
+
+ vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
+
+ spin_unlock_bh(&ar->list_lock);
+
+ return vif;
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ set_bit(WLAN_ENABLED, &vif->flags);
+
+ if (test_bit(CONNECTED, &vif->flags)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ } else {
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+static int ath6kl_close(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ ath6kl_cfg80211_stop(vif);
+
+ clear_bit(WLAN_ENABLED, &vif->flags);
+
+ return 0;
+}
+
+static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ return &vif->net_stats;
+}
+
+static int ath6kl_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl *ar = vif->ar;
+ int err = 0;
+
+ if ((features & NETIF_F_RXCSUM) &&
+ (ar->rx_meta_ver != WMI_META_VERSION_2)) {
+ ar->rx_meta_ver = WMI_META_VERSION_2;
+ err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ ar->rx_meta_ver, 0, 0);
+ if (err) {
+ dev->features = features & ~NETIF_F_RXCSUM;
+ return err;
+ }
+ } else if (!(features & NETIF_F_RXCSUM) &&
+ (ar->rx_meta_ver == WMI_META_VERSION_2)) {
+ ar->rx_meta_ver = 0;
+ err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ ar->rx_meta_ver, 0, 0);
+ if (err) {
+ dev->features = features | NETIF_F_RXCSUM;
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static void ath6kl_set_multicast_list(struct net_device *ndev)
+{
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+ bool mc_all_on = false;
+ int mc_count = netdev_mc_count(ndev);
+ struct netdev_hw_addr *ha;
+ bool found;
+ struct ath6kl_mc_filter *mc_filter, *tmp;
+ struct list_head mc_filter_new;
+ int ret;
+
+ if (!test_bit(WMI_READY, &vif->ar->flag) ||
+ !test_bit(WLAN_ENABLED, &vif->flags))
+ return;
+
+ /* Enable multicast-all filter. */
+ mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
+ !!(ndev->flags & IFF_ALLMULTI) ||
+ !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);
+
+ if (mc_all_on)
+ set_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+ else
+ clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ vif->ar->fw_capabilities)) {
+ mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
+ }
+
+ if (!(ndev->flags & IFF_MULTICAST)) {
+ mc_all_on = false;
+ set_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ } else {
+ clear_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ }
+
+ /* Enable/disable "multicast-all" filter */
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast-all filter\n",
+ mc_all_on ? "enabling" : "disabling");
+
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ mc_all_on);
+ if (ret) {
+ ath6kl_warn("Failed to %s multicast-all receive\n",
+ mc_all_on ? "enable" : "disable");
+ return;
+ }
+
+ if (test_bit(NETDEV_MCAST_ALL_ON, &vif->flags))
+ return;
+
+ /* Keep the driver and firmware mcast list in sync. */
+ list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
+ found = false;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (memcmp(ha->addr, mc_filter->hw_addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * Delete the filter which was previously set
+ * but not in the new request.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "Removing %pM from multicast filter\n",
+ mc_filter->hw_addr);
+ ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, mc_filter->hw_addr,
+ false);
+ if (ret) {
+ ath6kl_warn("Failed to remove multicast filter:%pM\n",
+ mc_filter->hw_addr);
+ return;
+ }
+
+ list_del(&mc_filter->list);
+ kfree(mc_filter);
+ }
+ }
+
+ INIT_LIST_HEAD(&mc_filter_new);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ found = false;
+ list_for_each_entry(mc_filter, &vif->mc_filter, list) {
+ if (memcmp(ha->addr, mc_filter->hw_addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter),
+ GFP_ATOMIC);
+ if (!mc_filter) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ memcpy(mc_filter->hw_addr, ha->addr,
+ ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
+ /* Set the multicast filter */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "Adding %pM to multicast filter list\n",
+ mc_filter->hw_addr);
+ ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, mc_filter->hw_addr,
+ true);
+ if (ret) {
+ ath6kl_warn("Failed to add multicast filter :%pM\n",
+ mc_filter->hw_addr);
+ kfree(mc_filter);
+ goto out;
+ }
+
+ list_add_tail(&mc_filter->list, &mc_filter_new);
+ }
+ }
+
+out:
+ list_splice_tail(&mc_filter_new, &vif->mc_filter);
+}
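The multicast-all decision above boils down to: force it off when the interface rejects multicast, otherwise enable it for promiscuous or all-multi interfaces, or when the address list exceeds the per-list filter limit. A distilled sketch of just that decision; it omits the WoW-capability special case handled above, and the names are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MAX_MC_FILTERS 7	/* stand-in for ATH6K_MAX_MC_FILTERS_PER_LIST */

/* Simplified "multicast-all" decision (flag arguments are plain booleans,
 * not the real netdev flag bits). */
static bool want_mcast_all(bool promisc, bool allmulti, bool multicast,
			   int mc_count)
{
	if (!multicast)
		return false;	/* interface does not want multicast at all */

	return promisc || allmulti || mc_count > MAX_MC_FILTERS;
}

int main(void)
{
	printf("%d\n", want_mcast_all(false, false, true, 3));	/* 0: use list */
	printf("%d\n", want_mcast_all(false, false, true, 12));	/* 1: too many */
	printf("%d\n", want_mcast_all(false, true, true, 1));	/* 1: allmulti */
	return 0;
}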
+
+static const struct net_device_ops ath6kl_netdev_ops = {
+ .ndo_open = ath6kl_open,
+ .ndo_stop = ath6kl_close,
+ .ndo_start_xmit = ath6kl_data_tx,
+ .ndo_get_stats = ath6kl_get_stats,
+ .ndo_set_features = ath6kl_set_features,
+ .ndo_set_rx_mode = ath6kl_set_multicast_list,
+};
+
+void init_netdev(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
+
+ dev->needed_headroom = ETH_HLEN;
+ dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) +
+ sizeof(struct wmi_data_hdr) +
+ HTC_HDR_LENGTH +
+ WMI_MAX_TX_META_SZ +
+ ATH6KL_HTC_ALIGN_BYTES, 4);
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
+ ar->fw_capabilities))
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
new file mode 100644
index 000000000..3a8d5e97d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+static void ath6kl_recovery_work(struct work_struct *work)
+{
+ struct ath6kl *ar = container_of(work, struct ath6kl,
+ fw_recovery.recovery_work);
+
+ ar->state = ATH6KL_STATE_RECOVERY;
+
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+
+ ath6kl_init_hw_restart(ar);
+
+ ar->state = ATH6KL_STATE_ON;
+ clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+ ar->fw_recovery.err_reason = 0;
+
+ if (ar->fw_recovery.hb_poll)
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Fw error detected, reason:%d\n",
+ reason);
+
+ set_bit(reason, &ar->fw_recovery.err_reason);
+
+ if (!test_bit(RECOVERY_CLEANUP, &ar->flag) &&
+ ar->state != ATH6KL_STATE_RECOVERY)
+ queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
+{
+ if (cookie == ar->fw_recovery.seq_num)
+ ar->fw_recovery.hb_pending = false;
+}
+
+static void ath6kl_recovery_hb_timer(unsigned long data)
+{
+ struct ath6kl *ar = (struct ath6kl *) data;
+ int err;
+
+ if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
+ (ar->state == ATH6KL_STATE_RECOVERY))
+ return;
+
+ if (ar->fw_recovery.hb_pending)
+ ar->fw_recovery.hb_misscnt++;
+ else
+ ar->fw_recovery.hb_misscnt = 0;
+
+ if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) {
+ ar->fw_recovery.hb_misscnt = 0;
+ ar->fw_recovery.seq_num = 0;
+ ar->fw_recovery.hb_pending = false;
+ ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE);
+ return;
+ }
+
+ ar->fw_recovery.seq_num++;
+ ar->fw_recovery.hb_pending = true;
+
+ err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi,
+ ar->fw_recovery.seq_num, 0);
+ if (err)
+ ath6kl_warn("Failed to send hb challenge request, err:%d\n",
+ err);
+
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
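Each timer tick either clears or increments the miss counter, depending on whether the previous challenge was answered, and declares the firmware unresponsive once the threshold is crossed. A compact sketch of that state machine; ATH6KL_HB_RESP_MISS_THRES is not visible in this hunk, so a stand-in value of 1 is assumed:

#include <stdbool.h>
#include <stdio.h>

#define HB_MISS_THRES 1	/* stand-in for ATH6KL_HB_RESP_MISS_THRES */

struct hb_state {
	bool pending;		/* challenge sent, response not yet seen */
	int misscnt;
	unsigned int seq;
};

/* One timer tick of the heartbeat logic above (simplified sketch). */
static bool hb_tick(struct hb_state *hb)
{
	if (hb->pending)
		hb->misscnt++;
	else
		hb->misscnt = 0;

	if (hb->misscnt > HB_MISS_THRES) {
		hb->misscnt = 0;
		hb->pending = false;
		return true;	/* firmware declared unresponsive */
	}

	hb->seq++;
	hb->pending = true;	/* a challenge with hb->seq would be sent here */
	return false;
}

int main(void)
{
	struct hb_state hb = { false, 0, 0 };
	int tick;

	for (tick = 1; tick <= 4; tick++)	/* never answered: fails on tick 3 */
		printf("tick %d: failure=%d\n", tick, hb_tick(&hb));
	return 0;
}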
+
+void ath6kl_recovery_init(struct ath6kl *ar)
+{
+ struct ath6kl_fw_recovery *recovery = &ar->fw_recovery;
+
+ clear_bit(RECOVERY_CLEANUP, &ar->flag);
+ INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work);
+ recovery->seq_num = 0;
+ recovery->hb_misscnt = 0;
+ ar->fw_recovery.hb_pending = false;
+ ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer;
+ ar->fw_recovery.hb_timer.data = (unsigned long) ar;
+ init_timer_deferrable(&ar->fw_recovery.hb_timer);
+
+ if (ar->fw_recovery.hb_poll)
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_cleanup(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ set_bit(RECOVERY_CLEANUP, &ar->flag);
+
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+ cancel_work_sync(&ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_suspend(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ ath6kl_recovery_cleanup(ar);
+
+ if (!ar->fw_recovery.err_reason)
+ return;
+
+ /* Process pending fw error detection */
+ ar->fw_recovery.err_reason = 0;
+ WARN_ON(ar->state != ATH6KL_STATE_ON);
+ ar->state = ATH6KL_STATE_RECOVERY;
+ ath6kl_init_hw_restart(ar);
+ ar->state = ATH6KL_STATE_ON;
+}
+
+void ath6kl_recovery_resume(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ clear_bit(RECOVERY_CLEANUP, &ar->flag);
+
+ if (!ar->fw_recovery.hb_poll)
+ return;
+
+ ar->fw_recovery.hb_pending = false;
+ ar->fw_recovery.seq_num = 0;
+ ar->fw_recovery.hb_misscnt = 0;
+ mod_timer(&ar->fw_recovery.hb_timer,
+ jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
new file mode 100644
index 000000000..ed8cfeab6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -0,0 +1,1440 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "hif.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+#include "cfg80211.h"
+#include "trace.h"
+
+struct ath6kl_sdio {
+ struct sdio_func *func;
+
+ /* protects access to bus_req_freeq */
+ spinlock_t lock;
+
+ /* free list */
+ struct list_head bus_req_freeq;
+
+ /* available bus requests */
+ struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
+
+ struct ath6kl *ar;
+
+ u8 *dma_buffer;
+
+ /* protects access to dma_buffer */
+ struct mutex dma_buffer_mutex;
+
+ /* scatter request list head */
+ struct list_head scat_req;
+
+ atomic_t irq_handling;
+ wait_queue_head_t irq_wq;
+
+ /* protects access to scat_req */
+ spinlock_t scat_lock;
+
+ bool scatter_enabled;
+
+ bool is_disabled;
+ const struct sdio_device_id *id;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+
+ /* protects access to wr_asyncq */
+ spinlock_t wr_async_lock;
+};
+
+#define CMD53_ARG_READ 0
+#define CMD53_ARG_WRITE 1
+#define CMD53_ARG_BLOCK_BASIS 1
+#define CMD53_ARG_FIXED_ADDRESS 0
+#define CMD53_ARG_INCR_ADDRESS 1
+
+static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/*
+ * Check whether a DMA buffer is WORD-aligned and DMA-able.
+ * Most host controllers assume the buffer is DMA'able and will
+ * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
+ * check fails on stack memory.
+ */
+static inline bool buf_needs_bounce(u8 *buf)
+{
+ return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
+}
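A buffer is bounced when it is not 4-byte aligned or not valid for DMA. The sketch below reproduces only the alignment half of that test, since virt_addr_valid() exists only in the kernel:

#include <stdio.h>
#include <stdint.h>

/* Alignment half of the bounce test above; virt_addr_valid() is a kernel
 * helper, so this userspace sketch only checks 4-byte alignment. */
static int needs_bounce(const void *buf)
{
	return ((uintptr_t)buf & 0x3) != 0;
}

int main(void)
{
	uint32_t words[2];			/* 4-byte aligned storage */
	uint8_t *p = (uint8_t *)words;

	printf("offset 0 needs bounce: %d\n", needs_bounce(p));	/* 0 */
	printf("offset 1 needs bounce: %d\n", needs_bounce(p + 1));	/* 1 */
	return 0;
}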
+
+static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
+{
+ struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
+
+ /* EP1 has an extended range */
+ mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
+ mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
+ mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
+ mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
+ mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
+}
+
+static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
+ u8 mode, u8 opcode, u32 addr,
+ u16 blksz)
+{
+ *arg = (((rw & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((mode & 1) << 27) |
+ ((opcode & 1) << 26) |
+ ((addr & 0x1FFFF) << 9) |
+ (blksz & 0x1FF));
+}
+
+static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ const u8 func = 0;
+
+ *arg = ((write & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((raw & 1) << 27) |
+ (1 << 26) |
+ ((address & 0x1FFFF) << 9) |
+ (1 << 8) |
+ (val & 0xFF);
+}
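Both helpers pack the SDIO command argument by shifting each field into its slot: the R/W flag in bit 31, the function number in bits 28-30, mode and opcode below that, then a 17-bit address and the block/byte count. A standalone illustration of the CMD53 layout with arbitrary example values (not values the driver actually issues):

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as ath6kl_sdio_set_cmd53_arg() above, reproduced in a
 * standalone helper for illustration. */
static uint32_t cmd53_arg(uint8_t rw, uint8_t func, uint8_t mode,
			  uint8_t opcode, uint32_t addr, uint16_t blksz)
{
	return ((rw & 1u) << 31) |
	       ((func & 0x7u) << 28) |
	       ((mode & 1u) << 27) |
	       ((opcode & 1u) << 26) |
	       ((addr & 0x1FFFFu) << 9) |
	       (blksz & 0x1FFu);
}

int main(void)
{
	/* Hypothetical values: write, function 1, block mode, incrementing
	 * address, address 0x800, 4 blocks. */
	uint32_t arg = cmd53_arg(1, 1, 1, 1, 0x800, 4);

	printf("CMD53 arg = 0x%08x\n", arg);	/* 0x9c100004 */
	return 0;
}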
+
+static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
+ u8 *buf, u32 len)
+{
+ int ret = 0;
+
+ sdio_claim_host(func);
+
+ if (request & HIF_WRITE) {
+ /* FIXME: looks like ugly workaround for something */
+ if (addr >= HIF_MBOX_BASE_ADDR &&
+ addr <= HIF_MBOX_END_ADDR)
+ addr += (HIF_MBOX_WIDTH - len);
+
+ /* FIXME: this also looks like ugly workaround */
+ if (addr == HIF_MBOX0_EXT_BASE_ADDR)
+ addr += HIF_MBOX0_EXT_WIDTH - len;
+
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ } else {
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ }
+
+ sdio_release_host(func);
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
+ request & HIF_WRITE ? "wr" : "rd", addr,
+ request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
+
+ trace_ath6kl_sdio(addr, request, buf, len);
+
+ return ret;
+}
+
+static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
+{
+ struct bus_request *bus_req;
+
+ spin_lock_bh(&ar_sdio->lock);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ spin_unlock_bh(&ar_sdio->lock);
+ return NULL;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct bus_request, list);
+ list_del(&bus_req->list);
+
+ spin_unlock_bh(&ar_sdio->lock);
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ return bus_req;
+}
+
+static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *bus_req)
+{
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ spin_lock_bh(&ar_sdio->lock);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_bh(&ar_sdio->lock);
+}
+
+static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+ data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
+ (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
+ data->blksz, data->blocks, scat_req->len,
+ scat_req->scat_entries);
+
+ data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
+ MMC_DATA_READ;
+
+ /* fill SG entries */
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
+ /* assemble SG list */
+ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+ i, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+
+ sg_set_buf(sg, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+ }
+
+ /* set scatter-gather table for request */
+ data->sg = scat_req->sgentries;
+ data->sg_len = scat_req->scat_entries;
+}
+
+static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ struct mmc_request mmc_req;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct hif_scatter_req *scat_req;
+ u8 opcode, rw;
+ int status, len;
+
+ scat_req = req->scat_req;
+
+ if (scat_req->virt_scat) {
+ len = scat_req->len;
+ if (scat_req->req & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
+ scat_req->addr, scat_req->virt_dma_buf,
+ len);
+ goto scat_complete;
+ }
+
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ ath6kl_sdio_setup_scat_data(scat_req, &data);
+
+ opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
+ CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
+
+ rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
+
+ /* Fixup the address so that the last byte will fall on MBOX EOM */
+ if (scat_req->req & HIF_WRITE) {
+ if (scat_req->addr == HIF_MBOX_BASE_ADDR)
+ scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
+ else
+ /* Uses extended address range */
+ scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
+ }
+
+ /* set command argument */
+ ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
+ CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &cmd;
+ mmc_req.data = &data;
+
+ sdio_claim_host(ar_sdio->func);
+
+ mmc_set_data_timeout(&data, ar_sdio->func->card);
+
+ trace_ath6kl_sdio_scat(scat_req->addr,
+ scat_req->req,
+ scat_req->len,
+ scat_req->scat_entries,
+ scat_req->scat_list);
+
+ /* synchronous call to process request */
+ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+
+ sdio_release_host(ar_sdio->func);
+
+ status = cmd.error ? cmd.error : data.error;
+
+scat_complete:
+ scat_req->status = status;
+
+ if (scat_req->status)
+ ath6kl_err("Scatter write request failed:%d\n",
+ scat_req->status);
+
+ if (scat_req->req & HIF_ASYNCHRONOUS)
+ scat_req->complete(ar_sdio->ar->htc_target, scat_req);
+
+ return status;
+}
+
+static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
+ int n_scat_entry, int n_scat_req,
+ bool virt_scat)
+{
+ struct hif_scatter_req *s_req;
+ struct bus_request *bus_req;
+ int i, scat_req_sz, scat_list_sz, size;
+ u8 *virt_buf;
+
+ scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*s_req) + scat_list_sz;
+
+ if (!virt_scat)
+ size = sizeof(struct scatterlist) * n_scat_entry;
+ else
+ size = 2 * L1_CACHE_BYTES +
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ for (i = 0; i < n_scat_req; i++) {
+ /* allocate the scatter request */
+ s_req = kzalloc(scat_req_sz, GFP_KERNEL);
+ if (!s_req)
+ return -ENOMEM;
+
+ if (virt_scat) {
+ virt_buf = kzalloc(size, GFP_KERNEL);
+ if (!virt_buf) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ s_req->virt_dma_buf =
+ (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
+ } else {
+ /* allocate sglist */
+ s_req->sgentries = kzalloc(size, GFP_KERNEL);
+
+ if (!s_req->sgentries) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+ }
+
+ /* allocate a bus request for this scatter request */
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+ if (!bus_req) {
+ kfree(s_req->sgentries);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ /* assign the scatter request to this bus request */
+ bus_req->scat_req = s_req;
+ s_req->busrequest = bus_req;
+
+ s_req->virt_scat = virt_scat;
+
+ /* add it to the scatter pool */
+ hif_scatter_req_add(ar_sdio->ar, s_req);
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u8 *tbuf = NULL;
+ int ret;
+ bool bounced = false;
+
+ if (request & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ if (buf_needs_bounce(buf)) {
+ if (!ar_sdio->dma_buffer)
+ return -ENOMEM;
+ mutex_lock(&ar_sdio->dma_buffer_mutex);
+ tbuf = ar_sdio->dma_buffer;
+
+ if (request & HIF_WRITE)
+ memcpy(tbuf, buf, len);
+
+ bounced = true;
+ } else {
+ tbuf = buf;
+ }
+
+ ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
+ if ((request & HIF_READ) && bounced)
+ memcpy(buf, tbuf, len);
+
+ if (bounced)
+ mutex_unlock(&ar_sdio->dma_buffer_mutex);
+
+ return ret;
+}
+
+static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ if (req->scat_req) {
+ ath6kl_sdio_scat_rw(ar_sdio, req);
+ } else {
+ void *context;
+ int status;
+
+ status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+ req->buffer, req->length,
+ req->request);
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kl_hif_rw_comp_handler(context, status);
+ }
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath6kl_sdio *ar_sdio;
+ struct bus_request *req, *tmp_req;
+
+ ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+ __ath6kl_sdio_write_async(ar_sdio, req);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ }
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+ int status;
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
+
+ ar_sdio = sdio_get_drvdata(func);
+ atomic_set(&ar_sdio->irq_handling, 1);
+ /*
+ * Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
+ sdio_claim_host(ar_sdio->func);
+
+ atomic_set(&ar_sdio->irq_handling, 0);
+ wake_up(&ar_sdio->irq_wq);
+
+ WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret = 0;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath6kl_err("Unable to enable sdio func: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /*
+ * Wait for hardware to initialise. It should take a lot less than
+ * 10 ms but let's be conservative here.
+ */
+ msleep(10);
+
+ ar_sdio->is_disabled = false;
+
+ return ret;
+}
+
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return 0;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ return ret;
+
+ ar_sdio->is_disabled = true;
+
+ return ret;
+}
+
+static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *bus_req;
+
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+
+ if (WARN_ON_ONCE(!bus_req))
+ return -ENOMEM;
+
+ bus_req->address = address;
+ bus_req->buffer = buffer;
+ bus_req->length = length;
+ bus_req->request = request;
+ bus_req->packet = packet;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
+ if (ret)
+ ath6kl_err("Failed to claim sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+
+ return !atomic_read(&ar_sdio->irq_handling);
+}
+
+static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ if (atomic_read(&ar_sdio->irq_handling)) {
+ sdio_release_host(ar_sdio->func);
+
+ ret = wait_event_interruptible(ar_sdio->irq_wq,
+ ath6kl_sdio_is_on_irq(ar));
+ if (ret)
+ return;
+
+ sdio_claim_host(ar_sdio->func);
+ }
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath6kl_err("Failed to release sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *node = NULL;
+
+ spin_lock_bh(&ar_sdio->scat_lock);
+
+ if (!list_empty(&ar_sdio->scat_req)) {
+ node = list_first_entry(&ar_sdio->scat_req,
+ struct hif_scatter_req, list);
+ list_del(&node->list);
+
+ node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
+ }
+
+ spin_unlock_bh(&ar_sdio->scat_lock);
+
+ return node;
+}
+
+static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+
+ spin_lock_bh(&ar_sdio->scat_lock);
+
+ list_add_tail(&s_req->list, &ar_sdio->scat_req);
+
+ spin_unlock_bh(&ar_sdio->scat_lock);
+}
+
+/* scatter gather read write request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u32 request = scat_req->req;
+ int status = 0;
+
+ if (!scat_req->len)
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (request & HIF_SYNCHRONOUS) {
+ status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
+ } else {
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+ }
+
+ return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *s_req, *tmp_req;
+
+ /* empty the free list */
+ spin_lock_bh(&ar_sdio->scat_lock);
+ list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+ list_del(&s_req->list);
+ spin_unlock_bh(&ar_sdio->scat_lock);
+
+ /*
+ * FIXME: should we also call completion handler with
+ * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
+ * that the packet is properly freed?
+ */
+ if (s_req->busrequest)
+ ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req->sgentries);
+ kfree(s_req);
+
+ spin_lock_bh(&ar_sdio->scat_lock);
+ }
+ spin_unlock_bh(&ar_sdio->scat_lock);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct htc_target *target = ar->htc_target;
+ int ret = 0;
+ bool virt_scat = false;
+
+ if (ar_sdio->scatter_enabled)
+ return 0;
+
+ ar_sdio->scatter_enabled = true;
+
+ /* check if host supports scatter and it meets our requirements */
+ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
+ ar_sdio->func->card->host->max_segs,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+ virt_scat = true;
+ }
+
+ if (!virt_scat) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ MAX_SCATTER_ENTRIES_PER_REQ,
+ MAX_SCATTER_REQUESTS, virt_scat);
+
+ if (!ret) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "hif-scatter enabled requests %d entries %d\n",
+ MAX_SCATTER_REQUESTS,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ MAX_SCATTER_REQ_TRANSFER_SIZE;
+ } else {
+ ath6kl_sdio_cleanup_scatter(ar);
+ ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
+ }
+ }
+
+ if (virt_scat || ret) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ ATH6KL_SCATTER_ENTRIES_PER_REQ,
+ ATH6KL_SCATTER_REQS, virt_scat);
+
+ if (ret) {
+ ath6kl_err("failed to alloc virtual scatter resources !\n");
+ ath6kl_sdio_cleanup_scatter(ar);
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "virtual scatter enabled requests %d entries %d\n",
+ ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ int ret;
+
+ flags = sdio_get_host_pm_caps(func);
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+ if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
+ !(flags & MMC_PM_KEEP_POWER))
+ return -EINVAL;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret) {
+ ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
+ return ret;
+ }
+
+ /* sdio irq wakes up host */
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ bool try_deepsleep = false;
+ int ret;
+
+ if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
+ (!ar->suspend_mode && wow)) {
+ ret = ath6kl_set_sdio_pm_caps(ar);
+ if (ret)
+ goto cut_pwr;
+
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+ if (ret && ret != -ENOTCONN)
+ ath6kl_err("wow suspend failed: %d\n", ret);
+
+ if (ret &&
+ (!ar->wow_suspend_mode ||
+ ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
+ try_deepsleep = true;
+ else if (ret &&
+ ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
+ goto cut_pwr;
+ if (!ret)
+ return 0;
+ }
+
+ if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
+ !ar->suspend_mode || try_deepsleep) {
+ flags = sdio_get_host_pm_caps(func);
+ if (!(flags & MMC_PM_KEEP_POWER))
+ goto cut_pwr;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ goto cut_pwr;
+
+ /*
+ * Workaround to support Deep Sleep with MSM: set the host pm
+ * flag to MMC_PM_WAKE_SDIO_IRQ to allow the SDCC driver to disable
+ * sdc2_clock and internally allow MSM to enter
+ * TCXO shutdown properly.
+ */
+ if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
+ ret = sdio_set_host_pm_flags(func,
+ MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ goto cut_pwr;
+ }
+
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ NULL);
+ if (ret)
+ goto cut_pwr;
+
+ return 0;
+ }
+
+cut_pwr:
+ if (func->card && func->card->host)
+ func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;
+
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
+}
+
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+ switch (ar->state) {
+ case ATH6KL_STATE_OFF:
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath6kl_sdio_config(ar);
+ break;
+
+ case ATH6KL_STATE_ON:
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ break;
+
+ case ATH6KL_STATE_WOW:
+ break;
+
+ case ATH6KL_STATE_SUSPENDING:
+ break;
+
+ case ATH6KL_STATE_RESUMING:
+ break;
+
+ case ATH6KL_STATE_RECOVERY:
+ break;
+ }
+
+ ath6kl_cfg80211_resume(ar);
+
+ return 0;
+}
+
+/* set the window address register (using 4-byte register access). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ u8 addr_val[4];
+ s32 i;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes;
+ * the LSB is written last to initiate the access cycle.
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times.
+ */
+ memset(addr_val, ((u8 *)&addr)[i], 4);
+
+ /*
+ * Hit each byte of the register address with a 4-byte
+ * write operation to the same address, this is a harmless
+ * operation.
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
+ return status;
+ }
+
+ /*
+ * Write the address register again, this time write the whole
+ * 4-byte value. The effect here is that the LSB write causes the
+ * cycle to start; the extra writes to bytes 1,2,3 have no
+ * effect since we are writing the same values again.
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ int status;
+
+ /* set window register to start read cycle */
+ status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+ address);
+
+ if (status)
+ return status;
+
+ /* read the data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to read from window data addr\n",
+ __func__);
+ return status;
+ }
+
+ return status;
+}
+
+static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 data)
+{
+ int status;
+ u32 val = (__force u32) data;
+
+ /* set write data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window data addr\n",
+ __func__, data);
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ address);
+}
+
+static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+ /*
+ * Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath6kl_sdio_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath6kl_sdio_read_write_sync(ar,
+ RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to send the bmi data to the device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *req, *tmp_req;
+ void *context;
+
+ /* FIXME: make sure that wq is not queued again */
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+
+ if (req->scat_req) {
+ /* this is a scatter gather request */
+ req->scat_req->status = -ECANCELED;
+ req->scat_req->complete(ar_sdio->ar->htc_target,
+ req->scat_req);
+ } else {
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+ }
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+ .read_write_sync = ath6kl_sdio_read_write_sync,
+ .write_async = ath6kl_sdio_write_async,
+ .irq_enable = ath6kl_sdio_irq_enable,
+ .irq_disable = ath6kl_sdio_irq_disable,
+ .scatter_req_get = ath6kl_sdio_scatter_req_get,
+ .scatter_req_add = ath6kl_sdio_scatter_req_add,
+ .enable_scatter = ath6kl_sdio_enable_scatter,
+ .scat_req_rw = ath6kl_sdio_async_rw_scatter,
+ .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
+ .suspend = ath6kl_sdio_suspend,
+ .resume = ath6kl_sdio_resume,
+ .diag_read32 = ath6kl_sdio_diag_read32,
+ .diag_write32 = ath6kl_sdio_diag_write32,
+ .bmi_read = ath6kl_sdio_bmi_read,
+ .bmi_write = ath6kl_sdio_bmi_write,
+ .power_on = ath6kl_sdio_power_on,
+ .power_off = ath6kl_sdio_power_off,
+ .stop = ath6kl_sdio_stop,
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that the mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow the cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+ return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+ ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int ath6kl_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+ struct ath6kl_sdio *ar_sdio;
+ struct ath6kl *ar;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
+ if (!ar_sdio)
+ return -ENOMEM;
+
+ ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!ar_sdio->dma_buffer) {
+ ret = -ENOMEM;
+ goto err_hif;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->id = id;
+ ar_sdio->is_disabled = true;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->scat_lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->dma_buffer_mutex);
+
+ INIT_LIST_HEAD(&ar_sdio->scat_req);
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+
+ init_waitqueue_head(&ar_sdio->irq_wq);
+
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
+ ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
+
+ ar = ath6kl_core_create(&ar_sdio->func->dev);
+ if (!ar) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ ar_sdio->ar = ar;
+ ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
+ ar->hif_priv = ar_sdio;
+ ar->hif_ops = &ath6kl_sdio_ops;
+ ar->bmi.max_data_size = 256;
+
+ ath6kl_sdio_set_mbox_info(ar);
+
+ ret = ath6kl_sdio_config(ar);
+ if (ret) {
+ ath6kl_err("Failed to config sdio: %d\n", ret);
+ goto err_core_alloc;
+ }
+
+ ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core\n");
+ goto err_core_alloc;
+ }
+
+ return ret;
+
+err_core_alloc:
+ ath6kl_core_destroy(ar_sdio->ar);
+err_dma:
+ kfree(ar_sdio->dma_buffer);
+err_hif:
+ kfree(ar_sdio);
+
+ return ret;
+}
+
+static void ath6kl_sdio_remove(struct sdio_func *func)
+{
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ ar_sdio = sdio_get_drvdata(func);
+
+ ath6kl_stop_txrx(ar_sdio->ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ ath6kl_core_cleanup(ar_sdio->ar);
+ ath6kl_core_destroy(ar_sdio->ar);
+
+ kfree(ar_sdio->dma_buffer);
+ kfree(ar_sdio);
+}
+
+static const struct sdio_device_id ath6kl_sdio_devices[] = {
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
+
+static struct sdio_driver ath6kl_sdio_driver = {
+ .name = "ath6kl_sdio",
+ .id_table = ath6kl_sdio_devices,
+ .probe = ath6kl_sdio_probe,
+ .remove = ath6kl_sdio_remove,
+ .drv.pm = ATH6KL_SDIO_PM_OPS,
+};
+
+static int __init ath6kl_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath6kl_sdio_driver);
+ if (ret)
+ ath6kl_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath6kl_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath6kl_sdio_driver);
+}
+
+module_init(ath6kl_sdio_init);
+module_exit(ath6kl_sdio_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
new file mode 100644
index 000000000..d5eeeae77
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef TARGET_H
+#define TARGET_H
+
+#define AR6003_BOARD_DATA_SZ 1024
+#define AR6003_BOARD_EXT_DATA_SZ 768
+#define AR6003_BOARD_EXT_DATA_SZ_V2 1024
+
+#define AR6004_BOARD_DATA_SZ 6144
+#define AR6004_BOARD_EXT_DATA_SZ 0
+
+#define RESET_CONTROL_ADDRESS 0x00004000
+#define RESET_CONTROL_COLD_RST 0x00000100
+#define RESET_CONTROL_MBOX_RST 0x00000004
+
+#define CPU_CLOCK_STANDARD_S 0
+#define CPU_CLOCK_STANDARD 0x00000003
+#define CPU_CLOCK_ADDRESS 0x00000020
+
+#define CLOCK_CONTROL_ADDRESS 0x00000028
+#define CLOCK_CONTROL_LF_CLK32_S 2
+#define CLOCK_CONTROL_LF_CLK32 0x00000004
+
+#define SYSTEM_SLEEP_ADDRESS 0x000000c4
+#define SYSTEM_SLEEP_DISABLE_S 0
+#define SYSTEM_SLEEP_DISABLE 0x00000001
+
+#define LPO_CAL_ADDRESS 0x000000e0
+#define LPO_CAL_ENABLE_S 20
+#define LPO_CAL_ENABLE 0x00100000
+
+#define GPIO_PIN9_ADDRESS 0x0000004c
+#define GPIO_PIN10_ADDRESS 0x00000050
+#define GPIO_PIN11_ADDRESS 0x00000054
+#define GPIO_PIN12_ADDRESS 0x00000058
+#define GPIO_PIN13_ADDRESS 0x0000005c
+
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_ERROR_S 7
+#define HOST_INT_STATUS_ERROR 0x00000080
+
+#define HOST_INT_STATUS_CPU_S 6
+#define HOST_INT_STATUS_CPU 0x00000040
+
+#define HOST_INT_STATUS_COUNTER_S 4
+#define HOST_INT_STATUS_COUNTER 0x00000010
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_WAKEUP_S 2
+#define ERROR_INT_STATUS_WAKEUP 0x00000004
+
+#define ERROR_INT_STATUS_RX_UNDERFLOW_S 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW 0x00000002
+
+#define ERROR_INT_STATUS_TX_OVERFLOW_S 0
+#define ERROR_INT_STATUS_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_S 0
+#define COUNTER_INT_STATUS_COUNTER 0x000000ff
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_ERROR_S 7
+#define INT_STATUS_ENABLE_ERROR 0x00000080
+
+#define INT_STATUS_ENABLE_CPU_S 6
+#define INT_STATUS_ENABLE_CPU 0x00000040
+
+#define INT_STATUS_ENABLE_INT_S 5
+#define INT_STATUS_ENABLE_INT 0x00000020
+#define INT_STATUS_ENABLE_COUNTER_S 4
+#define INT_STATUS_ENABLE_COUNTER 0x00000010
+
+#define INT_STATUS_ENABLE_MBOX_DATA_S 0
+#define INT_STATUS_ENABLE_MBOX_DATA 0x0000000f
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_S 0
+#define CPU_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW 0x00000002
+
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_S 0
+#define COUNTER_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define COUNT_ADDRESS 0x00000420
+
+#define COUNT_DEC_ADDRESS 0x00000440
+
+#define WINDOW_DATA_ADDRESS 0x00000474
+#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
+#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
+#define CPU_DBG_SEL_ADDRESS 0x00000483
+#define CPU_DBG_ADDRESS 0x00000484
+
+#define LOCAL_SCRATCH_ADDRESS 0x000000c0
+#define ATH6KL_OPTION_SLEEP_DISABLE 0x08
+
+#define RTC_BASE_ADDRESS 0x00004000
+#define GPIO_BASE_ADDRESS 0x00014000
+#define MBOX_BASE_ADDRESS 0x00018000
+#define ANALOG_INTF_BASE_ADDRESS 0x0001c000
+
+/* real name of the register is unknown */
+#define ATH6KL_ANALOG_PLL_REGISTER (ANALOG_INTF_BASE_ADDRESS + 0x284)
+
+#define SM(f, v) (((v) << f##_S) & f)
+#define MS(f, v) (((v) & f) >> f##_S)
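+
+/*
+ * Example: SM(HOST_INT_STATUS_ERROR, 1) shifts the value into the field
+ * (yielding 0x80), while MS(HOST_INT_STATUS_ERROR, reg) extracts the field
+ * value back out of a register word.
+ */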
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end).
+ */
+#define ATH6KL_AR6003_HI_START_ADDR 0x00540600
+#define ATH6KL_AR6004_HI_START_ADDR 0x00400800
+
+/*
+ * These are items that the Host may need to access
+ * via BMI or via the Diagnostic Window. The position
+ * of items in this structure must remain constant
+ * across firmware revisions!
+ *
+ * Types for each item must be fixed size across target and host platforms.
+ * The structure is used only to calculate the offset of each item with the
+ * HI_ITEM() macro; no values are stored in it.
+ *
+ * More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused1; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /*
+ * Flash configuration overrides, used only
+ * when firmware is not executing from flash.
+ * (When using flash, modify the global variables
+ * with equivalent names.)
+ */
+ u32 hi_bank0_addr_value; /* 0x44 */
+ u32 hi_bank0_read_value; /* 0x48 */
+ u32 hi_bank0_write_value; /* 0x4c */
+ u32 hi_bank0_config_value; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_tbl; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+ /*
+ * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
+ * pin
+ */
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+ /*
+ * NOTE: byte [0] = RESET pin (bit 7 is polarity),
+ * bytes[1]..bytes[3] are for future use
+ */
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+
+ /* Pointer to extended board data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+
+ /*
+ * Bit [0]: valid
+ * Bits [31:16]: size
+ */
+ /*
+ * hi_reset_flag selects what is done on a target reset, such as
+ * restoring app_start after a warm reset, or preserving the host
+ * interest area, ROM data, literals etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /*
+ * 0xbc - [31:0]: idle timeout in ms
+ */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic -
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
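+
+/*
+ * Example: HI_ITEM(hi_board_data) evaluates to 0x54, the byte offset of
+ * the board data pointer within host_interest; adding it to the
+ * ATH6KL_AR6003/AR6004_HI_START_ADDR above gives the item's address in
+ * target RAM.
+ */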
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+#define HI_OPTION_FW_MODE_IBSS 0x0
+#define HI_OPTION_FW_MODE_BSS_STA 0x1
+#define HI_OPTION_FW_MODE_AP 0x2
+
+#define HI_OPTION_FW_SUBMODE_NONE 0x0
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3
+
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/* Fw Mode/SubMode Mask
+|-------------------------------------------------------------------------------|
+|   SUB   |   SUB   |   SUB   |   SUB   |         |         |         |         |
+| MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] |
+|   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |
+|-------------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+
+/* Convert a Target virtual address into a Target physical address */
+#define AR6003_VTOP(vaddr) ((vaddr) & 0x001fffff)
+#define AR6004_VTOP(vaddr) (vaddr)
+
+#define TARG_VTOP(target_type, vaddr) \
+ (((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
+ (((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
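+
+/*
+ * Example: TARG_VTOP(TARGET_TYPE_AR6003, ATH6KL_AR6003_HI_START_ADDR)
+ * masks the virtual address 0x00540600 down to the physical address
+ * 0x00140600; AR6004 addresses pass through unchanged.
+ */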
+
+#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
+
+struct ath6kl_dbglog_buf {
+ __le32 next;
+ __le32 buffer_addr;
+ __le32 bufsize;
+ __le32 length;
+ __le32 count;
+ __le32 free;
+} __packed;
+
+struct ath6kl_dbglog_hdr {
+ __le32 dbuf_addr;
+ __le32 dropped;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
new file mode 100644
index 000000000..d67170ea1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "testmode.h"
+#include "debug.h"
+
+#include <net/netlink.h>
+
+enum ath6kl_tm_attr {
+ __ATH6KL_TM_ATTR_INVALID = 0,
+ ATH6KL_TM_ATTR_CMD = 1,
+ ATH6KL_TM_ATTR_DATA = 2,
+
+ /* keep last */
+ __ATH6KL_TM_ATTR_AFTER_LAST,
+ ATH6KL_TM_ATTR_MAX = __ATH6KL_TM_ATTR_AFTER_LAST - 1,
+};
+
+enum ath6kl_tm_cmd {
+ ATH6KL_TM_CMD_TCMD = 0,
+ ATH6KL_TM_CMD_RX_REPORT = 1, /* not used anymore */
+};
+
+#define ATH6KL_TM_DATA_MAX_LEN 5000
+
+static const struct nla_policy ath6kl_tm_policy[ATH6KL_TM_ATTR_MAX + 1] = {
+ [ATH6KL_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH6KL_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH6KL_TM_DATA_MAX_LEN },
+};
+
+void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
+{
+ struct sk_buff *skb;
+
+ if (!buf || buf_len == 0)
+ return;
+
+ skb = cfg80211_testmode_alloc_event_skb(ar->wiphy, buf_len, GFP_KERNEL);
+ if (!skb) {
+ ath6kl_warn("failed to allocate testmode rx skb!\n");
+ return;
+ }
+ if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
+ nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
+ goto nla_put_failure;
+ cfg80211_testmode_event(skb, GFP_KERNEL);
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+ ath6kl_warn("nla_put failed on testmode rx skb!\n");
+}
+
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
+ int err, buf_len;
+ void *buf;
+
+ err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len,
+ ath6kl_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[ATH6KL_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) {
+ case ATH6KL_TM_CMD_TCMD:
+ if (!tb[ATH6KL_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]);
+
+ ath6kl_wmi_test_cmd(ar->wmi, buf, buf_len);
+
+ return 0;
+ case ATH6KL_TM_CMD_RX_REPORT:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
new file mode 100644
index 000000000..9fbcdec3e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
+
+#else
+
+static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
+ size_t buf_len)
+{
+}
+
+static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/trace.c b/drivers/net/wireless/ath/ath6kl/trace.c
new file mode 100644
index 000000000..e7d64b128
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio);
+EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat);
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
new file mode 100644
index 000000000..1a1ea7881
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -0,0 +1,332 @@
+#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <net/cfg80211.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+#include "wmi.h"
+#include "hif.h"
+
+#if !defined(_ATH6KL_TRACE_H)
+static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len)
+{
+ struct wmi_cmd_hdr *hdr = buf;
+
+ if (buf_len < sizeof(*hdr))
+ return 0;
+
+ return le16_to_cpu(hdr->cmd_id);
+}
+#endif /* _ATH6KL_TRACE_H */
+
+#define _ATH6KL_TRACE_H
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH6KL_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH6KL_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath6kl
+
+TRACE_EVENT(ath6kl_wmi_cmd,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = ath6kl_get_wmi_id(buf, buf_len);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zd",
+ __entry->id, __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_wmi_event,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = ath6kl_get_wmi_id(buf, buf_len);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zd",
+ __entry->id, __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio,
+ TP_PROTO(unsigned int addr, int flags,
+ void *buf, size_t buf_len),
+
+ TP_ARGS(addr, flags, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, tx)
+ __field(unsigned int, addr)
+ __field(int, flags)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->flags = flags;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+
+ if (flags & HIF_WRITE)
+ __entry->tx = 1;
+ else
+ __entry->tx = 0;
+ ),
+
+ TP_printk(
+ "%s addr 0x%x flags 0x%x len %zd\n",
+ __entry->tx ? "tx" : "rx",
+ __entry->addr,
+ __entry->flags,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio_scat,
+ TP_PROTO(unsigned int addr, int flags, unsigned int total_len,
+ unsigned int entries, struct hif_scatter_item *list),
+
+ TP_ARGS(addr, flags, total_len, entries, list),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, tx)
+ __field(unsigned int, addr)
+ __field(int, flags)
+ __field(unsigned int, entries)
+ __field(size_t, total_len)
+ __dynamic_array(unsigned int, len_array, entries)
+ __dynamic_array(u8, data, total_len)
+ ),
+
+ TP_fast_assign(
+ unsigned int *len_array;
+ int i, offset = 0;
+ size_t len;
+
+ __entry->addr = addr;
+ __entry->flags = flags;
+ __entry->entries = entries;
+ __entry->total_len = total_len;
+
+ if (flags & HIF_WRITE)
+ __entry->tx = 1;
+ else
+ __entry->tx = 0;
+
+ len_array = __get_dynamic_array(len_array);
+
+ for (i = 0; i < entries; i++) {
+ len = list[i].len;
+
+ memcpy((u8 *) __get_dynamic_array(data) + offset,
+ list[i].buf, len);
+
+ len_array[i] = len;
+ offset += len;
+ }
+ ),
+
+ TP_printk(
+ "%s addr 0x%x flags 0x%x entries %d total_len %zd\n",
+ __entry->tx ? "tx" : "rx",
+ __entry->addr,
+ __entry->flags,
+ __entry->entries,
+ __entry->total_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio_irq,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "irq len %zd\n", __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_htc_rx,
+ TP_PROTO(int status, int endpoint, void *buf,
+ size_t buf_len),
+
+ TP_ARGS(status, endpoint, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, endpoint)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->endpoint = endpoint;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "status %d endpoint %d len %zd\n",
+ __entry->status,
+ __entry->endpoint,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_htc_tx,
+ TP_PROTO(int status, int endpoint, void *buf,
+ size_t buf_len),
+
+ TP_ARGS(status, endpoint, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, endpoint)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->endpoint = endpoint;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "status %d endpoint %d len %zd\n",
+ __entry->status,
+ __entry->endpoint,
+ __entry->buf_len
+ )
+);
+
+#define ATH6KL_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath6kl_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH6KL_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH6KL_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(ath6kl_log_dbg,
+ TP_PROTO(unsigned int level, struct va_format *vaf),
+ TP_ARGS(level, vaf),
+ TP_STRUCT__entry(
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH6KL_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH6KL_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(ath6kl_log_dbg_dump,
+ TP_PROTO(const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s/%s\n", __get_str(prefix), __get_str(msg)
+ )
+);
+
+#endif /* _ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
new file mode 100644
index 000000000..40432fe7a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -0,0 +1,1877 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "core.h"
+#include "debug.h"
+#include "htc-ops.h"
+#include "trace.h"
+
+/*
+ * tid - tid_mux0..tid_mux3
+ * aid - tid_mux4..tid_mux7
+ */
+#define ATH6KL_TID_MASK 0xf
+#define ATH6KL_AID_SHIFT 4
+
+static inline u8 ath6kl_get_tid(u8 tid_mux)
+{
+ return tid_mux & ATH6KL_TID_MASK;
+}
+
+static inline u8 ath6kl_get_aid(u8 tid_mux)
+{
+ return tid_mux >> ATH6KL_AID_SHIFT;
+}
+
+static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
+ u32 *map_no)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ethhdr *eth_hdr;
+ u32 i, ep_map = -1;
+ u8 *datap;
+
+ *map_no = 0;
+ datap = skb->data;
+ eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
+
+ if (is_multicast_ether_addr(eth_hdr->h_dest))
+ return ENDPOINT_2;
+
+ for (i = 0; i < ar->node_num; i++) {
+ if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
+ ETH_ALEN) == 0) {
+ *map_no = i + 1;
+ ar->node_map[i].tx_pend++;
+ return ar->node_map[i].ep_id;
+ }
+
+ if ((ep_map == -1) && !ar->node_map[i].tx_pend)
+ ep_map = i;
+ }
+
+ if (ep_map == -1) {
+ ep_map = ar->node_num;
+ ar->node_num++;
+ if (ar->node_num > MAX_NODE_NUM)
+ return ENDPOINT_UNUSED;
+ }
+
+ memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
+
+ for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
+ if (!ar->tx_pending[i]) {
+ ar->node_map[ep_map].ep_id = i;
+ break;
+ }
+
+ /*
+ * No free endpoint is available; start redistribution on
+ * the in-use endpoints.
+ */
+ if (i == ENDPOINT_5) {
+ ar->node_map[ep_map].ep_id = ar->next_ep_id;
+ ar->next_ep_id++;
+ if (ar->next_ep_id > ENDPOINT_5)
+ ar->next_ep_id = ENDPOINT_2;
+ }
+ }
+
+ *map_no = ep_map + 1;
+ ar->node_map[ep_map].tx_pend++;
+
+ return ar->node_map[ep_map].ep_id;
+}
+
+static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
+ struct ath6kl_vif *vif,
+ struct sk_buff *skb,
+ u32 *flags)
+{
+ struct ath6kl *ar = vif->ar;
+ bool is_apsdq_empty = false;
+ struct ethhdr *datap = (struct ethhdr *) skb->data;
+ u8 up = 0, traffic_class, *ip_hdr;
+ u16 ether_type;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+
+ if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
+ /*
+ * This tx is because of a uAPSD trigger; determine the
+ * More and EOSP bits. Set EOSP if the queue is empty or
+ * sufficient frames have been delivered for this trigger.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->apsdq))
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
+ else if (conn->sta_flags & STA_PS_APSD_EOSP)
+ *flags |= WMI_DATA_HDR_FLAGS_EOSP;
+ *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
+ spin_unlock_bh(&conn->psq_lock);
+ return false;
+ } else if (!conn->apsd_info) {
+ return false;
+ }
+
+ if (test_bit(WMM_ENABLED, &vif->flags)) {
+ ether_type = be16_to_cpu(datap->h_proto);
+ if (is_ethertype(ether_type)) {
+ /* packet is in DIX format */
+ ip_hdr = (u8 *)(datap + 1);
+ } else {
+ /* packet is in 802.3 format */
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)
+ (datap + 1);
+ ether_type = be16_to_cpu(llc_hdr->eth_type);
+ ip_hdr = (u8 *)(llc_hdr + 1);
+ }
+
+ if (ether_type == IP_ETHERTYPE)
+ up = ath6kl_wmi_determine_user_priority(
+ ip_hdr, 0);
+ }
+
+ traffic_class = ath6kl_wmi_get_traffic_class(up);
+
+ if ((conn->apsd_info & (1 << traffic_class)) == 0)
+ return false;
+
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ skb_queue_tail(&conn->apsdq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this STA
+ */
+ if (is_apsdq_empty) {
+ ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 1, 0);
+ }
+ *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
+
+ return true;
+}
+
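+/*
+ * Legacy (PS-Poll based) power-save handling. Returns true if the skb
+ * was queued on the station's psq; returns false if it should go out
+ * now, with the More-data flag set when further frames remain queued.
+ */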
+static bool ath6kl_process_psq(struct ath6kl_sta *conn,
+ struct ath6kl_vif *vif,
+ struct sk_buff *skb,
+ u32 *flags)
+{
+ bool is_psq_empty = false;
+ struct ath6kl *ar = vif->ar;
+
+ if (conn->sta_flags & STA_PS_POLLED) {
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq))
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
+ spin_unlock_bh(&conn->psq_lock);
+ return false;
+ }
+
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq);
+ skb_queue_tail(&conn->psq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 1);
+ return true;
+}
+
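+/*
+ * AP-mode power-save filter for outgoing frames. Returns true when the
+ * skb has been consumed (queued for a sleeping station, queued on the
+ * multicast PS queue, or dropped because no station entry was found);
+ * false means the caller should transmit it with the updated *flags.
+ */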
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
+ u32 *flags)
+{
+ struct ethhdr *datap = (struct ethhdr *) skb->data;
+ struct ath6kl_sta *conn = NULL;
+ bool ps_queued = false;
+ struct ath6kl *ar = vif->ar;
+
+ if (is_multicast_ether_addr(datap->h_dest)) {
+ u8 ctr = 0;
+ bool q_mcast = false;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
+ q_mcast = true;
+ break;
+ }
+ }
+
+ if (q_mcast) {
+ /*
+ * If this transmit is not because of a DTIM expiry,
+ * queue it.
+ */
+ if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
+ bool is_mcastq_empty = false;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ is_mcastq_empty =
+ skb_queue_empty(&ar->mcastpsq);
+ skb_queue_tail(&ar->mcastpsq, skb);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /*
+ * If this is the first Mcast pkt getting
+ * queued, indicate to the target to set the
+ * BitmapControl LSB of the TIM IE.
+ */
+ if (is_mcastq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
+ MCAST_AID, 1);
+
+ ps_queued = true;
+ } else {
+ /*
+ * This transmit is because of Dtim expiry.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&ar->mcastpsq_lock);
+ if (!skb_queue_empty(&ar->mcastpsq))
+ *flags |= WMI_DATA_HDR_FLAGS_MORE;
+ spin_unlock_bh(&ar->mcastpsq_lock);
+ }
+ }
+ } else {
+ conn = ath6kl_find_sta(vif, datap->h_dest);
+ if (!conn) {
+ dev_kfree_skb(skb);
+
+ /* Inform the caller that the skb is consumed */
+ return true;
+ }
+
+ if (conn->sta_flags & STA_PS_SLEEP) {
+ ps_queued = ath6kl_process_uapsdq(conn,
+ vif, skb, flags);
+ if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
+ ps_queued = ath6kl_process_psq(conn,
+ vif, skb, flags);
+ }
+ }
+ return ps_queued;
+}
+
+/* Tx functions */
+
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid)
+{
+ struct ath6kl *ar = devt;
+ int status = 0;
+ struct ath6kl_cookie *cookie = NULL;
+
+ trace_ath6kl_wmi_cmd(skb->data, skb->len);
+
+ if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
+ dev_kfree_skb(skb);
+ return -EACCES;
+ }
+
+ if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
+ eid >= ENDPOINT_MAX)) {
+ status = -EINVAL;
+ goto fail_ctrl_tx;
+ }
+
+ spin_lock_bh(&ar->lock);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
+ skb, skb->len, eid);
+
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
+ /*
+ * Control endpoint is full, don't allocate resources, we
+ * are just going to drop this packet.
+ */
+ cookie = NULL;
+ ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
+ skb, skb->len);
+ } else {
+ cookie = ath6kl_alloc_cookie(ar);
+ }
+
+ if (cookie == NULL) {
+ spin_unlock_bh(&ar->lock);
+ status = -ENOMEM;
+ goto fail_ctrl_tx;
+ }
+
+ ar->tx_pending[eid]++;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ cookie->skb = skb;
+ cookie->map_no = 0;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, ATH6KL_CONTROL_PKT_TAG);
+ cookie->htc_pkt.skb = skb;
+
+ /*
+ * This interface is asynchronous; if there is an error, cleanup
+ * will happen in the TX completion callback.
+ */
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_ctrl_tx:
+ dev_kfree_skb(skb);
+ return status;
+}
+
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_cookie *cookie = NULL;
+ enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u32 map_no = 0;
+ u16 htc_tag = ATH6KL_DATA_PKT_TAG;
+ u8 ac = 99; /* initialize to unmapped ac */
+ bool chk_adhoc_ps_mapping = false;
+ int ret;
+ struct wmi_tx_meta_v2 meta_v2;
+ void *meta;
+ u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
+ u8 meta_ver = 0;
+ u32 flags = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
+ skb, skb->data, skb->len);
+
+ /* If target is not associated */
+ if (!test_bit(CONNECTED, &vif->flags))
+ goto fail_tx;
+
+ if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
+ goto fail_tx;
+
+ if (!test_bit(WMI_READY, &ar->flag))
+ goto fail_tx;
+
+ /* AP mode Power saving processing */
+ if (vif->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(vif, skb, &flags))
+ return 0;
+ }
+
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
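+ /*
+ * Record the partial-checksum offsets relative to the network
+ * header before the DIX->802.3 conversion below; csum_start is
+ * biased by the LLC/SNAP header that conversion will insert.
+ * The offsets are passed to the target in the v2 TX meta so it
+ * can compute the checksum.
+ */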
+ if ((dev->features & NETIF_F_IP_CSUM) &&
+ (csum == CHECKSUM_PARTIAL)) {
+ csum_start = skb->csum_start -
+ (skb_network_header(skb) - skb->head) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+ csum_dest = skb->csum_offset + csum_start;
+ }
+
+ if (skb_headroom(skb) < dev->needed_headroom) {
+ struct sk_buff *tmp_skb = skb;
+
+ skb = skb_realloc_headroom(skb, dev->needed_headroom);
+ kfree_skb(tmp_skb);
+ if (skb == NULL) {
+ vif->net_stats.tx_dropped++;
+ return 0;
+ }
+ }
+
+ if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
+ ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
+ goto fail_tx;
+ }
+
+ if ((dev->features & NETIF_F_IP_CSUM) &&
+ (csum == CHECKSUM_PARTIAL)) {
+ meta_v2.csum_start = csum_start;
+ meta_v2.csum_dest = csum_dest;
+
+ /* instruct target to calculate checksum */
+ meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
+ meta_ver = WMI_META_VERSION_2;
+ meta = &meta_v2;
+ } else {
+ meta_ver = 0;
+ meta = NULL;
+ }
+
+ ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
+ DATA_MSGTYPE, flags, 0,
+ meta_ver,
+ meta, vif->fw_vif_idx);
+
+ if (ret) {
+ ath6kl_warn("failed to add wmi data header:%d\n"
+ , ret);
+ goto fail_tx;
+ }
+
+ if ((vif->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
+ chk_adhoc_ps_mapping = true;
+ else {
+ /* get the stream mapping */
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+ vif->fw_vif_idx, skb,
+ 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
+ if (ret)
+ goto fail_tx;
+ }
+ } else {
+ goto fail_tx;
+ }
+
+ spin_lock_bh(&ar->lock);
+
+ if (chk_adhoc_ps_mapping)
+ eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
+ else
+ eid = ar->ac2ep_map[ac];
+
+ if (eid == 0 || eid == ENDPOINT_UNUSED) {
+ ath6kl_err("eid %d is not mapped!\n", eid);
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* allocate resource for this packet */
+ cookie = ath6kl_alloc_cookie(ar);
+
+ if (!cookie) {
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* update counts while the lock is held */
+ ar->tx_pending[eid]++;
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
+ skb_cloned(skb)) {
+ /*
+ * We will touch (move) the buffer data to align it. Since the
+ * skb buffer is cloned and not only the header is changed, we
+ * have to copy it to allow the changes. Since we are copying
+ * the data here, we may as well align it by reserving suitable
+ * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
+ */
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
+ if (nskb == NULL)
+ goto fail_tx;
+ kfree_skb(skb);
+ skb = nskb;
+ }
+
+ cookie->skb = skb;
+ cookie->map_no = map_no;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, htc_tag);
+ cookie->htc_pkt.skb = skb;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
+ skb->data, skb->len);
+
+ /*
+ * HTC interface is asynchronous; if this fails, cleanup will
+ * happen in the ath6kl_tx_complete callback.
+ */
+ ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb(skb);
+
+ vif->net_stats.tx_dropped++;
+ vif->net_stats.tx_aborted_errors++;
+
+ return 0;
+}
+
+/* indicate tx activity or inactivity on a WMI stream */
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
+{
+ struct ath6kl *ar = devt;
+ enum htc_endpoint_id eid;
+ int i;
+
+ eid = ar->ac2ep_map[traffic_class];
+
+ if (!test_bit(WMI_ENABLED, &ar->flag))
+ goto notify_htc;
+
+ spin_lock_bh(&ar->lock);
+
+ ar->ac_stream_active[traffic_class] = active;
+
+ if (active) {
+ /*
+ * Keep track of the active stream with the highest
+ * priority.
+ */
+ if (ar->ac_stream_pri_map[traffic_class] >
+ ar->hiac_stream_active_pri)
+ /* set the new highest active priority */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[traffic_class];
+
+ } else {
+ /*
+ * We may have to search for the next active stream
+ * that is the highest priority.
+ */
+ if (ar->hiac_stream_active_pri ==
+ ar->ac_stream_pri_map[traffic_class]) {
+ /*
+ * The highest priority stream just went inactive;
+ * reset and search for the next highest active
+ * priority stream.
+ */
+ ar->hiac_stream_active_pri = 0;
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (ar->ac_stream_active[i] &&
+ (ar->ac_stream_pri_map[i] >
+ ar->hiac_stream_active_pri))
+ /*
+ * Set the new highest active
+ * priority.
+ */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[i];
+ }
+ }
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+notify_htc:
+ /* notify HTC, this may cause credit distribution changes */
+ ath6kl_htc_activity_changed(ar->htc_target, eid, active);
+}
+
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct ath6kl_vif *vif;
+ enum htc_endpoint_id endpoint = packet->endpoint;
+ enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
+
+ if (endpoint == ar->ctrl_ep) {
+ /*
+ * Under normal WMI, if this is getting full then something is
+ * running rampant; the host should not be exhausting the WMI
+ * queue with too many commands. The only exception to this is
+ * during testing using endpointping.
+ */
+ set_bit(WMI_CTRL_EP_FULL, &ar->flag);
+ ath6kl_err("wmi ctrl ep is full\n");
+ ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
+ return action;
+ }
+
+ if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
+ return action;
+
+ /*
+ * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
+ * the highest active stream.
+ */
+ if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
+ ar->hiac_stream_active_pri &&
+ ar->cookie_count <=
+ target->endpoint[endpoint].tx_drop_packet_threshold)
+ /*
+ * Give preference to the highest priority stream by
+ * dropping the packets which overflowed.
+ */
+ action = HTC_SEND_FULL_DROP;
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
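+ /*
+ * Apply back-pressure: stop the netdev queue for an affected vif
+ * so the stack stops feeding us frames; ath6kl_tx_complete() will
+ * wake the queue again once pending packets drain.
+ */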
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->nw_type == ADHOC_NETWORK ||
+ action != HTC_SEND_FULL_DROP) {
+ spin_unlock_bh(&ar->list_lock);
+
+ set_bit(NETQ_STOPPED, &vif->flags);
+ netif_stop_queue(vif->ndev);
+
+ return action;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return action;
+}
+
+/* TODO this needs to be looked at */
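+/*
+ * On tx completion in IBSS power-save mode, drop the pending count for
+ * the node this frame was mapped to and trim fully idle entries from
+ * the tail of the node map.
+ */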
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
+ enum htc_endpoint_id eid, u32 map_no)
+{
+ struct ath6kl *ar = vif->ar;
+ u32 i;
+
+ if (vif->nw_type != ADHOC_NETWORK)
+ return;
+
+ if (!ar->ibss_ps_enable)
+ return;
+
+ if (eid == ar->ctrl_ep)
+ return;
+
+ if (map_no == 0)
+ return;
+
+ map_no--;
+ ar->node_map[map_no].tx_pend--;
+
+ if (ar->node_map[map_no].tx_pend)
+ return;
+
+ if (map_no != (ar->node_num - 1))
+ return;
+
+ for (i = ar->node_num; i > 0; i--) {
+ if (ar->node_map[i - 1].tx_pend)
+ break;
+
+ memset(&ar->node_map[i - 1], 0,
+ sizeof(struct ath6kl_node_mapping));
+ ar->node_num--;
+ }
+}
+
+void ath6kl_tx_complete(struct htc_target *target,
+ struct list_head *packet_queue)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff_head skb_queue;
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+ struct ath6kl_cookie *ath6kl_cookie;
+ u32 map_no = 0;
+ int status;
+ enum htc_endpoint_id eid;
+ bool wake_event = false;
+ bool flushing[ATH6KL_VIF_MAX] = {false};
+ u8 if_idx;
+ struct ath6kl_vif *vif;
+
+ skb_queue_head_init(&skb_queue);
+
+ /* lock the driver as we update internal state */
+ spin_lock_bh(&ar->lock);
+
+ /* reap completed packets */
+ while (!list_empty(packet_queue)) {
+ packet = list_first_entry(packet_queue, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
+ packet->endpoint >= ENDPOINT_MAX))
+ continue;
+
+ ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
+ if (WARN_ON_ONCE(!ath6kl_cookie))
+ continue;
+
+ status = packet->status;
+ skb = ath6kl_cookie->skb;
+ eid = packet->endpoint;
+ map_no = ath6kl_cookie->map_no;
+
+ if (WARN_ON_ONCE(!skb || !skb->data)) {
+ dev_kfree_skb(skb);
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
+ __skb_queue_tail(&skb_queue, skb);
+
+ if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
+ ar->tx_pending[eid]--;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend--;
+
+ if (eid == ar->ctrl_ep) {
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
+ clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+ if (ar->tx_pending[eid] == 0)
+ wake_event = true;
+ }
+
+ if (eid == ar->ctrl_ep) {
+ if_idx = wmi_cmd_hdr_get_if_idx(
+ (struct wmi_cmd_hdr *) packet->buf);
+ } else {
+ if_idx = wmi_data_hdr_get_if_idx(
+ (struct wmi_data_hdr *) packet->buf);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
+ if (status) {
+ if (status == -ECANCELED)
+ /* a packet was flushed */
+ flushing[if_idx] = true;
+
+ vif->net_stats.tx_errors++;
+
+ if (status != -ENOSPC && status != -ECANCELED)
+ ath6kl_warn("tx complete error: %d\n", status);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "error!");
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "OK");
+
+ flushing[if_idx] = false;
+ vif->net_stats.tx_packets++;
+ vif->net_stats.tx_bytes += skb->len;
+ }
+
+ ath6kl_tx_clear_node_map(vif, eid, map_no);
+
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+
+ if (test_bit(NETQ_STOPPED, &vif->flags))
+ clear_bit(NETQ_STOPPED, &vif->flags);
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+ __skb_queue_purge(&skb_queue);
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ !flushing[vif->fw_vif_idx]) {
+ spin_unlock_bh(&ar->list_lock);
+ netif_wake_queue(vif->ndev);
+ spin_lock_bh(&ar->list_lock);
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ if (wake_event)
+ wake_up(&ar->event_wq);
+
+ return;
+}
+
+void ath6kl_tx_data_cleanup(struct ath6kl *ar)
+{
+ int i;
+
+ /* flush all the data (non-control) streams */
+ for (i = 0; i < WMM_NUM_AC; i++)
+ ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
+ ATH6KL_DATA_PKT_TAG);
+}
+
+/* Rx functions */
+
+static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ skb->dev = dev;
+
+ if (!(skb->dev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif_rx_ni(skb);
+}
+
+static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
+{
+ struct sk_buff *skb;
+
+ while (num) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb) {
+ ath6kl_err("netbuf allocation failed\n");
+ return;
+ }
+ skb_queue_tail(q, skb);
+ num--;
+ }
+}
+
+static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
+{
+ struct sk_buff *skb = NULL;
+
+ if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
+ (AGGR_NUM_OF_FREE_NETBUFS >> 2))
+ ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
+ AGGR_NUM_OF_FREE_NETBUFS);
+
+ skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
+
+ return skb;
+}
+
+void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb;
+ int rx_buf;
+ int n_buf_refill;
+ struct htc_packet *packet;
+ struct list_head queue;
+
+ n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
+ ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
+
+ if (n_buf_refill <= 0)
+ return;
+
+ INIT_LIST_HEAD(&queue);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: providing htc with %d buffers at eid=%d\n",
+ __func__, n_buf_refill, endpoint);
+
+ for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb)
+ break;
+
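+ /*
+ * The htc_packet descriptor lives in the skb's headroom. Pull
+ * skb->data back onto a 4-byte boundary (reusing headroom) so
+ * HTC can process the header without an extra memmove, and
+ * reset the tail pointer so the data length is unchanged.
+ */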
+ packet = (struct htc_packet *) skb->head;
+ if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+ size_t len = skb_headlen(skb);
+ skb->data = PTR_ALIGN(skb->data - 4, 4);
+ skb_set_tail_pointer(skb, len);
+ }
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_BUFFER_SIZE, endpoint);
+ packet->skb = skb;
+ list_add_tail(&packet->list, &queue);
+ }
+
+ if (!list_empty(&queue))
+ ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
+}
+
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
+{
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+
+ while (count) {
+ skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
+ if (!skb)
+ return;
+
+ packet = (struct htc_packet *) skb->head;
+ if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+ size_t len = skb_headlen(skb);
+ skb->data = PTR_ALIGN(skb->data - 4, 4);
+ skb_set_tail_pointer(skb, len);
+ }
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_AMSDU_BUFFER_SIZE, 0);
+ packet->skb = skb;
+
+ spin_lock_bh(&ar->lock);
+ list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
+ spin_unlock_bh(&ar->lock);
+ count--;
+ }
+}
+
+/*
+ * Callback to allocate a receive buffer for a pending packet. We use a
+ * pre-allocated list of buffers of maximum AMSDU size (4K).
+ */
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct htc_packet *packet = NULL;
+ struct list_head *pkt_pos;
+ int refill_cnt = 0, depth = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
+ __func__, endpoint, len);
+
+ if ((len <= ATH6KL_BUFFER_SIZE) ||
+ (len > ATH6KL_AMSDU_BUFFER_SIZE))
+ return NULL;
+
+ spin_lock_bh(&ar->lock);
+
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
+ goto refill_buf;
+ }
+
+ packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
+ depth++;
+
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
+ spin_unlock_bh(&ar->lock);
+
+ /* set actual endpoint ID */
+ packet->endpoint = endpoint;
+
+refill_buf:
+ if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
+ ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
+
+ return packet;
+}
+
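+/*
+ * Split a received A-MSDU into its 802.3 subframes: copy each subframe
+ * into a fresh buffer from the free pool, convert it to DIX framing and
+ * queue it on the TID's reorder queue. Subframes are padded to 4-byte
+ * boundaries inside the aggregate.
+ */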
+static void aggr_slice_amsdu(struct aggr_info *p_aggr,
+ struct rxtid *rxtid, struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct ethhdr *hdr;
+ u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
+ u8 *framep;
+
+ mac_hdr_len = sizeof(struct ethhdr);
+ framep = skb->data + mac_hdr_len;
+ amsdu_len = skb->len - mac_hdr_len;
+
+ while (amsdu_len > mac_hdr_len) {
+ hdr = (struct ethhdr *) framep;
+ payload_8023_len = ntohs(hdr->h_proto);
+
+ if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
+ payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
+ ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
+ payload_8023_len);
+ break;
+ }
+
+ frame_8023_len = payload_8023_len + mac_hdr_len;
+ new_skb = aggr_get_free_skb(p_aggr);
+ if (!new_skb) {
+ ath6kl_err("no buffer available\n");
+ break;
+ }
+
+ memcpy(new_skb->data, framep, frame_8023_len);
+ skb_put(new_skb, frame_8023_len);
+ if (ath6kl_wmi_dot3_2_dix(new_skb)) {
+ ath6kl_err("dot3_2_dix error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ skb_queue_tail(&rxtid->q, new_skb);
+
+ /* Is this the last subframe within this aggregate ? */
+ if ((amsdu_len - frame_8023_len) == 0)
+ break;
+
+ /* Add the length of A-MSDU subframe padding bytes -
+ * Round to nearest word.
+ */
+ frame_8023_len = ALIGN(frame_8023_len, 4);
+
+ framep += frame_8023_len;
+ amsdu_len -= frame_8023_len;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
+ u16 seq_no, u8 order)
+{
+ struct sk_buff *skb;
+ struct rxtid *rxtid;
+ struct skb_hold_q *node;
+ u16 idx, idx_end, seq_end;
+ struct rxtid_stats *stats;
+
+ rxtid = &agg_conn->rx_tid[tid];
+ stats = &agg_conn->stat[tid];
+
+ spin_lock_bh(&rxtid->lock);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+
+ /*
+ * idx_end is typically the last possible frame in the window,
+ * but changes to the given seq_no when a BAR comes in. If
+ * seq_no is non-zero, we will go up to that and stop.
+ * Note: the last seq no in the current window occupies the
+ * same index position as the index just previous to the start.
+ * An important point: if win_sz is 7, for a seq_no space of
+ * 4095 there would be holes when the sequence number wraps
+ * around. The target should judiciously choose win_sz based
+ * on this condition (TID_WINDOW_SZ = 2 x win_sz; a win_sz of
+ * 2, 4, 8 or 16 works fine for 4095).
+ * We must dequeue from "idx" to "idx_end", including both.
+ */
+ seq_end = seq_no ? seq_no : rxtid->seq_next;
+ idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
+
+ do {
+ node = &rxtid->hold_q[idx];
+ if ((order == 1) && (!node->skb))
+ break;
+
+ if (node->skb) {
+ if (node->is_amsdu)
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
+ node->skb);
+ else
+ skb_queue_tail(&rxtid->q, node->skb);
+ node->skb = NULL;
+ } else {
+ stats->num_hole++;
+ }
+
+ rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ } while (idx != idx_end);
+
+ spin_unlock_bh(&rxtid->lock);
+
+ stats->num_delivered += skb_queue_len(&rxtid->q);
+
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
+}
+
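+/*
+ * Returns true when the skb has been consumed by the aggregation logic
+ * (parked in the hold queue or delivered via the reorder queue); false
+ * means the caller should hand the frame to the network stack itself.
+ */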
+static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
+ u16 seq_no,
+ bool is_amsdu, struct sk_buff *frame)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ struct sk_buff *skb;
+ struct skb_hold_q *node;
+ u16 idx, st, cur, end;
+ bool is_queued = false;
+ u16 extended_end;
+
+ rxtid = &agg_conn->rx_tid[tid];
+ stats = &agg_conn->stat[tid];
+
+ stats->num_into_aggr++;
+
+ if (!rxtid->aggr) {
+ if (is_amsdu) {
+ aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
+ is_queued = true;
+ stats->num_amsdu++;
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
+ skb);
+ }
+ return is_queued;
+ }
+
+ /* Check the incoming sequence no, if it's in the window */
+ st = rxtid->seq_next;
+ cur = seq_no;
+ end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
+
+ if (((st < end) && (cur < st || cur > end)) ||
+ ((st > end) && (cur > end) && (cur < st))) {
+ extended_end = (end + rxtid->hold_q_sz - 1) &
+ ATH6KL_MAX_SEQ_NO;
+
+ if (((end < extended_end) &&
+ (cur < end || cur > extended_end)) ||
+ ((end > extended_end) && (cur > extended_end) &&
+ (cur < end))) {
+ aggr_deque_frms(agg_conn, tid, 0, 0);
+ spin_lock_bh(&rxtid->lock);
+ if (cur >= rxtid->hold_q_sz - 1)
+ rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
+ else
+ rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+ spin_unlock_bh(&rxtid->lock);
+ } else {
+ /*
+ * Dequeue only those frames that are outside the
+ * new shifted window.
+ */
+ if (cur >= rxtid->hold_q_sz - 1)
+ st = cur - (rxtid->hold_q_sz - 1);
+ else
+ st = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+
+ aggr_deque_frms(agg_conn, tid, st, 0);
+ }
+
+ stats->num_oow++;
+ }
+
+ idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
+
+ node = &rxtid->hold_q[idx];
+
+ spin_lock_bh(&rxtid->lock);
+
+ /*
+ * Is the current frame a duplicate or something beyond our
+ * window (hold_q, which is already 2x the window size)?
+ *
+ * 1. Duplicate is easy - drop the incoming frame.
+ * 2. Not falling in the current sliding window:
+ * 2a. Is the frame_seq_no preceding the current tid_seq_no?
+ * -> Drop the frame; perhaps the sender did not get our ACK.
+ * This is taken care of above.
+ * 2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
+ * -> Taken care of above, by moving the window forward.
+ */
+ dev_kfree_skb(node->skb);
+ stats->num_dups++;
+
+ node->skb = frame;
+ is_queued = true;
+ node->is_amsdu = is_amsdu;
+ node->seq_no = seq_no;
+
+ if (node->is_amsdu)
+ stats->num_amsdu++;
+ else
+ stats->num_mpdu++;
+
+ spin_unlock_bh(&rxtid->lock);
+
+ aggr_deque_frms(agg_conn, tid, 0, 1);
+
+ if (agg_conn->timer_scheduled)
+ return is_queued;
+
+ spin_lock_bh(&rxtid->lock);
+ for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
+ if (rxtid->hold_q[idx].skb) {
+ /*
+ * There is a frame in the queue and no
+ * timer so start a timer to ensure that
+ * the frame doesn't remain stuck
+ * forever.
+ */
+ agg_conn->timer_scheduled = true;
+ mod_timer(&agg_conn->timer,
+ (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
+ rxtid->timer_mon = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&rxtid->lock);
+
+ return is_queued;
+}
+
+static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
+ struct ath6kl_sta *conn)
+{
+ struct ath6kl *ar = vif->ar;
+ bool is_apsdq_empty, is_apsdq_empty_at_start;
+ u32 num_frames_to_deliver, flags;
+ struct sk_buff *skb = NULL;
+
+ /*
+ * If the APSD q for this STA is not empty, dequeue and
+ * send a pkt from the head of the q. Also update the
+ * More data bit in the WMI_DATA_HDR if there are
+ * more pkts for this STA in the APSD q.
+ * If there are no more pkts for this STA,
+ * update the APSD bitmap for this STA.
+ */
+
+ num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
+ ATH6KL_APSD_FRAME_MASK;
+ /*
+ * The number of frames to send in a service period is
+ * indicated by the station in the QOS_INFO of the
+ * association request. If it is zero, send all frames.
+ */
+ if (!num_frames_to_deliver)
+ num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
+
+ spin_lock_bh(&conn->psq_lock);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ spin_unlock_bh(&conn->psq_lock);
+ is_apsdq_empty_at_start = is_apsdq_empty;
+
+ while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
+ spin_lock_bh(&conn->psq_lock);
+ skb = skb_dequeue(&conn->apsdq);
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * Set the STA flag to Trigger delivery,
+ * so that the frame will go out
+ */
+ conn->sta_flags |= STA_PS_APSD_TRIGGER;
+ num_frames_to_deliver--;
+
+ /* Last frame in the service period, set EOSP or queue empty */
+ if ((is_apsdq_empty) || (!num_frames_to_deliver))
+ conn->sta_flags |= STA_PS_APSD_EOSP;
+
+ ath6kl_data_tx(skb, vif->ndev);
+ conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
+ conn->sta_flags &= ~(STA_PS_APSD_EOSP);
+ }
+
+ if (is_apsdq_empty) {
+ if (is_apsdq_empty_at_start)
+ flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
+ else
+ flags = 0;
+
+ ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 0, flags);
+ }
+
+ return;
+}
+
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb = packet->pkt_cntxt;
+ struct wmi_rx_meta_v2 *meta;
+ struct wmi_data_hdr *dhdr;
+ int min_hdr_len;
+ u8 meta_type, dot11_hdr = 0;
+ u8 pad_before_data_start;
+ int status = packet->status;
+ enum htc_endpoint_id ept = packet->endpoint;
+ bool is_amsdu, prev_ps, ps_state = false;
+ bool trig_state = false;
+ struct ath6kl_sta *conn = NULL;
+ struct sk_buff *skb1 = NULL;
+ struct ethhdr *datap = NULL;
+ struct ath6kl_vif *vif;
+ struct aggr_info_conn *aggr_conn;
+ u16 seq_no, offset;
+ u8 tid, if_idx;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
+ __func__, ar, ept, skb, packet->buf,
+ packet->act_len, status);
+
+ if (status || packet->act_len < HTC_HDR_LENGTH) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
+ skb->data, skb->len);
+
+ if (ept == ar->ctrl_ep) {
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ ath6kl_check_wow_status(ar);
+ ath6kl_wmi_control_rx(ar->wmi, skb);
+ return;
+ }
+ if_idx =
+ wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+ } else {
+ if_idx =
+ wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Take lock to protect buffer counts and adaptive power throughput
+ * state.
+ */
+ spin_lock_bh(&vif->if_lock);
+
+ vif->net_stats.rx_packets++;
+ vif->net_stats.rx_bytes += packet->act_len;
+
+ spin_unlock_bh(&vif->if_lock);
+
+ skb->dev = vif->ndev;
+
+ if (!test_bit(WMI_ENABLED, &ar->flag)) {
+ if (EPPING_ALIGNMENT_PAD > 0)
+ skb_pull(skb, EPPING_ALIGNMENT_PAD);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
+ return;
+ }
+
+ ath6kl_check_wow_status(ar);
+
+ min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+
+ dhdr = (struct wmi_data_hdr *) skb->data;
+
+ /*
+ * In the case of AP mode we may receive NULL data frames
+ * that do not have LLC hdr. They are 16 bytes in size.
+ * Allow these frames in the AP mode.
+ */
+ if (vif->nw_type != AP_NETWORK &&
+ ((packet->act_len < min_hdr_len) ||
+ (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
+ ath6kl_info("frame len is too short or too long\n");
+ vif->net_stats.rx_errors++;
+ vif->net_stats.rx_length_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* Get the Power save state of the STA */
+ if (vif->nw_type == AP_NETWORK) {
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+
+ ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
+ WMI_DATA_HDR_PS_MASK);
+
+ offset = sizeof(struct wmi_data_hdr);
+ trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
+
+ switch (meta_type) {
+ case 0:
+ break;
+ case WMI_META_VERSION_1:
+ offset += sizeof(struct wmi_rx_meta_v1);
+ break;
+ case WMI_META_VERSION_2:
+ offset += sizeof(struct wmi_rx_meta_v2);
+ break;
+ default:
+ break;
+ }
+
+ datap = (struct ethhdr *) (skb->data + offset);
+ conn = ath6kl_find_sta(vif, datap->h_source);
+
+ if (!conn) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * If there is a change in the PS state of the STA,
+ * take the appropriate steps:
+ *
+ * 1. If Sleep-->Awake, flush the psq for the STA and
+ * clear the PVB for the STA.
+ * 2. If Awake-->Sleep, start queueing frames for
+ * the STA.
+ */
+ prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
+
+ if (ps_state)
+ conn->sta_flags |= STA_PS_SLEEP;
+ else
+ conn->sta_flags &= ~STA_PS_SLEEP;
+
+ /* Accept trigger only when the station is in sleep */
+ if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
+ ath6kl_uapsd_trigger_frame_rx(vif, conn);
+
+ if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
+ if (!(conn->sta_flags & STA_PS_SLEEP)) {
+ struct sk_buff *skbuff = NULL;
+ bool is_apsdq_empty;
+ struct ath6kl_mgmt_buff *mgmt;
+ u8 idx;
+
+ spin_lock_bh(&conn->psq_lock);
+ while (conn->mgmt_psq_len > 0) {
+ mgmt = list_first_entry(
+ &conn->mgmt_psq,
+ struct ath6kl_mgmt_buff,
+ list);
+ list_del(&mgmt->list);
+ conn->mgmt_psq_len--;
+ spin_unlock_bh(&conn->psq_lock);
+ idx = vif->fw_vif_idx;
+
+ ath6kl_wmi_send_mgmt_cmd(ar->wmi,
+ idx,
+ mgmt->id,
+ mgmt->freq,
+ mgmt->wait,
+ mgmt->buf,
+ mgmt->len,
+ mgmt->no_cck);
+
+ kfree(mgmt);
+ spin_lock_bh(&conn->psq_lock);
+ }
+ conn->mgmt_psq_len = 0;
+ while ((skbuff = skb_dequeue(&conn->psq))) {
+ spin_unlock_bh(&conn->psq_lock);
+ ath6kl_data_tx(skbuff, vif->ndev);
+ spin_lock_bh(&conn->psq_lock);
+ }
+
+ is_apsdq_empty = skb_queue_empty(&conn->apsdq);
+ while ((skbuff = skb_dequeue(&conn->apsdq))) {
+ spin_unlock_bh(&conn->psq_lock);
+ ath6kl_data_tx(skbuff, vif->ndev);
+ spin_lock_bh(&conn->psq_lock);
+ }
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (!is_apsdq_empty)
+ ath6kl_wmi_set_apsd_bfrd_traf(
+ ar->wmi,
+ vif->fw_vif_idx,
+ conn->aid, 0, 0);
+
+ /* Clear the PVB for this STA */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 0);
+ }
+ }
+
+ /* drop NULL data frames here */
+ if ((packet->act_len < min_hdr_len) ||
+ (packet->act_len >
+ WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+
+ is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
+ tid = wmi_data_hdr_get_up(dhdr);
+ seq_no = wmi_data_hdr_get_seqno(dhdr);
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+ dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
+ pad_before_data_start =
+ (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
+ & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
+
+ skb_pull(skb, sizeof(struct wmi_data_hdr));
+
+ switch (meta_type) {
+ case WMI_META_VERSION_1:
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
+ break;
+ case WMI_META_VERSION_2:
+ meta = (struct wmi_rx_meta_v2 *) skb->data;
+ if (meta->csum_flags & 0x1) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum) meta->csum;
+ }
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
+ break;
+ default:
+ break;
+ }
+
+ skb_pull(skb, pad_before_data_start);
+
+ if (dot11_hdr)
+ status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
+ else if (!is_amsdu)
+ status = ath6kl_wmi_dot3_2_dix(skb);
+
+ if (status) {
+ /*
+ * Drop frames that could not be processed (lack of
+ * memory, etc.)
+ */
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (!(vif->ndev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (vif->nw_type == AP_NETWORK) {
+ datap = (struct ethhdr *) skb->data;
+ if (is_multicast_ether_addr(datap->h_dest))
+ /*
+ * Bcast/Mcast frames should be sent to the
+ * OS stack as well as on the air.
+ */
+ skb1 = skb_copy(skb, GFP_ATOMIC);
+ else {
+ /*
+ * Search for a connected STA with dstMac
+ * as the MAC address. If found, send the
+ * frame to it over the air; else send the
+ * frame up the stack.
+ */
+ conn = ath6kl_find_sta(vif, datap->h_dest);
+
+ if (conn && ar->intra_bss) {
+ skb1 = skb;
+ skb = NULL;
+ } else if (conn && !ar->intra_bss) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (skb1)
+ ath6kl_data_tx(skb1, vif->ndev);
+
+ if (skb == NULL) {
+ /* nothing to deliver up the stack */
+ return;
+ }
+ }
+
+ datap = (struct ethhdr *) skb->data;
+
+ if (is_unicast_ether_addr(datap->h_dest)) {
+ if (vif->nw_type == AP_NETWORK) {
+ conn = ath6kl_find_sta(vif, datap->h_source);
+ if (!conn)
+ return;
+ aggr_conn = conn->aggr_conn;
+ } else {
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
+
+ if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
+ is_amsdu, skb)) {
+ /* aggregation code will handle the skb */
+ return;
+ }
+ } else if (!is_broadcast_ether_addr(datap->h_dest)) {
+ vif->net_stats.multicast++;
+ }
+
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
+}
+
+static void aggr_timeout(unsigned long arg)
+{
+ u8 i, j;
+ struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &aggr_conn->rx_tid[i];
+ stats = &aggr_conn->stat[i];
+
+ if (!rxtid->aggr || !rxtid->timer_mon)
+ continue;
+
+ stats->num_timeouts++;
+ ath6kl_dbg(ATH6KL_DBG_AGGR,
+ "aggr timeout (st %d end %d)\n",
+ rxtid->seq_next,
+ ((rxtid->seq_next + rxtid->hold_q_sz-1) &
+ ATH6KL_MAX_SEQ_NO));
+ aggr_deque_frms(aggr_conn, i, 0, 0);
+ }
+
+ aggr_conn->timer_scheduled = false;
+
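+ /*
+ * Re-arm the timer if any TID still has frames parked in its
+ * hold queue; otherwise clear that TID's monitor flag.
+ */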
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &aggr_conn->rx_tid[i];
+
+ if (rxtid->aggr && rxtid->hold_q) {
+ spin_lock_bh(&rxtid->lock);
+ for (j = 0; j < rxtid->hold_q_sz; j++) {
+ if (rxtid->hold_q[j].skb) {
+ aggr_conn->timer_scheduled = true;
+ rxtid->timer_mon = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&rxtid->lock);
+
+ if (j >= rxtid->hold_q_sz)
+ rxtid->timer_mon = false;
+ }
+ }
+
+ if (aggr_conn->timer_scheduled)
+ mod_timer(&aggr_conn->timer,
+ jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
+}
+
+static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ if (!aggr_conn || tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &aggr_conn->rx_tid[tid];
+ stats = &aggr_conn->stat[tid];
+
+ if (rxtid->aggr)
+ aggr_deque_frms(aggr_conn, tid, 0, 0);
+
+ rxtid->aggr = false;
+ rxtid->timer_mon = false;
+ rxtid->win_sz = 0;
+ rxtid->seq_next = 0;
+ rxtid->hold_q_sz = 0;
+
+ kfree(rxtid->hold_q);
+ rxtid->hold_q = NULL;
+
+ memset(stats, 0, sizeof(struct rxtid_stats));
+}
+
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
+ u8 win_sz)
+{
+ struct ath6kl_sta *sta;
+ struct aggr_info_conn *aggr_conn = NULL;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ u16 hold_q_size;
+ u8 tid, aid;
+
+ if (vif->nw_type == AP_NETWORK) {
+ aid = ath6kl_get_aid(tid_mux);
+ sta = ath6kl_find_sta_by_aid(vif->ar, aid);
+ if (sta)
+ aggr_conn = sta->aggr_conn;
+ } else {
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
+
+ if (!aggr_conn)
+ return;
+
+ tid = ath6kl_get_tid(tid_mux);
+ if (tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &aggr_conn->rx_tid[tid];
+ stats = &aggr_conn->stat[tid];
+
+ if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
+ __func__, win_sz, tid);
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(aggr_conn, tid);
+
+ rxtid->seq_next = seq_no;
+ hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
+ rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
+ if (!rxtid->hold_q)
+ return;
+
+ rxtid->win_sz = win_sz;
+ rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
+ if (!skb_queue_empty(&rxtid->q))
+ return;
+
+ rxtid->aggr = true;
+}
+
+void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
+ struct aggr_info_conn *aggr_conn)
+{
+ struct rxtid *rxtid;
+ u8 i;
+
+ aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
+ aggr_conn->dev = vif->ndev;
+ init_timer(&aggr_conn->timer);
+ aggr_conn->timer.function = aggr_timeout;
+ aggr_conn->timer.data = (unsigned long) aggr_conn;
+ aggr_conn->aggr_info = aggr_info;
+
+ aggr_conn->timer_scheduled = false;
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &aggr_conn->rx_tid[i];
+ rxtid->aggr = false;
+ rxtid->timer_mon = false;
+ skb_queue_head_init(&rxtid->q);
+ spin_lock_init(&rxtid->lock);
+ }
+}
+
+struct aggr_info *aggr_init(struct ath6kl_vif *vif)
+{
+ struct aggr_info *p_aggr = NULL;
+
+ p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
+ if (!p_aggr) {
+ ath6kl_err("failed to alloc memory for aggr_node\n");
+ return NULL;
+ }
+
+ p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
+ if (!p_aggr->aggr_conn) {
+ ath6kl_err("failed to alloc memory for connection specific aggr info\n");
+ kfree(p_aggr);
+ return NULL;
+ }
+
+ aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
+
+ skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
+ ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
+
+ return p_aggr;
+}
+
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
+{
+ struct ath6kl_sta *sta;
+ struct rxtid *rxtid;
+ struct aggr_info_conn *aggr_conn = NULL;
+ u8 tid, aid;
+
+ if (vif->nw_type == AP_NETWORK) {
+ aid = ath6kl_get_aid(tid_mux);
+ sta = ath6kl_find_sta_by_aid(vif->ar, aid);
+ if (sta)
+ aggr_conn = sta->aggr_conn;
+ } else {
+ aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
+
+ if (!aggr_conn)
+ return;
+
+ tid = ath6kl_get_tid(tid_mux);
+ if (tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &aggr_conn->rx_tid[tid];
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(aggr_conn, tid);
+}
+
+void aggr_reset_state(struct aggr_info_conn *aggr_conn)
+{
+ u8 tid;
+
+ if (!aggr_conn)
+ return;
+
+ if (aggr_conn->timer_scheduled) {
+ del_timer(&aggr_conn->timer);
+ aggr_conn->timer_scheduled = false;
+ }
+
+ for (tid = 0; tid < NUM_OF_TIDS; tid++)
+ aggr_delete_tid_state(aggr_conn, tid);
+}
+
+/* clean up our amsdu buffer list */
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
+{
+ struct htc_packet *packet, *tmp_pkt;
+
+ spin_lock_bh(&ar->lock);
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ return;
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
+ list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&ar->lock);
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&ar->lock);
+ }
+
+ spin_unlock_bh(&ar->lock);
+}
+
+void aggr_module_destroy(struct aggr_info *aggr_info)
+{
+ if (!aggr_info)
+ return;
+
+ aggr_reset_state(aggr_info->aggr_conn);
+ skb_queue_purge(&aggr_info->rx_amsdu_freeq);
+ kfree(aggr_info->aggr_conn);
+ kfree(aggr_info);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
new file mode 100644
index 000000000..e0c27b1ac
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -0,0 +1,1228 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "debug.h"
+#include "core.h"
+
+/* constants */
+#define TX_URB_COUNT 32
+#define RX_URB_COUNT 32
+#define ATH6KL_USB_RX_BUFFER_SIZE 4096
+
+/* tx/rx pipes for usb */
+enum ATH6KL_USB_PIPE_ID {
+ ATH6KL_USB_PIPE_TX_CTRL = 0,
+ ATH6KL_USB_PIPE_TX_DATA_LP,
+ ATH6KL_USB_PIPE_TX_DATA_MP,
+ ATH6KL_USB_PIPE_TX_DATA_HP,
+ ATH6KL_USB_PIPE_RX_CTRL,
+ ATH6KL_USB_PIPE_RX_DATA,
+ ATH6KL_USB_PIPE_RX_DATA2,
+ ATH6KL_USB_PIPE_RX_INT,
+ ATH6KL_USB_PIPE_MAX
+};
+
+#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX
+
+struct ath6kl_usb_pipe {
+ struct list_head urb_list_head;
+ struct usb_anchor urb_submitted;
+ u32 urb_alloc;
+ u32 urb_cnt;
+ u32 urb_cnt_thresh;
+ unsigned int usb_pipe_handle;
+ u32 flags;
+ u8 ep_address;
+ u8 logical_pipe_num;
+ struct ath6kl_usb *ar_usb;
+ u16 max_packet_size;
+ struct work_struct io_complete_work;
+ struct sk_buff_head io_comp_queue;
+ struct usb_endpoint_descriptor *ep_desc;
+};
+
+#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0)
+
+/* usb device object */
+struct ath6kl_usb {
+ /* protects pipe->urb_list_head and pipe->urb_cnt */
+ spinlock_t cs_lock;
+
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX];
+ u8 *diag_cmd_buffer;
+ u8 *diag_resp_buffer;
+ struct ath6kl *ar;
+};
+
+/* usb urb object */
+struct ath6kl_urb_context {
+ struct list_head link;
+ struct ath6kl_usb_pipe *pipe;
+ struct sk_buff *skb;
+ struct ath6kl *ar;
+};
+
+/* USB endpoint definitions */
+#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81
+#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82
+#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83
+#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84
+
+#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01
+#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
+#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
+#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
+
+/* diagnostic command definitions */
+#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
+#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
+#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3
+#define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4
+
+#define ATH6KL_USB_CTRL_DIAG_CC_READ 0
+#define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1
+
+struct ath6kl_usb_ctrl_diag_cmd_write {
+ __le32 cmd;
+ __le32 address;
+ __le32 value;
+ __le32 _pad[1];
+} __packed;
+
+struct ath6kl_usb_ctrl_diag_cmd_read {
+ __le32 cmd;
+ __le32 address;
+} __packed;
+
+struct ath6kl_usb_ctrl_diag_resp_read {
+ __le32 value;
+} __packed;
+
+/* function declarations */
+static void ath6kl_usb_recv_complete(struct urb *urb);
+
+#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
+#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
+#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
+#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80)
+
+/* pipe/urb operations */
+static struct ath6kl_urb_context *
+ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
+{
+ struct ath6kl_urb_context *urb_context = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ if (!list_empty(&pipe->urb_list_head)) {
+ urb_context =
+ list_first_entry(&pipe->urb_list_head,
+ struct ath6kl_urb_context, link);
+ list_del(&urb_context->link);
+ pipe->urb_cnt--;
+ }
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+
+ return urb_context;
+}
+
+static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
+ struct ath6kl_urb_context *urb_context)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ pipe->urb_cnt++;
+
+ list_add(&urb_context->link, &pipe->urb_list_head);
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+}
+
+static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
+{
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
+
+ ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+}
+
+static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/* pipe resource allocation/cleanup */
+static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
+ int urb_cnt)
+{
+ struct ath6kl_urb_context *urb_context;
+ int status = 0, i;
+
+ INIT_LIST_HEAD(&pipe->urb_list_head);
+ init_usb_anchor(&pipe->urb_submitted);
+
+ for (i = 0; i < urb_cnt; i++) {
+ urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
+ GFP_KERNEL);
+ if (urb_context == NULL) {
+ status = -ENOMEM;
+ goto fail_alloc_pipe_resources;
+ }
+
+ urb_context->pipe = pipe;
+
+ /*
+ * We only allocate the urb contexts here; the actual URB
+ * is allocated from the kernel as needed to do a transaction.
+ */
+ pipe->urb_alloc++;
+ ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc);
+
+fail_alloc_pipe_resources:
+ return status;
+}
+
+static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
+{
+ struct ath6kl_urb_context *urb_context;
+
+ if (pipe->ar_usb == NULL) {
+ /* nothing allocated for this pipe */
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "ath6kl usb: free resources lpipe:%d "
+ "hpipe:0x%X urbs:%d avail:%d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+
+ if (pipe->urb_alloc != pipe->urb_cnt) {
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "ath6kl usb: urb leak! lpipe:%d "
+ "hpipe:0x%X urbs:%d avail:%d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+ }
+
+ while (true) {
+ urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
+ if (urb_context == NULL)
+ break;
+ kfree(urb_context);
+ }
+}
+
+static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
+{
+ int i;
+
+ for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
+ ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
+}
+
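+/*
+ * Map a fixed USB endpoint address to its logical pipe number and
+ * report how many URB contexts should be pre-allocated for it.
+ */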
+static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
+ u8 ep_address, int *urb_count)
+{
+ u8 pipe_num = ATH6KL_USB_PIPE_INVALID;
+
+ switch (ep_address) {
+ case ATH6KL_USB_EP_ADDR_APP_CTRL_IN:
+ pipe_num = ATH6KL_USB_PIPE_RX_CTRL;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_DATA_IN:
+ pipe_num = ATH6KL_USB_PIPE_RX_DATA;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_INT_IN:
+ pipe_num = ATH6KL_USB_PIPE_RX_INT;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_DATA2_IN:
+ pipe_num = ATH6KL_USB_PIPE_RX_DATA2;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT:
+ pipe_num = ATH6KL_USB_PIPE_TX_CTRL;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT:
+ pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT:
+ pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT:
+ pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ default:
+ /* note: there may be endpoints not currently used */
+ break;
+ }
+
+ return pipe_num;
+}
+
+static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
+{
+ struct usb_interface *interface = ar_usb->interface;
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *endpoint;
+ struct ath6kl_usb_pipe *pipe;
+ int i, urbcount, status = 0;
+ u8 pipe_num;
+
+ ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");
+
+ /* walk descriptors and set up pipes */
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "%s Bulk Ep:0x%2.2X maxpktsz:%d\n",
+ ATH6KL_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "RX" : "TX", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize));
+ } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n",
+ ATH6KL_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "RX" : "TX", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ ath6kl_dbg(ATH6KL_DBG_USB,
+ "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n",
+ ATH6KL_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "RX" : "TX", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ }
+ urbcount = 0;
+
+ pipe_num =
+ ath6kl_usb_get_logical_pipe_num(ar_usb,
+ endpoint->bEndpointAddress,
+ &urbcount);
+ if (pipe_num == ATH6KL_USB_PIPE_INVALID)
+ continue;
+
+ pipe = &ar_usb->pipes[pipe_num];
+ if (pipe->ar_usb != NULL) {
+ /* hmmm..pipe was already setup */
+ continue;
+ }
+
+ pipe->ar_usb = ar_usb;
+ pipe->logical_pipe_num = pipe_num;
+ pipe->ep_address = endpoint->bEndpointAddress;
+ pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
+
+ if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvintpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndintpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ }
+
+ pipe->ep_desc = endpoint;
+
+ if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address))
+ pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX;
+
+ status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount);
+ if (status != 0)
+ break;
+ }
+
+ return status;
+}
+
+/* pipe operations */
+static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe,
+ int buffer_length)
+{
+ struct ath6kl_urb_context *urb_context;
+ struct urb *urb;
+ int usb_status;
+
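+ /*
+ * Keep submitting bulk-in URBs until the pipe's pool of urb
+ * contexts is exhausted; on an allocation or submit failure the
+ * context (and its skb) is recycled and we stop.
+ */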
+ while (true) {
+ urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe);
+ if (urb_context == NULL)
+ break;
+
+ urb_context->skb = dev_alloc_skb(buffer_length);
+ if (urb_context->skb == NULL)
+ goto err_cleanup_urb;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL)
+ goto err_cleanup_urb;
+
+ usb_fill_bulk_urb(urb,
+ recv_pipe->ar_usb->udev,
+ recv_pipe->usb_pipe_handle,
+ urb_context->skb->data,
+ buffer_length,
+ ath6kl_usb_recv_complete, urb_context);
+
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n",
+ recv_pipe->logical_pipe_num,
+ recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
+ buffer_length, urb_context->skb);
+
+ usb_anchor_urb(urb, &recv_pipe->urb_submitted);
+ usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (usb_status) {
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "ath6kl usb : usb bulk recv failed %d\n",
+ usb_status);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto err_cleanup_urb;
+ }
+ usb_free_urb(urb);
+ }
+ return;
+
+err_cleanup_urb:
+ ath6kl_usb_cleanup_recv_urb(urb_context);
+ return;
+}
+
+static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
+{
+ int i;
+
+ for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
+ if (ar_usb->pipes[i].ar_usb != NULL)
+ usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
+ }
+
+ /*
+ * Flushing any pending I/O may schedule work; this call will block
+ * until all scheduled work runs to completion.
+ */
+ flush_scheduled_work();
+}
+
+static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
+{
+ /*
+ * note: control pipe is no longer used
+ * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh =
+ * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2;
+ * ath6kl_usb_post_recv_transfers(&ar_usb->
+ * pipes[ATH6KL_USB_PIPE_RX_CTRL],
+ * ATH6KL_USB_RX_BUFFER_SIZE);
+ */
+
+ ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
+ ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
+ ATH6KL_USB_RX_BUFFER_SIZE);
+}
+
+/* hif usb rx/tx completion functions */
+static void ath6kl_usb_recv_complete(struct urb *urb)
+{
+ struct ath6kl_urb_context *urb_context = urb->context;
+ struct ath6kl_usb_pipe *pipe = urb_context->pipe;
+ struct sk_buff *skb = NULL;
+ int status = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__,
+ pipe->logical_pipe_num, urb->status, urb->actual_length,
+ urb);
+
+ if (urb->status != 0) {
+ status = -EIO;
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /*
+ * no need to spew these errors when device
+ * removed or urb killed due to driver shutdown
+ */
+ status = -ECANCELED;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n",
+ __func__, pipe->logical_pipe_num,
+ pipe->ep_address, urb->status);
+ break;
+ }
+ goto cleanup_recv_urb;
+ }
+
+ if (urb->actual_length == 0)
+ goto cleanup_recv_urb;
+
+ skb = urb_context->skb;
+
+ /* we are going to pass it up */
+ urb_context->skb = NULL;
+ skb_put(skb, urb->actual_length);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+
+cleanup_recv_urb:
+ ath6kl_usb_cleanup_recv_urb(urb_context);
+
+ if (status == 0 &&
+ pipe->urb_cnt >= pipe->urb_cnt_thresh) {
+ /* our free urbs are piling up, post more transfers */
+ ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE);
+ }
+}
+
+static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
+{
+ struct ath6kl_urb_context *urb_context = urb->context;
+ struct ath6kl_usb_pipe *pipe = urb_context->pipe;
+ struct sk_buff *skb;
+
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "%s: pipe: %d, stat:%d, len:%d\n",
+ __func__, pipe->logical_pipe_num, urb->status,
+ urb->actual_length);
+
+ if (urb->status != 0) {
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "%s: pipe: %d, failed:%d\n",
+ __func__, pipe->logical_pipe_num, urb->status);
+ }
+
+ skb = urb_context->skb;
+ urb_context->skb = NULL;
+ ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+}
+
+static void ath6kl_usb_io_comp_work(struct work_struct *work)
+{
+ struct ath6kl_usb_pipe *pipe = container_of(work,
+ struct ath6kl_usb_pipe,
+ io_complete_work);
+ struct ath6kl_usb *ar_usb;
+ struct sk_buff *skb;
+
+ ar_usb = pipe->ar_usb;
+
+ while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
+ if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) {
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "ath6kl usb xmit callback buf:0x%p\n", skb);
+ ath6kl_core_tx_complete(ar_usb->ar, skb);
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "ath6kl usb recv callback buf:0x%p\n", skb);
+ ath6kl_core_rx_complete(ar_usb->ar, skb,
+ pipe->logical_pipe_num);
+ }
+ }
+}
+
+#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
+#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
+
+static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
+{
+ ath6kl_usb_flush_all(ar_usb);
+
+ ath6kl_usb_cleanup_pipe_resources(ar_usb);
+
+ usb_set_intfdata(ar_usb->interface, NULL);
+
+ kfree(ar_usb->diag_cmd_buffer);
+ kfree(ar_usb->diag_resp_buffer);
+
+ kfree(ar_usb);
+}
+
+static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
+{
+ struct usb_device *dev = interface_to_usbdev(interface);
+ struct ath6kl_usb *ar_usb;
+ struct ath6kl_usb_pipe *pipe;
+ int status = 0;
+ int i;
+
+ ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
+ if (ar_usb == NULL)
+ goto fail_ath6kl_usb_create;
+
+ usb_set_intfdata(interface, ar_usb);
+ spin_lock_init(&(ar_usb->cs_lock));
+ ar_usb->udev = dev;
+ ar_usb->interface = interface;
+
+ for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
+ pipe = &ar_usb->pipes[i];
+ INIT_WORK(&pipe->io_complete_work,
+ ath6kl_usb_io_comp_work);
+ skb_queue_head_init(&pipe->io_comp_queue);
+ }
+
+ ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
+ if (ar_usb->diag_cmd_buffer == NULL) {
+ status = -ENOMEM;
+ goto fail_ath6kl_usb_create;
+ }
+
+ ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP,
+ GFP_KERNEL);
+ if (ar_usb->diag_resp_buffer == NULL) {
+ status = -ENOMEM;
+ goto fail_ath6kl_usb_create;
+ }
+
+ status = ath6kl_usb_setup_pipe_resources(ar_usb);
+
+fail_ath6kl_usb_create:
+ if (status != 0) {
+ ath6kl_usb_destroy(ar_usb);
+ ar_usb = NULL;
+ }
+ return ar_usb;
+}
+
+static void ath6kl_usb_device_detached(struct usb_interface *interface)
+{
+ struct ath6kl_usb *ar_usb;
+
+ ar_usb = usb_get_intfdata(interface);
+ if (ar_usb == NULL)
+ return;
+
+ ath6kl_stop_txrx(ar_usb->ar);
+
+ /* Delay to wait for the target to reboot */
+ mdelay(20);
+ ath6kl_core_cleanup(ar_usb->ar);
+ ath6kl_usb_destroy(ar_usb);
+}
+
+/* exported hif usb APIs for htc pipe */
+static void hif_start(struct ath6kl *ar)
+{
+ struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+ int i;
+
+ ath6kl_usb_start_recv_pipes(device);
+
+ /* set the TX resource avail threshold for each TX pipe */
+ for (i = ATH6KL_USB_PIPE_TX_CTRL;
+ i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) {
+ device->pipes[i].urb_cnt_thresh =
+ device->pipes[i].urb_alloc / 2;
+ }
+}
+
+static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID,
+ struct sk_buff *hdr_skb, struct sk_buff *skb)
+{
+ struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+ struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID];
+ struct ath6kl_urb_context *urb_context;
+ int usb_status, status = 0;
+ struct urb *urb;
+ u8 *data;
+ u32 len;
+
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n",
+ __func__, PipeID, skb);
+
+ urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
+
+ if (urb_context == NULL) {
+ /*
+ * TODO: it is possible to run out of urbs if
+ * 2 endpoints map to the same pipe ID
+ */
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "%s pipe:%d no urbs left. URB Cnt : %d\n",
+ __func__, PipeID, pipe->urb_cnt);
+ status = -ENOMEM;
+ goto fail_hif_send;
+ }
+
+ urb_context->skb = skb;
+
+ data = skb->data;
+ len = skb->len;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL) {
+ status = -ENOMEM;
+ ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+ urb_context);
+ goto fail_hif_send;
+ }
+
+ usb_fill_bulk_urb(urb,
+ device->udev,
+ pipe->usb_pipe_handle,
+ data,
+ len,
+ ath6kl_usb_usb_transmit_complete, urb_context);
+
+ if ((len % pipe->max_packet_size) == 0) {
+ /* hit a max packet boundary on this pipe */
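+ /* send a zero-length packet so the device sees the end of the transfer */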
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->ep_address, len);
+
+ usb_anchor_urb(urb, &pipe->urb_submitted);
+ usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (usb_status) {
+ ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+ "ath6kl usb : usb bulk transmit failed %d\n",
+ usb_status);
+ usb_unanchor_urb(urb);
+ ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+ urb_context);
+ status = -EINVAL;
+ }
+ usb_free_urb(urb);
+
+fail_hif_send:
+ return status;
+}
+
+static void hif_stop(struct ath6kl *ar)
+{
+ struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+ ath6kl_usb_flush_all(device);
+}
+
+static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+ *dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
+}
+
+static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ int status = 0;
+
+ switch (svc_id) {
+ case HTC_CTRL_RSVD_SVC:
+ case WMI_CONTROL_SVC:
+ *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+ /* due to large control packets, shift to data pipe */
+ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+ break;
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+ /*
+ * Disable rxdata2 directly; it will be enabled
+ * if the FW enables rxdata2
+ */
+ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+ break;
+ case WMI_DATA_VI_SVC:
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+ ar->fw_capabilities))
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+ else
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+ /*
+ * Disable rxdata2 directly; it will be enabled
+ * if the FW enables rxdata2
+ */
+ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+ break;
+ case WMI_DATA_VO_SVC:
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+ ar->fw_capabilities))
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+ else
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+ /*
+ * Disable rxdata2 directly; it will be enabled
+ * if the FW enables rxdata2
+ */
+ *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+ break;
+ default:
+ status = -EPERM;
+ break;
+ }
+
+ return status;
+}
+
+static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
+{
+ struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+ return device->pipes[pipe_id].urb_cnt;
+}
+
+static void hif_detach_htc(struct ath6kl *ar)
+{
+ struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+ ath6kl_usb_flush_all(device);
+}
+
+static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ }
+
+ /* note: if successful, returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_sndctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 1000);
+
+ if (ret < 0) {
+ ath6kl_warn("Failed to submit usb control message: %d\n", ret);
+ kfree(buf);
+ return ret;
+ }
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ }
+
+ /* note: if successful, returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_rcvctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 2 * HZ);
+
+ if (ret < 0) {
+ ath6kl_warn("Failed to read usb control message: %d\n", ret);
+ kfree(buf);
+ return ret;
+ }
+
+ memcpy((u8 *) data, buf, size);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb,
+ u8 req_val, u8 *req_buf, u32 req_len,
+ u8 resp_val, u8 *resp_buf, u32 *resp_len)
+{
+ int ret;
+
+ /* send command */
+ ret = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0,
+ req_buf, req_len);
+
+ if (ret != 0)
+ return ret;
+
+ if (resp_buf == NULL) {
+ /* no expected response */
+ return ret;
+ }
+
+ /* get response */
+ ret = ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0,
+ resp_buf, *resp_len);
+
+ return ret;
+}
+
+static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ struct ath6kl_usb_ctrl_diag_resp_read *resp;
+ struct ath6kl_usb_ctrl_diag_cmd_read *cmd;
+ u32 resp_len;
+ int ret;
+
+ cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = ATH6KL_USB_CTRL_DIAG_CC_READ;
+ cmd->address = cpu_to_le32(address);
+ resp_len = sizeof(*resp);
+
+ ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *) cmd,
+ sizeof(struct ath6kl_usb_ctrl_diag_cmd_write),
+ ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
+ ar_usb->diag_resp_buffer, &resp_len);
+
+ if (ret) {
+ ath6kl_warn("diag read32 failed: %d\n", ret);
+ return ret;
+ }
+
+ resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
+ ar_usb->diag_resp_buffer;
+
+ *data = le32_to_cpu(resp->value);
+
+ return ret;
+}
+
+static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
+ int ret;
+
+ cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
+
+ memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write));
+ cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE);
+ cmd->address = cpu_to_le32(address);
+ cmd->value = data;
+
+ ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *) cmd,
+ sizeof(*cmd),
+ 0, NULL, NULL);
+ if (ret) {
+ ath6kl_warn("diag_write32 failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ int ret;
+
+ /* get response */
+ ret = ath6kl_usb_submit_ctrl_in(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
+ 0, 0, buf, len);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ struct ath6kl_usb *ar_usb = ar->hif_priv;
+ int ret;
+
+ /* send command */
+ ret = ath6kl_usb_submit_ctrl_out(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
+ 0, 0, buf, len);
+ if (ret) {
+ ath6kl_err("unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath6kl_usb_power_on(struct ath6kl *ar)
+{
+ hif_start(ar);
+ return 0;
+}
+
+static int ath6kl_usb_power_off(struct ath6kl *ar)
+{
+ hif_detach_htc(ar);
+ return 0;
+}
+
+static void ath6kl_usb_stop(struct ath6kl *ar)
+{
+ hif_stop(ar);
+}
+
+static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
+{
+ /*
+ * Scatter/gather is not supported for USB. Just return.
+ */
+ return;
+}
+
+static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ /*
+ * cfg80211 suspend/WOW currently not supported for USB.
+ */
+ return 0;
+}
+
+static int ath6kl_usb_resume(struct ath6kl *ar)
+{
+ /*
+ * cfg80211 resume currently not supported for USB.
+ */
+ return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_usb_ops = {
+ .diag_read32 = ath6kl_usb_diag_read32,
+ .diag_write32 = ath6kl_usb_diag_write32,
+ .bmi_read = ath6kl_usb_bmi_read,
+ .bmi_write = ath6kl_usb_bmi_write,
+ .power_on = ath6kl_usb_power_on,
+ .power_off = ath6kl_usb_power_off,
+ .stop = ath6kl_usb_stop,
+ .pipe_send = ath6kl_usb_send,
+ .pipe_get_default = ath6kl_usb_get_default_pipe,
+ .pipe_map_service = ath6kl_usb_map_service_pipe,
+ .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
+ .cleanup_scatter = ath6kl_usb_cleanup_scatter,
+ .suspend = ath6kl_usb_suspend,
+ .resume = ath6kl_usb_resume,
+};
+
+/* ath6kl usb driver registered functions */
+static int ath6kl_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(interface);
+ struct ath6kl *ar;
+ struct ath6kl_usb *ar_usb = NULL;
+ int vendor_id, product_id;
+ int ret = 0;
+
+ usb_get_dev(dev);
+
+ vendor_id = le16_to_cpu(dev->descriptor.idVendor);
+ product_id = le16_to_cpu(dev->descriptor.idProduct);
+
+ ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id);
+ ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id);
+
+ if (interface->cur_altsetting)
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n",
+ interface->cur_altsetting->desc.bInterfaceNumber);
+
+
+ if (dev->speed == USB_SPEED_HIGH)
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n");
+ else
+ ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n");
+
+ ar_usb = ath6kl_usb_create(interface);
+
+ if (ar_usb == NULL) {
+ ret = -ENOMEM;
+ goto err_usb_put;
+ }
+
+ ar = ath6kl_core_create(&ar_usb->udev->dev);
+ if (ar == NULL) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_usb_destroy;
+ }
+
+ ar->hif_priv = ar_usb;
+ ar->hif_type = ATH6KL_HIF_TYPE_USB;
+ ar->hif_ops = &ath6kl_usb_ops;
+ ar->mbox_info.block_size = 16;
+ ar->bmi.max_data_size = 252;
+
+ ar_usb->ar = ar;
+
+ ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core: %d\n", ret);
+ goto err_core_free;
+ }
+
+ return ret;
+
+err_core_free:
+ ath6kl_core_destroy(ar);
+err_usb_destroy:
+ ath6kl_usb_destroy(ar_usb);
+err_usb_put:
+ usb_put_dev(dev);
+
+ return ret;
+}
+
+static void ath6kl_usb_remove(struct usb_interface *interface)
+{
+ usb_put_dev(interface_to_usbdev(interface));
+ ath6kl_usb_device_detached(interface);
+}
+
+#ifdef CONFIG_PM
+
+static int ath6kl_usb_pm_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct ath6kl_usb *device;
+ device = usb_get_intfdata(interface);
+
+ ath6kl_usb_flush_all(device);
+ return 0;
+}
+
+static int ath6kl_usb_pm_resume(struct usb_interface *interface)
+{
+ struct ath6kl_usb *device;
+ device = usb_get_intfdata(interface);
+
+ ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA],
+ ATH6KL_USB_RX_BUFFER_SIZE);
+ ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2],
+ ATH6KL_USB_RX_BUFFER_SIZE);
+
+ return 0;
+}
+
+#else
+
+#define ath6kl_usb_pm_suspend NULL
+#define ath6kl_usb_pm_resume NULL
+
+#endif
+
+/* table of devices that work with this driver */
+static struct usb_device_id ath6kl_usb_ids[] = {
+ {USB_DEVICE(0x0cf3, 0x9375)},
+ {USB_DEVICE(0x0cf3, 0x9374)},
+ { /* Terminating entry */ },
+};
+
+MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
+
+static struct usb_driver ath6kl_usb_driver = {
+ .name = "ath6kl_usb",
+ .probe = ath6kl_usb_probe,
+ .suspend = ath6kl_usb_pm_suspend,
+ .resume = ath6kl_usb_pm_resume,
+ .disconnect = ath6kl_usb_remove,
+ .id_table = ath6kl_usb_ids,
+ .supports_autosuspend = true,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(ath6kl_usb_driver);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices");
+MODULE_LICENSE("Dual BSD/GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
new file mode 100644
index 000000000..b921005ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -0,0 +1,4166 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/in.h>
+#include "core.h"
+#include "debug.h"
+#include "testmode.h"
+#include "trace.h"
+#include "../regd.h"
+#include "../regd_common.h"
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
+
+static const s32 wmi_rate_tbl[][2] = {
+ /* {W/O SGI, with SGI} */
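+ /* values in kb/s (e.g. 54000 == 54 Mb/s) */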
+ {1000, 1000},
+ {2000, 2000},
+ {5500, 5500},
+ {11000, 11000},
+ {6000, 6000},
+ {9000, 9000},
+ {12000, 12000},
+ {18000, 18000},
+ {24000, 24000},
+ {36000, 36000},
+ {48000, 48000},
+ {54000, 54000},
+ {6500, 7200},
+ {13000, 14400},
+ {19500, 21700},
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {58500, 65000},
+ {65000, 72200},
+ {13500, 15000},
+ {27000, 30000},
+ {40500, 45000},
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {121500, 135000},
+ {135000, 150000},
+ {0, 0}
+};
+
+static const s32 wmi_rate_tbl_mcs15[][2] = {
+ /* {W/O SGI, with SGI} */
+ {1000, 1000},
+ {2000, 2000},
+ {5500, 5500},
+ {11000, 11000},
+ {6000, 6000},
+ {9000, 9000},
+ {12000, 12000},
+ {18000, 18000},
+ {24000, 24000},
+ {36000, 36000},
+ {48000, 48000},
+ {54000, 54000},
+ {6500, 7200}, /* HT 20, MCS 0 */
+ {13000, 14400},
+ {19500, 21700},
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {58500, 65000},
+ {65000, 72200},
+ {13000, 14400}, /* HT 20, MCS 8 */
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {78000, 86700},
+ {104000, 115600},
+ {117000, 130000},
+ {130000, 144400}, /* HT 20, MCS 15 */
+ {13500, 15000}, /*HT 40, MCS 0 */
+ {27000, 30000},
+ {40500, 45000},
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {121500, 135000},
+ {135000, 150000},
+ {27000, 30000}, /*HT 40, MCS 8 */
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {162000, 180000},
+ {216000, 240000},
+ {243000, 270000},
+ {270000, 300000}, /*HT 40, MCS 15 */
+ {0, 0}
+};
+
+/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
+static const u8 up_to_ac[] = {
+ WMM_AC_BE,
+ WMM_AC_BK,
+ WMM_AC_BK,
+ WMM_AC_BE,
+ WMM_AC_VI,
+ WMM_AC_VI,
+ WMM_AC_VO,
+ WMM_AC_VO,
+};
+
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
+{
+ if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
+ return;
+
+ wmi->ep_id = ep_id;
+}
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
+{
+ return wmi->ep_id;
+}
+
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+ struct ath6kl_vif *vif, *found = NULL;
+
+ if (WARN_ON(if_idx > (ar->vif_max - 1)))
+ return NULL;
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->fw_vif_idx == if_idx) {
+ found = vif;
+ break;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return found;
+}
+
+/* Performs DIX to 802.3 encapsulation for transmit packets.
+ * Assumes the entire DIX header is contigous and that there is
+ * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
+ */
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr *eth_hdr;
+ size_t new_len;
+ __be16 type;
+ u8 *datap;
+ u16 size;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
+ if (skb_headroom(skb) < size)
+ return -ENOMEM;
+
+ eth_hdr = (struct ethhdr *) skb->data;
+ type = eth_hdr->h_proto;
+
+ if (!is_ethertype(be16_to_cpu(type))) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: pkt is already in 802.3 format\n", __func__);
+ return 0;
+ }
+
+ new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);
+
+ skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
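+ /* the 802.3 length field covers the LLC/SNAP header plus the payload */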
+ eth_hdr->h_proto = cpu_to_be16(new_len);
+
+ memcpy(datap, eth_hdr, sizeof(*eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
+ llc_hdr->dsap = 0xAA;
+ llc_hdr->ssap = 0xAA;
+ llc_hdr->cntl = 0x03;
+ llc_hdr->org_code[0] = 0x0;
+ llc_hdr->org_code[1] = 0x0;
+ llc_hdr->org_code[2] = 0x0;
+ llc_hdr->eth_type = type;
+
+ return 0;
+}
+
+static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 *version, void *tx_meta_info)
+{
+ struct wmi_tx_meta_v1 *v1;
+ struct wmi_tx_meta_v2 *v2;
+
+ if (WARN_ON(skb == NULL || version == NULL))
+ return -EINVAL;
+
+ switch (*version) {
+ case WMI_META_VERSION_1:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v1 = (struct wmi_tx_meta_v1 *) skb->data;
+ v1->pkt_id = 0;
+ v1->rate_plcy_id = 0;
+ *version = WMI_META_VERSION_1;
+ break;
+ case WMI_META_VERSION_2:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v2 = (struct wmi_tx_meta_v2 *) skb->data;
+ memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
+ sizeof(struct wmi_tx_meta_v2));
+ break;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, u32 flags,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info, u8 if_idx)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
+ return -EINVAL;
+
+ if (tx_meta_info) {
+ ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
+ if (ret)
+ return ret;
+ }
+
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *)skb->data;
+ memset(data_hdr, 0, sizeof(struct wmi_data_hdr));
+
+ data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
+
+ if (flags & WMI_DATA_HDR_FLAGS_MORE)
+ data_hdr->info |= WMI_DATA_HDR_MORE;
+
+ if (flags & WMI_DATA_HDR_FLAGS_EOSP)
+ data_hdr->info3 |= cpu_to_le16(WMI_DATA_HDR_EOSP);
+
+ data_hdr->info2 |= cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
+ data_hdr->info3 |= cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
+
+ return 0;
+}
+
+u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
+{
+ struct iphdr *ip_hdr = (struct iphdr *) pkt;
+ u8 ip_pri;
+
+ /*
+ * Determine IPTOS priority
+ *
+ * IP-TOS - 8bits
+ * : DSCP(6-bits) ECN(2-bits)
+ * : DSCP - P2 P1 P0 X X X
+ * where (P2 P1 P0) form 802.1D
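+ * (e.g. TOS 0xB8, i.e. DSCP EF, gives P2 P1 P0 = 101 -> user priority 5)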
+ */
+ ip_pri = ip_hdr->tos >> 5;
+ ip_pri &= 0x7;
+
+ if ((layer2_pri & 0x7) > ip_pri)
+ return (u8) layer2_pri & 0x7;
+ else
+ return ip_pri;
+}
+
+u8 ath6kl_wmi_get_traffic_class(u8 user_priority)
+{
+ return up_to_ac[user_priority & 0x7];
+}
+
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb,
+ u32 layer2_priority, bool wmm_enabled,
+ u8 *ac)
+{
+ struct wmi_data_hdr *data_hdr;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct wmi_create_pstream_cmd cmd;
+ u32 meta_size, hdr_size;
+ u16 ip_type = IP_ETHERTYPE;
+ u8 stream_exist, usr_pri;
+ u8 traffic_class = WMM_AC_BE;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ data_hdr = (struct wmi_data_hdr *) datap;
+
+ meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;
+
+ if (!wmm_enabled) {
+ /* If WMM is disabled all traffic goes as BE traffic */
+ usr_pri = 0;
+ } else {
+ hdr_size = sizeof(struct ethhdr);
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
+ sizeof(struct
+ wmi_data_hdr) +
+ meta_size + hdr_size);
+
+ if (llc_hdr->eth_type == htons(ip_type)) {
+ /*
+ * Extract the endpoint info from the TOS field
+ * in the IP header.
+ */
+ usr_pri =
+ ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr),
+ layer2_priority);
+ } else {
+ usr_pri = layer2_priority & 0x7;
+ }
+
+ /*
+ * Queue the EAPOL frames in the same WMM_AC_VO queue
+ * as that of management frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ usr_pri = WMI_VOICE_USER_PRIORITY;
+ }
+
+ /*
+ * workaround for WMM S5
+ *
+ * FIXME: wmi->traffic_class is always 100 so this test doesn't
+ * make sense
+ */
+ if ((wmi->traffic_class == WMM_AC_VI) &&
+ ((usr_pri == 5) || (usr_pri == 4)))
+ usr_pri = 1;
+
+ /* Convert user priority to traffic class */
+ traffic_class = up_to_ac[usr_pri & 0x7];
+
+ wmi_data_hdr_set_up(data_hdr, usr_pri);
+
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ if (!(stream_exist & (1 << traffic_class))) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.traffic_class = traffic_class;
+ cmd.user_pri = usr_pri;
+ cmd.inactivity_int =
+ cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
+ /* Implicit streams are created with TSID 0xFF */
+ cmd.tsid = WMI_IMPLICIT_PSTREAM;
+ ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
+ }
+
+ *ac = traffic_class;
+
+ return 0;
+}
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ieee80211_hdr_3addr *pwh, wh;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u32 hdr_size;
+ u8 *datap;
+ __le16 sub_type;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ pwh = (struct ieee80211_hdr_3addr *) datap;
+
+ sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
+
+ memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));
+
+ /* Strip off the 802.11 header */
+ if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
+ sizeof(u32));
+ skb_pull(skb, hdr_size);
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
+ skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+ }
+
+ datap = skb->data;
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
+
+ memset(&eth_hdr, 0, sizeof(eth_hdr));
+ eth_hdr.h_proto = llc_hdr->eth_type;
+
+ switch ((le16_to_cpu(wh.frame_control)) &
+ (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+ case 0:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_TODS:
+ memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+ break;
+ }
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ skb_push(skb, sizeof(eth_hdr));
+
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+/*
+ * Performs 802.3 to DIX encapsulation for received packets.
+ * Assumes the entire 802.3 header is contiguous.
+ */
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+
+ memcpy(&eth_hdr, datap, sizeof(eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
+ eth_hdr.h_proto = llc_hdr->eth_type;
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
+{
+ struct tx_complete_msg_v1 *msg_v1;
+ struct wmi_tx_complete_event *evt;
+ int index;
+ u16 size;
+
+ evt = (struct wmi_tx_complete_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
+ evt->num_msg, evt->msg_len, evt->msg_type);
+
+ for (index = 0; index < evt->num_msg; index++) {
+ size = sizeof(struct wmi_tx_complete_event) +
+ (index * sizeof(struct tx_complete_msg_v1));
+ msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
+ msg_v1->status, msg_v1->pkt_id,
+ msg_v1->rate_idx, msg_v1->ack_failures);
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
+ int len, struct ath6kl_vif *vif)
+{
+ struct wmi_remain_on_chnl_event *ev;
+ u32 freq;
+ u32 dur;
+ struct ieee80211_channel *chan;
+ struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_remain_on_chnl_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dur = le32_to_cpu(ev->duration);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
+ freq, dur);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
+ if (!chan) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
+ return -EINVAL;
+ }
+ id = vif->last_roc_id;
+ cfg80211_ready_on_channel(&vif->wdev, id, chan,
+ dur, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
+ u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_cancel_remain_on_chnl_event *ev;
+ u32 freq;
+ u32 dur;
+ struct ieee80211_channel *chan;
+ struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_cancel_remain_on_chnl_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dur = le32_to_cpu(ev->duration);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: freq=%u dur=%u status=%u\n",
+ freq, dur, ev->status);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
+ if (!chan) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
+ return -EINVAL;
+ }
+ if (vif->last_cancel_roc_id &&
+ vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+ id = vif->last_cancel_roc_id; /* event for cancel command */
+ else
+ id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+ vif->last_cancel_roc_id = 0;
+ cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_tx_status_event *ev;
+ u32 id;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_tx_status_event *) datap;
+ id = le32_to_cpu(ev->id);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
+ id, ev->ack_status);
+ if (wmi->last_mgmt_tx_frame) {
+ cfg80211_mgmt_tx_status(&vif->wdev, id,
+ wmi->last_mgmt_tx_frame,
+ wmi->last_mgmt_tx_frame_len,
+ !!ev->ack_status, GFP_ATOMIC);
+ kfree(wmi->last_mgmt_tx_frame);
+ wmi->last_mgmt_tx_frame = NULL;
+ wmi->last_mgmt_tx_frame_len = 0;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_p2p_rx_probe_req_event *ev;
+ u32 freq;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_rx_probe_req_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dlen = le16_to_cpu(ev->len);
+ if (datap + len < ev->data + dlen) {
+ ath6kl_err("invalid wmi_p2p_rx_probe_req_event: len=%d dlen=%u\n",
+ len, dlen);
+ return -EINVAL;
+ }
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "rx_probe_req: len=%u freq=%u probe_req_report=%d\n",
+ dlen, freq, vif->probe_req_report);
+
+ if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0);
+
+ return 0;
+}
+
+static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
+{
+ struct wmi_p2p_capabilities_event *ev;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_capabilities_event *) datap;
+ dlen = le16_to_cpu(ev->len);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_capab: len=%u\n", dlen);
+
+ return 0;
+}
+
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_rx_action_event *ev;
+ u32 freq;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_rx_action_event *) datap;
+ freq = le32_to_cpu(ev->freq);
+ dlen = le16_to_cpu(ev->len);
+ if (datap + len < ev->data + dlen) {
+ ath6kl_err("invalid wmi_rx_action_event: len=%d dlen=%u\n",
+ len, dlen);
+ return -EINVAL;
+ }
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0);
+
+ return 0;
+}
+
+static int ath6kl_wmi_p2p_info_event_rx(u8 *datap, int len)
+{
+ struct wmi_p2p_info_event *ev;
+ u32 flags;
+ u16 dlen;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ ev = (struct wmi_p2p_info_event *) datap;
+ flags = le32_to_cpu(ev->info_req_flags);
+ dlen = le16_to_cpu(ev->len);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: flags=%x len=%d\n", flags, dlen);
+
+ if (flags & P2P_FLAG_CAPABILITIES_REQ) {
+ struct wmi_p2p_capabilities *cap;
+ if (dlen < sizeof(*cap))
+ return -EINVAL;
+ cap = (struct wmi_p2p_capabilities *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: GO Power Save = %d\n",
+ cap->go_power_save);
+ }
+
+ if (flags & P2P_FLAG_MACADDR_REQ) {
+ struct wmi_p2p_macaddr *mac;
+ if (dlen < sizeof(*mac))
+ return -EINVAL;
+ mac = (struct wmi_p2p_macaddr *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: MAC Address = %pM\n",
+ mac->mac_addr);
+ }
+
+ if (flags & P2P_FLAG_HMODEL_REQ) {
+ struct wmi_p2p_hmodel *mod;
+ if (dlen < sizeof(*mod))
+ return -EINVAL;
+ mod = (struct wmi_p2p_hmodel *) ev->data;
+ ath6kl_dbg(ATH6KL_DBG_WMI, "p2p_info: P2P Model = %d (%s)\n",
+ mod->p2p_model,
+ mod->p2p_model ? "host" : "firmware");
+ }
+ return 0;
+}
+
+static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
+{
+ struct sk_buff *skb;
+
+ skb = ath6kl_buf_alloc(size);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, size);
+ if (size)
+ memset(skb->data, 0, size);
+
+ return skb;
+}
+
+/* Send a "simple" wmi command -- one with no arguments */
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_cmd_id cmd_id)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
+
+ if (len < sizeof(struct wmi_ready_event_2))
+ return -EINVAL;
+
+ ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
+ le32_to_cpu(ev->sw_version),
+ le32_to_cpu(ev->abi_version), ev->phy_cap);
+
+ return 0;
+}
+
+/*
+ * Mechanism to modify the roaming behavior in the firmware. The lower rssi
+ * at which the station has to roam can be passed with
+ * WMI_SET_LRSSI_SCAN_PARAMS. Subtract 96 from RSSI to get the signal level
+ * in dBm.
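+ * For example, an lrssi value of 36 corresponds to roughly -60 dBm (36 - 96).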
+ */
+int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+
+ cmd->info.params.lrssi_scan_period = cpu_to_le16(DEF_LRSSI_SCAN_PERIOD);
+ cmd->info.params.lrssi_scan_threshold = a_cpu_to_sle16(lrssi +
+ DEF_SCAN_FOR_ROAM_INTVL);
+ cmd->info.params.lrssi_roam_threshold = a_cpu_to_sle16(lrssi);
+ cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
+ cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
+
+ ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return 0;
+}
+
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+
+ memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+ cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_intvl)
+{
+ struct sk_buff *skb;
+ struct set_beacon_int_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_beacon_int_cmd *) skb->data;
+
+ cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
+{
+ struct sk_buff *skb;
+ struct set_dtim_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_dtim_cmd *) skb->data;
+
+ cmd->dtim_period = cpu_to_le32(dtim_period);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+
+ cmd->info.roam_mode = mode;
+ cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_connect_event *ev;
+ u8 *pie, *peie;
+
+ if (len < sizeof(struct wmi_connect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_connect_event *) datap;
+
+ if (vif->nw_type == AP_NETWORK) {
+ /* AP mode start/STA connected event */
+ struct net_device *dev = vif->ndev;
+ if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: freq %d bssid %pM (AP started)\n",
+ __func__, le16_to_cpu(ev->u.ap_bss.ch),
+ ev->u.ap_bss.bssid);
+ ath6kl_connect_ap_mode_bss(
+ vif, le16_to_cpu(ev->u.ap_bss.ch));
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: aid %u mac_addr %pM auth=%u keymgmt=%u cipher=%u apsd_info=%u (STA connected)\n",
+ __func__, ev->u.ap_sta.aid,
+ ev->u.ap_sta.mac_addr,
+ ev->u.ap_sta.auth,
+ ev->u.ap_sta.keymgmt,
+ le16_to_cpu(ev->u.ap_sta.cipher),
+ ev->u.ap_sta.apsd_info);
+
+ ath6kl_connect_ap_mode_sta(
+ vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+ ev->u.ap_sta.keymgmt,
+ le16_to_cpu(ev->u.ap_sta.cipher),
+ ev->u.ap_sta.auth, ev->assoc_req_len,
+ ev->assoc_info + ev->beacon_ie_len,
+ ev->u.ap_sta.apsd_info);
+ }
+ return 0;
+ }
+
+ /* STA/IBSS mode connection event */
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi event connect freq %d bssid %pM listen_intvl %d beacon_intvl %d type %d\n",
+ le16_to_cpu(ev->u.sta.ch), ev->u.sta.bssid,
+ le16_to_cpu(ev->u.sta.listen_intvl),
+ le16_to_cpu(ev->u.sta.beacon_intvl),
+ le32_to_cpu(ev->u.sta.nw_type));
+
+ /* Start of assoc rsp IEs */
+ pie = ev->assoc_info + ev->beacon_ie_len +
+ ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */
+
+ /* End of assoc rsp IEs */
+ peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
+ ev->assoc_resp_len;
+
+ while (pie < peie) {
+ switch (*pie) {
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
+ pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
+ /* WMM OUI (00:50:F2) */
+ if (pie[1] > 5 &&
+ pie[6] == WMM_PARAM_OUI_SUBTYPE)
+ wmi->is_wmm_enabled = true;
+ }
+ break;
+ }
+
+ if (wmi->is_wmm_enabled)
+ break;
+
+ pie += pie[1] + 2;
+ }
+
+ ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
+ ev->u.sta.bssid,
+ le16_to_cpu(ev->u.sta.listen_intvl),
+ le16_to_cpu(ev->u.sta.beacon_intvl),
+ le32_to_cpu(ev->u.sta.nw_type),
+ ev->beacon_ie_len, ev->assoc_req_len,
+ ev->assoc_resp_len, ev->assoc_info);
+
+ return 0;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country(u16 countryCode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countryCode == countryCode)
+ return &allCountries[i];
+ }
+
+ return NULL;
+}
+
+static struct reg_dmn_pair_mapping *
+ath6kl_get_regpair(u16 regdmn)
+{
+ int i;
+
+ if (regdmn == NO_ENUMRD)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+ if (regDomainPairs[i].reg_domain == regdmn)
+ return &regDomainPairs[i];
+ }
+
+ return NULL;
+}
+
+static struct country_code_to_enum_rd *
+ath6kl_regd_find_country_by_rd(u16 regdmn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].regDmnEnum == regdmn)
+ return &allCountries[i];
+ }
+
+ return NULL;
+}
+
+static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
+{
+ struct ath6kl_wmi_regdomain *ev;
+ struct country_code_to_enum_rd *country = NULL;
+ struct reg_dmn_pair_mapping *regpair = NULL;
+ char alpha2[2];
+ u32 reg_code;
+
+ ev = (struct ath6kl_wmi_regdomain *) datap;
+ reg_code = le32_to_cpu(ev->reg_code);
+
+ if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
+ country = ath6kl_regd_find_country((u16) reg_code);
+ } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
+ regpair = ath6kl_get_regpair((u16) reg_code);
+ country = ath6kl_regd_find_country_by_rd((u16) reg_code);
+ if (regpair)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
+ regpair->reg_domain);
+ else
+ ath6kl_warn("Regpair not found reg_code 0x%0x\n",
+ reg_code);
+ }
+
+ if (country && wmi->parent_dev->wiphy_registered) {
+ alpha2[0] = country->isoName[0];
+ alpha2[1] = country->isoName[1];
+
+ regulatory_hint(wmi->parent_dev->wiphy, alpha2);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
+ alpha2[0], alpha2[1]);
+ }
+}
+
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_disconnect_event *ev;
+ wmi->traffic_class = 100;
+
+ if (len < sizeof(struct wmi_disconnect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_disconnect_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi event disconnect proto_reason %d bssid %pM wmi_reason %d assoc_resp_len %d\n",
+ le16_to_cpu(ev->proto_reason_status), ev->bssid,
+ ev->disconn_reason, ev->assoc_resp_len);
+
+ wmi->is_wmm_enabled = false;
+
+ ath6kl_disconnect_event(vif, ev->disconn_reason,
+ ev->bssid, ev->assoc_resp_len, ev->assoc_info,
+ le16_to_cpu(ev->proto_reason_status));
+
+ return 0;
+}
+
+static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_peer_node_event *ev;
+
+ if (len < sizeof(struct wmi_peer_node_event))
+ return -EINVAL;
+
+ ev = (struct wmi_peer_node_event *) datap;
+
+ if (ev->event_code == PEER_NODE_JOIN_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+ else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_tkip_micerr_event *ev;
+
+ if (len < sizeof(struct wmi_tkip_micerr_event))
+ return -EINVAL;
+
+ ev = (struct wmi_tkip_micerr_event *) datap;
+
+ ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
+
+ return 0;
+}
+
+void ath6kl_wmi_sscan_timer(unsigned long ptr)
+{
+ struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+
+ cfg80211_sched_scan_results(vif->ar->wiphy);
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_bss_info_hdr2 *bih;
+ u8 *buf;
+ struct ieee80211_channel *channel;
+ struct ath6kl *ar = wmi->parent_dev;
+ struct cfg80211_bss *bss;
+
+ if (len <= sizeof(struct wmi_bss_info_hdr2))
+ return -EINVAL;
+
+ bih = (struct wmi_bss_info_hdr2 *) datap;
+ buf = datap + sizeof(struct wmi_bss_info_hdr2);
+ len -= sizeof(struct wmi_bss_info_hdr2);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "bss info evt - ch %u, snr %d, rssi %d, bssid \"%pM\" "
+ "frame_type=%d\n",
+ bih->ch, bih->snr, bih->snr - 95, bih->bssid,
+ bih->frame_type);
+
+ if (bih->frame_type != BEACON_FTYPE &&
+ bih->frame_type != PROBERESP_FTYPE)
+ return 0; /* Only update BSS table for now */
+
+ if (bih->frame_type == BEACON_FTYPE &&
+ test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
+ }
+
+ channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
+ if (channel == NULL)
+ return -EINVAL;
+
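+ /* fixed fields: 8-byte timestamp, 2-byte beacon interval, 2-byte capability */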
+ if (len < 8 + 2 + 2)
+ return -EINVAL;
+
+ if (bih->frame_type == BEACON_FTYPE &&
+ test_bit(CONNECTED, &vif->flags) &&
+ memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
+ const u8 *tim;
+ tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
+ len - 8 - 2 - 2);
+ if (tim && tim[1] >= 2) {
+ vif->assoc_bss_dtim_period = tim[3];
+ set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
+ }
+ }
+
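+ /*
+ * signal is given in mBm (100 * dBm), assuming the wiphy was registered
+ * with CFG80211_SIGNAL_TYPE_MBM
+ */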
+ bss = cfg80211_inform_bss(ar->wiphy, channel,
+ bih->frame_type == BEACON_FTYPE ?
+ CFG80211_BSS_FTYPE_BEACON :
+ CFG80211_BSS_FTYPE_PRESP,
+ bih->bssid, get_unaligned_le64((__le64 *)buf),
+ get_unaligned_le16(((__le16 *)buf) + 5),
+ get_unaligned_le16(((__le16 *)buf) + 4),
+ buf + 8 + 2 + 2, len - 8 - 2 - 2,
+ (bih->snr - 95) * 100, GFP_ATOMIC);
+ if (bss == NULL)
+ return -ENOMEM;
+ cfg80211_put_bss(ar->wiphy, bss);
+
+ /*
+ * Firmware doesn't return any event when scheduled scan has
+ * finished, so we need to use a timer to find out when there are
+ * no more results.
+ *
+ * The timer is started when the first bss info is received; otherwise
+ * the timer would never fire if the scan interval is short
+ * enough.
+ */
+ if (test_bit(SCHED_SCANNING, &vif->flags) &&
+ !timer_pending(&vif->sched_scan_timer)) {
+ mod_timer(&vif->sched_scan_timer, jiffies +
+ msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
+ }
+
+ return 0;
+}
+
+/* Inactivity timeout of a fatpipe (pstream) at the target */
+static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_pstream_timeout_event *ev;
+
+ if (len < sizeof(struct wmi_pstream_timeout_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pstream_timeout_event *) datap;
+
+ /*
+ * When the pstream (fat pipe == AC) times out, it means there were
+ * no thinStreams within this pstream and it got implicitly created
+ * due to data flow on this AC. We start the inactivity timer only
+ * for implicitly created pstreams. Just reset the host state.
+ */
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[ev->traffic_class] = 0;
+ wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate inactivity to driver layer for this fatpipe (pstream) */
+ ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);
+
+ return 0;
+}
+
+static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_bit_rate_reply *reply;
+ s32 rate;
+ u32 sgi, index;
+
+ if (len < sizeof(struct wmi_bit_rate_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_bit_rate_reply *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
+
+ if (reply->rate_index == (s8) RATE_AUTO) {
+ rate = RATE_AUTO;
+ } else {
+ index = reply->rate_index & 0x7f;
+ if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
+ return -EINVAL;
+
+ sgi = (reply->rate_index & 0x80) ? 1 : 0;
+ rate = wmi_rate_tbl[index][sgi];
+ }
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_test_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_tm_rx_event(wmi->parent_dev, datap, len);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_fix_rates_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_channel_list_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tx_pwr_reply *reply;
+
+ if (len < sizeof(struct wmi_tx_pwr_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_tx_pwr_reply *) datap;
+ ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);
+
+ return 0;
+}
+
+static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_get_keepalive_cmd))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_scan_complete_event *ev;
+
+ ev = (struct wmi_scan_complete_event *) datap;
+
+ ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
+ wmi->is_probe_ssid = false;
+
+ return 0;
+}
+
+static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
+ int len, struct ath6kl_vif *vif)
+{
+ struct wmi_neighbor_report_event *ev;
+ u8 i;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+ ev = (struct wmi_neighbor_report_event *) datap;
+ if (sizeof(*ev) + ev->num_neighbors * sizeof(struct wmi_neighbor_info)
+ > len) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "truncated neighbor event (num=%d len=%d)\n",
+ ev->num_neighbors, len);
+ return -EINVAL;
+ }
+ for (i = 0; i < ev->num_neighbors; i++) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
+ i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
+ ev->neighbor[i].bss_flags);
+ cfg80211_pmksa_candidate_notify(vif->ndev, i,
+ ev->neighbor[i].bssid,
+ !!(ev->neighbor[i].bss_flags &
+ WMI_PREAUTH_CAPABLE_BSS),
+ GFP_ATOMIC);
+ }
+
+ return 0;
+}
+
+/*
+ * Target is reporting a programming error. This is for
+ * developer aid only. Target only checks a few common violations
+ * and it is the responsibility of the host to do all error checking.
+ * Behavior of target after wmi error event is undefined.
+ * A reset is recommended.
+ */
+static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ const char *type = "unknown error";
+ struct wmi_cmd_error_event *ev;
+ ev = (struct wmi_cmd_error_event *) datap;
+
+ switch (ev->err_code) {
+ case INVALID_PARAM:
+ type = "invalid parameter";
+ break;
+ case ILLEGAL_STATE:
+ type = "invalid state";
+ break;
+ case INTERNAL_ERROR:
+ type = "internal error";
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
+ ev->cmd_id, type);
+
+ return 0;
+}
+
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ ath6kl_tgt_stats_event(vif, datap, len);
+
+ return 0;
+}
+
+static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next higher value */
+ for (index = 0; index < size; index++) {
+ if (rssi < sq_thresh->upper_threshold[index]) {
+ threshold = (u8) sq_thresh->upper_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next lower value */
+ for (index = 0; index < size; index++) {
+ if (rssi > sq_thresh->lower_threshold[index]) {
+ threshold = (u8) sq_thresh->lower_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
+ struct wmi_rssi_threshold_params_cmd *rssi_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_rssi_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
+ memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_rssi_threshold_event *reply;
+ struct wmi_rssi_threshold_params_cmd cmd;
+ struct sq_threshold_params *sq_thresh;
+ enum wmi_rssi_threshold_val new_threshold;
+ u8 upper_rssi_threshold, lower_rssi_threshold;
+ s16 rssi;
+ int ret;
+
+ if (len < sizeof(struct wmi_rssi_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_rssi_threshold_event *) datap;
+ new_threshold = (enum wmi_rssi_threshold_val) reply->range;
+ rssi = a_sle16_to_cpu(reply->rssi);
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (rssi < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper rssi threshold event: %d\n",
+ rssi);
+ } else if ((rssi < sq_thresh->upper_threshold[1]) &&
+ (rssi >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[2]) &&
+ (rssi >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[3]) &&
+ (rssi >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[4]) &&
+ (rssi >= sq_thresh->upper_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[5]) &&
+ (rssi >= sq_thresh->upper_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
+ } else if (rssi >= sq_thresh->upper_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (rssi > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower rssi threshold event: %d %d\n",
+ rssi, sq_thresh->lower_threshold[0]);
+ } else if ((rssi > sq_thresh->lower_threshold[1]) &&
+ (rssi <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[2]) &&
+ (rssi <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[3]) &&
+ (rssi <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[4]) &&
+ (rssi <= sq_thresh->lower_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[5]) &&
+ (rssi <= sq_thresh->lower_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
+ } else if (rssi <= sq_thresh->lower_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
+ cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure rssi thresholds\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_cac_event *reply;
+ struct ieee80211_tspec_ie *ts;
+ u16 active_tsids, tsinfo;
+ u8 tsid, index;
+ u8 ts_id;
+
+ if (len < sizeof(struct wmi_cac_event))
+ return -EINVAL;
+
+ reply = (struct wmi_cac_event *) datap;
+
+ if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
+ (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK;
+
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, tsid);
+ } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
+ /*
+ * Following assumes that there is only one outstanding
+ * ADDTS request when this event is received
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
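+ /* Pick the lowest active TSID in this AC; it is assumed to belong to the outstanding ADDTS request */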
+ for (index = 0; index < sizeof(active_tsids) * 8; index++) {
+ if ((active_tsids >> index) & 1)
+ break;
+ }
+ if (index < (sizeof(active_tsids) * 8))
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, index);
+ } else if (reply->cac_indication == CAC_INDICATION_DELETE) {
+ /*
+ * Clear the active tsids and handle deletion of a QoS
+ * stream initiated by the AP.
+ */
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
+ false);
+ wmi->fat_pipe_exist &= ~(1 << reply->ac);
+ }
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_txe_notify_event *ev;
+ u32 rate, pkts;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ if (vif->sme_state != SME_CONNECTED)
+ return -ENOTCONN;
+
+ ev = (struct wmi_txe_notify_event *) datap;
+ rate = le32_to_cpu(ev->rate);
+ pkts = le32_to_cpu(ev->pkts);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d%% pkts %d intvl %ds\n",
+ vif->bssid, rate, pkts, vif->txe_intvl);
+
+ cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
+ rate, vif->txe_intvl, GFP_KERNEL);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct sk_buff *skb;
+ struct wmi_txe_notify_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_txe_notify_cmd *) skb->data;
+ cmd->rate = cpu_to_le32(rate);
+ cmd->pkts = cpu_to_le32(pkts);
+ cmd->intvl = cpu_to_le32(intvl);
+
+ return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi)
+{
+ struct sk_buff *skb;
+ struct wmi_set_rssi_filter_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
+ cmd->rssi = rssi;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
+ struct wmi_snr_threshold_params_cmd *snr_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_snr_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
+ memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_snr_threshold_event *reply;
+ struct sq_threshold_params *sq_thresh;
+ struct wmi_snr_threshold_params_cmd cmd;
+ enum wmi_snr_threshold_val new_threshold;
+ u8 upper_snr_threshold, lower_snr_threshold;
+ s16 snr;
+ int ret;
+
+ if (len < sizeof(struct wmi_snr_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_snr_threshold_event *) datap;
+
+ new_threshold = (enum wmi_snr_threshold_val) reply->range;
+ snr = reply->snr;
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target.
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (snr < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper snr threshold event: %d\n",
+ snr);
+ } else if ((snr < sq_thresh->upper_threshold[1]) &&
+ (snr >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[2]) &&
+ (snr >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[3]) &&
+ (snr >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
+ } else if (snr >= sq_thresh->upper_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (snr > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower snr threshold event: %d %d\n",
+ snr, sq_thresh->lower_threshold[0]);
+ } else if ((snr > sq_thresh->lower_threshold[1]) &&
+ (snr <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD4_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[2]) &&
+ (snr <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD3_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[3]) &&
+ (snr <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD2_BELOW;
+ } else if (snr <= sq_thresh->lower_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = upper_snr_threshold;
+ cmd.thresh_below1_val = lower_snr_threshold;
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "snr: %d, threshold: %d, lower: %d, upper: %d\n",
+ snr, new_threshold,
+ lower_snr_threshold, upper_snr_threshold);
+
+ ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure snr threshold\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ u16 ap_info_entry_size;
+ struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
+ struct wmi_ap_info_v1 *ap_info_v1;
+ u8 index;
+
+ if (len < sizeof(struct wmi_aplist_event) ||
+ ev->ap_list_ver != APLIST_VER1)
+ return -EINVAL;
+
+ ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
+ ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "number of APs in aplist event: %d\n", ev->num_ap);
+
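+ /* wmi_aplist_event already contains one ap_info entry, hence the (num_ap - 1) in the length check below */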
+ if (len < (int) (sizeof(struct wmi_aplist_event) +
+ (ev->num_ap - 1) * ap_info_entry_size))
+ return -EINVAL;
+
+ /* AP list version 1 contents */
+ for (index = 0; index < ev->num_ap; index++) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
+ index, ap_info_v1->bssid, ap_info_v1->channel);
+ ap_info_v1++;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum htc_endpoint_id ep_id = wmi->ep_id;
+ int ret;
+ u16 info1;
+
+ if (WARN_ON(skb == NULL ||
+ (if_idx > (wmi->parent_dev->vif_max - 1)))) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
+ cmd_id, skb->len, sync_flag);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi tx ",
+ skb->data, skb->len);
+
+ if (sync_flag >= END_WMIFLAG) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all data currently queued is transmitted before
+ * the cmd execution. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi, if_idx);
+ }
+
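+ /* Prepend the WMI command header carrying the command id and interface index */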
+ skb_push(skb, sizeof(struct wmi_cmd_hdr));
+
+ cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
+ info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+ cmd_hdr->info1 = cpu_to_le16(info1);
+
+ /* Only for OPT_TX_CMD, use BE endpoint. */
+ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
+ ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
+ false, false, 0, NULL, if_idx);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
+ }
+
+ ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all new data queued waits for the command to
+ * execute. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi, if_idx);
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype)
+{
+ struct sk_buff *skb;
+ struct wmi_connect_cmd *cc;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "wmi connect bssid %pM freq %d flags 0x%x ssid_len %d type %d dot11_auth %d auth %d pairwise %d group %d\n",
+ bssid, channel, ctrl_flags, ssid_len, nw_type,
+ dot11_auth_mode, auth_mode, pairwise_crypto, group_crypto);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI, NULL, "ssid ", ssid, ssid_len);
+
+ wmi->traffic_class = 100;
+
+ if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
+ return -EINVAL;
+
+ if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_connect_cmd *) skb->data;
+
+ if (ssid_len)
+ memcpy(cc->ssid, ssid, ssid_len);
+
+ cc->ssid_len = ssid_len;
+ cc->nw_type = nw_type;
+ cc->dot11_auth_mode = dot11_auth_mode;
+ cc->auth_mode = auth_mode;
+ cc->prwise_crypto_type = pairwise_crypto;
+ cc->prwise_crypto_len = pairwise_crypto_len;
+ cc->grp_crypto_type = group_crypto;
+ cc->grp_crypto_len = group_crypto_len;
+ cc->ch = cpu_to_le16(channel);
+ cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+ cc->nw_subtype = nw_subtype;
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel)
+{
+ struct sk_buff *skb;
+ struct wmi_reconnect_cmd *cc;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi reconnect bssid %pM freq %d\n",
+ bssid, channel);
+
+ wmi->traffic_class = 100;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_reconnect_cmd *) skb->data;
+ cc->channel = cpu_to_le16(channel);
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi disconnect\n");
+
+ wmi->traffic_class = 100;
+
+ /* Disconnect command does not need to do a SYNC before. */
+ ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+/* ath6kl_wmi_startscan_cmd is to be deprecated. Use
+ * ath6kl_wmi_beginscan_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time,
+ u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list)
+{
+ struct sk_buff *skb;
+ struct wmi_start_scan_cmd *sc;
+ s8 size;
+ int i, ret;
+
+ size = sizeof(struct wmi_start_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_start_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->num_ch = num_chan;
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+/*
+ * Compared to the old startscan, beginscan supports P2P mgmt operations
+ * using the station interface and sends additional information like
+ * supported rates to advertise and xmit rates for probe requests.
+ */
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
+{
+ struct ieee80211_supported_band *sband;
+ struct sk_buff *skb;
+ struct wmi_begin_scan_cmd *sc;
+ s8 size, *supp_rates;
+ int i, band, ret;
+ struct ath6kl *ar = wmi->parent_dev;
+ int num_rates;
+ u32 ratemask;
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ return ath6kl_wmi_startscan_cmd(wmi, if_idx,
+ scan_type, force_fgscan,
+ is_legacy, home_dwell_time,
+ force_scan_interval,
+ num_chan, ch_list);
+ }
+
+ size = sizeof(struct wmi_begin_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_begin_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->no_cck = cpu_to_le32(no_cck);
+ sc->num_ch = num_chan;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = ar->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ if (WARN_ON(band >= ATH6KL_NUM_BANDS))
+ break;
+
+ ratemask = rates[band];
+ supp_rates = sc->supp_rates[band].rates;
+ num_rates = 0;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & ratemask) == 0)
+ continue; /* skip rate */
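+ /* bitrate is in units of 100 kbps; convert to the 500 kbps units used in the rate set */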
+ supp_rates[num_rates++] =
+ (u8) (sband->bitrates[i].bitrate / 5);
+ }
+ sc->supp_rates[band].nrates = num_rates;
+ }
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable)
+{
+ struct sk_buff *skb;
+ struct wmi_enable_sched_scan_cmd *sc;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s scheduled scan on vif %d\n",
+ enable ? "enabling" : "disabling", if_idx);
+ sc = (struct wmi_enable_sched_scan_cmd *) skb->data;
+ sc->enable = enable ? 1 : 0;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_ENABLE_SCHED_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+ u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_scan_params_cmd *sc;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_scan_params_cmd *) skb->data;
+ sc->fg_start_period = cpu_to_le16(fg_start_sec);
+ sc->fg_end_period = cpu_to_le16(fg_end_sec);
+ sc->bg_period = cpu_to_le16(bg_sec);
+ sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec);
+ sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec);
+ sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec);
+ sc->short_scan_ratio = short_scan_ratio;
+ sc->scan_ctrl_flags = scan_ctrl_flag;
+ sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
+ sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
+{
+ struct sk_buff *skb;
+ struct wmi_bss_filter_cmd *cmd;
+ int ret;
+
+ if (filter >= LAST_BSS_FILTER)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_filter_cmd *) skb->data;
+ cmd->bss_filter = filter;
+ cmd->ie_mask = cpu_to_le32(ie_mask);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_probed_ssid_cmd *cmd;
+ int ret;
+
+ if (index >= MAX_PROBED_SSIDS)
+ return -EINVAL;
+
+ if (ssid_len > sizeof(cmd->ssid))
+ return -EINVAL;
+
+ if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0))
+ return -EINVAL;
+
+ if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len)
+ return -EINVAL;
+
+ if (flag & SPECIFIC_SSID_FLAG)
+ wmi->is_probe_ssid = true;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probed_ssid_cmd *) skb->data;
+ cmd->entry_index = index;
+ cmd->flag = flag;
+ cmd->ssid_len = ssid_len;
+ memcpy(cmd->ssid, ssid, ssid_len);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
+ u16 listen_beacons)
+{
+ struct sk_buff *skb;
+ struct wmi_listen_int_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_listen_int_cmd *) skb->data;
+ cmd->listen_intvl = cpu_to_le16(listen_interval);
+ cmd->num_beacons = cpu_to_le16(listen_beacons);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
+ u16 bmiss_time, u16 num_beacons)
+{
+ struct sk_buff *skb;
+ struct wmi_bmiss_time_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bmiss_time_cmd *) skb->data;
+ cmd->bmiss_time = cpu_to_le16(bmiss_time);
+ cmd->num_beacons = cpu_to_le16(num_beacons);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BMISS_TIME_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_power_mode_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_power_mode_cmd *) skb->data;
+ cmd->pwr_mode = pwr_mode;
+ wmi->pwr_mode = pwr_mode;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_power_params_cmd *pm;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
+ if (!skb)
+ return -ENOMEM;
+
+ pm = (struct wmi_power_params_cmd *)skb->data;
+ pm->idle_period = cpu_to_le16(idle_period);
+ pm->pspoll_number = cpu_to_le16(ps_poll_num);
+ pm->dtim_policy = cpu_to_le16(dtim_policy);
+ pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy);
+ pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
+ pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
+{
+ struct sk_buff *skb;
+ struct wmi_disc_timeout_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_disc_timeout_cmd *) skb->data;
+ cmd->discon_timeout = timeout;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
+ return ret;
+}
+
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag)
+{
+ struct sk_buff *skb;
+ struct wmi_add_cipher_key_cmd *cmd;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "addkey cmd: key_index=%u key_type=%d key_usage=%d key_len=%d key_op_ctrl=%d\n",
+ key_index, key_type, key_usage, key_len, key_op_ctrl);
+
+ if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
+ (key_material == NULL) || key_rsc_len > 8)
+ return -EINVAL;
+
+ if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+ cmd->key_type = key_type;
+ cmd->key_usage = key_usage;
+ cmd->key_len = key_len;
+ memcpy(cmd->key, key_material, key_len);
+
+ if (key_rsc != NULL)
+ memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
+
+ cmd->key_op_ctrl = key_op_ctrl;
+
+ if (mac_addr)
+ memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
+{
+ struct sk_buff *skb;
+ struct wmi_add_krk_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_krk_cmd *) skb->data;
+ memcpy(cmd->krk, krk, WMI_KRK_LEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_cipher_key_cmd *cmd;
+ int ret;
+
+ if (key_index > WMI_MAX_KEY_INDEX)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
+ const u8 *pmkid, bool set)
+{
+ struct sk_buff *skb;
+ struct wmi_setpmkid_cmd *cmd;
+ int ret;
+
+ if (bssid == NULL)
+ return -EINVAL;
+
+ if (set && pmkid == NULL)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_setpmkid_cmd *) skb->data;
+ memcpy(cmd->bssid, bssid, ETH_ALEN);
+ if (set) {
+ memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_ENABLE;
+ } else {
+ memset(cmd->pmkid, 0, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_DISABLE;
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, u8 if_idx)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
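+ /* Prepend a WMI data header marking this buffer as a SYNC message for the given interface */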
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *) skb->data;
+ data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
+
+ ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ return ret;
+}
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
+{
+ struct sk_buff *skb;
+ struct wmi_sync_cmd *cmd;
+ struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC];
+ enum htc_endpoint_id ep_id;
+ u8 index, num_pri_streams = 0;
+ int ret = 0;
+
+ memset(data_sync_bufs, 0, sizeof(data_sync_bufs));
+
+ spin_lock_bh(&wmi->lock);
+
+ for (index = 0; index < WMM_NUM_AC; index++) {
+ if (wmi->fat_pipe_exist & (1 << index)) {
+ num_pri_streams++;
+ data_sync_bufs[num_pri_streams - 1].traffic_class =
+ index;
+ }
+ }
+
+ spin_unlock_bh(&wmi->lock);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sync_cmd *) skb->data;
+
+ /*
+ * In the SYNC cmd sent on the control Ep, send a bitmap
+ * of the data eps on which the Data Sync will be sent
+ */
+ cmd->data_sync_map = wmi->fat_pipe_exist;
+
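+ /* Allocate a zero-length data buffer for each active fat pipe */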
+ for (index = 0; index < num_pri_streams; index++) {
+ data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
+ if (data_sync_bufs[index].skb == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /*
+ * If buffer allocation for any of the data sync buffers fails,
+ * then do not send the synchronize cmd on the control ep.
+ */
+ if (ret)
+ goto free_cmd_skb;
+
+ /*
+ * Send sync cmd followed by sync data messages on all
+ * endpoints being used
+ */
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ if (ret)
+ goto free_data_skb;
+
+ for (index = 0; index < num_pri_streams; index++) {
+ if (WARN_ON(!data_sync_bufs[index].skb))
+ goto free_data_skb;
+
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
+ data_sync_bufs[index].
+ traffic_class);
+ ret =
+ ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
+ ep_id, if_idx);
+
+ data_sync_bufs[index].skb = NULL;
+
+ if (ret)
+ goto free_data_skb;
+ }
+
+ return 0;
+
+free_cmd_skb:
+ /* free up any resources left over (possibly due to an error) */
+ dev_kfree_skb(skb);
+
+free_data_skb:
+ for (index = 0; index < num_pri_streams; index++)
+ dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
+
+ return ret;
+}
+
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
+ struct wmi_create_pstream_cmd *params)
+{
+ struct sk_buff *skb;
+ struct wmi_create_pstream_cmd *cmd;
+ u8 fatpipe_exist_for_ac = 0;
+ s32 min_phy = 0;
+ s32 nominal_phy = 0;
+ int ret;
+
+ if (!((params->user_pri < 8) &&
+ (params->user_pri <= 0x7) &&
+ (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
+ (params->traffic_direc == UPLINK_TRAFFIC ||
+ params->traffic_direc == DNLINK_TRAFFIC ||
+ params->traffic_direc == BIDIR_TRAFFIC) &&
+ (params->traffic_type == TRAFFIC_TYPE_APERIODIC ||
+ params->traffic_type == TRAFFIC_TYPE_PERIODIC) &&
+ (params->voice_psc_cap == DISABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_ALL_AC) &&
+ (params->tsid == WMI_IMPLICIT_PSTREAM ||
+ params->tsid <= WMI_MAX_THINSTREAM))) {
+ return -EINVAL;
+ }
+
+ /*
+ * Check that the nominal PHY rate is >= the minimal PHY rate,
+ * so that the DUT can allow the TSRS IE.
+ */
+
+ /* Get the physical rate (units of bps) */
+ min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000);
+
+ /* Check that the nominal PHY rate is not below the minimal PHY rate */
+ if (params->nominal_phy >= min_phy) {
+ /* unit of 500 kbps */
+ nominal_phy = (params->nominal_phy * 1000) / 500;
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n",
+ min_phy, nominal_phy);
+
+ params->nominal_phy = nominal_phy;
+ } else {
+ params->nominal_phy = 0;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending create_pstream_cmd: ac=%d tsid:%d\n",
+ params->traffic_class, params->tsid);
+
+ cmd = (struct wmi_create_pstream_cmd *) skb->data;
+ memcpy(cmd, params, sizeof(*cmd));
+
+ /* This is an implicitly created Fat pipe */
+ if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) {
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ } else {
+ /* explicitly created thin stream within a fat pipe */
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->stream_exist_for_ac[params->traffic_class] |=
+ (1 << params->tsid);
+ /*
+ * If a thinstream becomes active, the fat pipe automatically
+ * becomes active
+ */
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ }
+
+ /*
+ * Indicate an activity change to the driver layer only if this is
+ * the first TSID to get created explicitly in this AC or if an
+ * implicit fat pipe is getting created.
+ */
+ if (!fatpipe_exist_for_ac)
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ params->traffic_class, true);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_pstream_cmd *cmd;
+ u16 active_tsids = 0;
+ int ret;
+
+ if (traffic_class > 3) {
+ ath6kl_err("invalid traffic class: %d\n", traffic_class);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_pstream_cmd *) skb->data;
+ cmd->traffic_class = traffic_class;
+ cmd->tsid = tsid;
+
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ if (!(active_tsids & (1 << tsid))) {
+ dev_kfree_skb(skb);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSID %d doesn't exist for traffic class: %d\n",
+ tsid, traffic_class);
+ return -ENODATA;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
+ traffic_class, tsid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
+ SYNC_BEFORE_WMIFLAG);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ traffic_class, false);
+ wmi->fat_pipe_exist &= ~(1 << traffic_class);
+ }
+
+ return ret;
+}
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
+ __be32 ips0, __be32 ips1)
+{
+ struct sk_buff *skb;
+ struct wmi_set_ip_cmd *cmd;
+ int ret;
+
+ /* Multicast addresses are not valid */
+ if (ipv4_is_multicast(ips0) ||
+ ipv4_is_multicast(ips1))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_ip_cmd *) skb->data;
+ cmd->ips[0] = ips0;
+ cmd->ips[1] = ips1;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
+{
+ u16 active_tsids;
+ u8 stream_exist;
+ int i;
+
+ /*
+ * Relinquish credits from all implicitly created pstreams
+ * since we are going to sleep. If user-created explicit
+ * thinstreams exist within a fatpipe, leave them intact
+ * for the user to delete.
+ */
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (stream_exist & (1 << i)) {
+ /*
+ * FIXME: Is this lock & unlock inside
+ * for loop correct? may need rework.
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[i];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * If there are no user created thin streams
+ * delete the fatpipe
+ */
+ if (!active_tsids) {
+ stream_exist &= ~(1 << i);
+ /*
+ * Indicate inactivity to driver layer for
+ * this fatpipe (pstream)
+ */
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ i, false);
+ }
+ }
+ }
+
+ /* FIXME: Can we do this assignment without locking ? */
+ spin_lock_bh(&wmi->lock);
+ wmi->fat_pipe_exist = stream_exist;
+ spin_unlock_bh(&wmi->lock);
+}
+
+static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u64 mcsrate, ratemask[ATH6KL_NUM_BANDS];
+ struct wmi_set_tx_select_rates64_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+
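+ /* Build a per-band rate bitmap: legacy rates go in the low bits (5 GHz legacy shifted up by 4), the HT MCS mask above them */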
+ /* only check 2.4 and 5 GHz bands, skip the rest */
+ for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].ht_mcs[1];
+ mcsrate <<= 8;
+ mcsrate |= mask->control[band].ht_mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 28;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 64 bit: 2.4:%llx 5:%llx\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u32 mcsrate, ratemask[ATH6KL_NUM_BANDS];
+ struct wmi_set_tx_select_rates32_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+
+ /* only check 2.4 and 5 GHz bands, skip the rest */
+ for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].ht_mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 20;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 32 bit: 2.4:%x 5:%x\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+ ar->fw_capabilities))
+ return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
+ else
+ return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
+}
+
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_set_host_sleep_mode_cmd *cmd;
+ int ret;
+
+ if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+ (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+ ath6kl_err("invalid host sleep mode: %d\n", host_mode);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+ if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+ ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+ cmd->asleep = cpu_to_le32(1);
+ } else {
+ cmd->awake = cpu_to_le32(1);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+/* This command has zero length payload */
+static int ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(struct wmi *wmi,
+ struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+
+ set_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+ wake_up(&ar->event_wq);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wow_mode_cmd *cmd;
+ int ret;
+
+ if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+ wow_mode != ATH6KL_WOW_MODE_DISABLE) {
+ ath6kl_err("invalid wow mode: %d\n", wow_mode);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+ cmd->enable_wow = cpu_to_le32(wow_mode);
+ cmd->filter = cpu_to_le32(filter);
+ cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, const u8 *filter,
+ const u8 *mask)
+{
+ struct sk_buff *skb;
+ struct wmi_add_wow_pattern_cmd *cmd;
+ u16 size;
+ u8 *filter_mask;
+ int ret;
+
+ /*
+ * Allocate additional memory in the buffer to hold the
+ * filter and mask values, i.e. twice filter_size.
+ */
+ size = sizeof(*cmd) + (2 * filter_size);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = list_id;
+ cmd->filter_size = filter_size;
+ cmd->filter_offset = filter_offset;
+
+ memcpy(cmd->filter, filter, filter_size);
+
+ filter_mask = (u8 *) (cmd->filter + filter_size);
+ memcpy(filter_mask, mask, filter_size);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id)
+{
+ struct sk_buff *skb;
+ struct wmi_del_wow_pattern_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = cpu_to_le16(list_id);
+ cmd->filter_id = cpu_to_le16(filter_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
+ enum wmix_command_id cmd_id,
+ enum wmi_sync_flag sync_flag)
+{
+ struct wmix_cmd_hdr *cmd_hdr;
+ int ret;
+
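+ /* WMIX commands are tunneled inside WMI_EXTENSION_CMDID with an extra wmix header prepended */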
+ skb_push(skb, sizeof(struct wmix_cmd_hdr));
+
+ cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source)
+{
+ struct sk_buff *skb;
+ struct wmix_hb_challenge_resp_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
+ cmd->cookie = cpu_to_le32(cookie);
+ cmd->source = cpu_to_le32(source);
+
+ ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
+{
+ struct ath6kl_wmix_dbglog_cfg_module_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath6kl_wmix_dbglog_cfg_module_cmd *) skb->data;
+ cmd->valid = cpu_to_le32(valid);
+ cmd->config = cpu_to_le32(config);
+
+ ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_DBGLOG_CFG_MODULE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
+{
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
+}
+
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
+{
+ struct sk_buff *skb;
+ struct wmi_set_tx_pwr_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
+ cmd->dbM = dbM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
+{
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
+}
+
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
+{
+ return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
+}
+
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+ u8 preamble_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_set_lpreamble_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
+ cmd->status = status;
+ cmd->preamble_policy = preamble_policy;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
+{
+ struct sk_buff *skb;
+ struct wmi_set_rts_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_rts_cmd *) skb->data;
+ cmd->threshold = cpu_to_le16(threshold);
+
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wmm_txop_cmd *cmd;
+ int ret;
+
+ if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
+ cmd->txop_enable = cfg;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl)
+{
+ struct sk_buff *skb;
+ struct wmi_set_keepalive_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_keepalive_cmd *) skb->data;
+ cmd->keep_alive_intvl = keep_alive_intvl;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
+ return ret;
+}
+
+int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
+ enum ieee80211_band band,
+ struct ath6kl_htcap *htcap)
+{
+ struct sk_buff *skb;
+ struct wmi_set_htcap_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_htcap_cmd *) skb->data;
+
+ /*
+ * NOTE: Band in firmware matches enum ieee80211_band; it is
+ * unlikely this will change in firmware. If the band values
+ * ever do change, the host needs to be fixed up to match.
+ */
+ cmd->band = band;
+ cmd->ht_enable = !!htcap->ht_enable;
+ cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20);
+ cmd->ht40_supported =
+ !!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40);
+ cmd->intolerant_40mhz =
+ !!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+ cmd->max_ampdu_len_exp = htcap->ampdu_factor;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n",
+ cmd->band, cmd->ht_enable, cmd->ht40_supported,
+ cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz,
+ cmd->max_ampdu_len_exp);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb->data, buf, len);
+
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on)
+{
+ struct sk_buff *skb;
+ struct wmi_mcast_filter_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mcast_filter_cmd *) skb->data;
+ cmd->mcast_all_enable = mc_all_on;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_MCAST_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
+ u8 *filter, bool add_filter)
+{
+ struct sk_buff *skb;
+ struct wmi_mcast_filter_add_del_cmd *cmd;
+ int ret;
+
+ if ((filter[0] != 0x33 || filter[1] != 0x33) &&
+ (filter[0] != 0x01 || filter[1] != 0x00 ||
+ filter[2] != 0x5e || filter[3] > 0x7f)) {
+ ath6kl_warn("invalid multicast filter address\n");
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mcast_filter_add_del_cmd *) skb->data;
+ memcpy(cmd->mcast_mac, filter, ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ add_filter ? WMI_SET_MCAST_FILTER_CMDID :
+ WMI_DEL_MCAST_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
+{
+ struct sk_buff *skb;
+ struct wmi_sta_bmiss_enhance_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
+ cmd->enable = enhance ? 1 : 0;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_STA_BMISS_ENHANCE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2)
+{
+ struct sk_buff *skb;
+ struct wmi_set_regdomain_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_regdomain_cmd *) skb->data;
+ memcpy(cmd->iso_name, alpha2, 2);
+
+ return ath6kl_wmi_cmd_send(wmi, 0, skb,
+ WMI_SET_REGDOMAIN_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+ u8 sgi = 0;
+ s32 ret;
+
+ if (rate_index == RATE_AUTO)
+ return 0;
+
+ /* SGI is stored as the MSB of the rate_index */
+ if (rate_index & RATE_INDEX_MSB) {
+ rate_index &= RATE_INDEX_WITHOUT_SGI_MASK;
+ sgi = 1;
+ }
+
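+ /* Look up the rate from the MCS15-capable table or the legacy table, depending on firmware capability */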
+ if (test_bit(ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
+ ar->fw_capabilities)) {
+ if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl_mcs15)))
+ return 0;
+
+ ret = wmi_rate_tbl_mcs15[(u32) rate_index][sgi];
+ } else {
+ if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl)))
+ return 0;
+
+ ret = wmi_rate_tbl[(u32) rate_index][sgi];
+ }
+
+ return ret;
+}
+
+static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
+ u32 len)
+{
+ struct wmi_pmkid_list_reply *reply;
+ u32 expected_len;
+
+ if (len < sizeof(struct wmi_pmkid_list_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_pmkid_list_reply *)datap;
+ expected_len = sizeof(reply->num_pmkid) +
+ le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN;
+
+ if (len < expected_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
+
+ aggr_recv_addba_req_evt(vif, cmd->tid,
+ le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
+
+ return 0;
+}
+
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
+
+ aggr_recv_delba_req_evt(vif, cmd->tid);
+
+ return 0;
+}
+
+/* AP mode functions */
+
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p)
+{
+ struct sk_buff *skb;
+ struct wmi_connect_cmd *cm;
+ int res;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
+ if (!skb)
+ return -ENOMEM;
+
+ cm = (struct wmi_connect_cmd *) skb->data;
+ memcpy(cm, p, sizeof(*cm));
+
+ res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+ NO_SYNC_WMIFLAG);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: nw_type=%u auth_mode=%u ch=%u ctrl_flags=0x%x -> res=%d\n",
+ __func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch),
+ le32_to_cpu(p->ctrl_flags), res);
+ return res;
+}
+
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+ u16 reason)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_set_mlme_cmd *cm;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cm));
+ if (!skb)
+ return -ENOMEM;
+
+ cm = (struct wmi_ap_set_mlme_cmd *) skb->data;
+ memcpy(cm->mac, mac, ETH_ALEN);
+ cm->reason = cpu_to_le16(reason);
+ cm->cmd = cmd;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd,
+ cm->reason);
+
+ return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_hidden_ssid_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_hidden_ssid_cmd *) skb->data;
+ cmd->hidden_ssid = enable ? 1 : 0;
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_HIDDEN_SSID_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+/* This command will be used to enable/disable AP uAPSD feature */
+int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable)
+{
+ struct wmi_ap_set_apsd_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_set_apsd_cmd *)skb->data;
+ cmd->enable = enable;
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_APSD_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi, u8 if_idx,
+ u16 aid, u16 bitmap, u32 flags)
+{
+ struct wmi_ap_apsd_buffered_traffic_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_apsd_buffered_traffic_cmd *)skb->data;
+ cmd->aid = cpu_to_le16(aid);
+ cmd->bitmap = cpu_to_le16(bitmap);
+ cmd->flags = cpu_to_le32(flags);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_pspoll_event *ev;
+
+ if (len < sizeof(struct wmi_pspoll_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pspoll_event *) datap;
+
+ ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
+
+ return 0;
+}
+
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ ath6kl_dtimexpiry_event(vif);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+ bool flag)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_set_pvb_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
+ cmd->aid = cpu_to_le16(aid);
+ cmd->rsvd = cpu_to_le16(0);
+ cmd->flag = cpu_to_le32(flag);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_ver,
+ bool rx_dot11_hdr, bool defrag_on_host)
+{
+ struct sk_buff *skb;
+ struct wmi_rx_frame_format_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
+ cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0;
+ cmd->defrag_on_host = defrag_on_host ? 1 : 0;
+ cmd->meta_ver = rx_meta_ver;
+
+ /* Delete the local aggr state, on host */
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len)
+{
+ struct sk_buff *skb;
+ struct wmi_set_appie_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "set_appie_cmd: mgmt_frm_type=%u ie_len=%u\n",
+ mgmt_frm_type, ie_len);
+ p = (struct wmi_set_appie_cmd *) skb->data;
+ p->mgmt_frm_type = mgmt_frm_type;
+ p->ie_len = ie_len;
+
+ if (ie != NULL && ie_len > 0)
+ memcpy(p->ie_info, ie, ie_len);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
+ const u8 *ie_info, u8 ie_len)
+{
+ struct sk_buff *skb;
+ struct wmi_set_ie_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_field=%u ie_len=%u\n",
+ ie_id, ie_field, ie_len);
+ p = (struct wmi_set_ie_cmd *) skb->data;
+ p->ie_id = ie_id;
+ p->ie_field = ie_field;
+ p->ie_len = ie_len;
+ if (ie_info && ie_len > 0)
+ memcpy(p->ie_info, ie_info, ie_len);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
+{
+ struct sk_buff *skb;
+ struct wmi_disable_11b_rates_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "disable_11b_rates_cmd: disable=%u\n",
+ disable);
+ cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
+ cmd->disable = disable ? 1 : 0;
+
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
+{
+ struct sk_buff *skb;
+ struct wmi_remain_on_chnl_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl_cmd: freq=%u dur=%u\n",
+ freq, dur);
+ p = (struct wmi_remain_on_chnl_cmd *) skb->data;
+ p->freq = cpu_to_le32(freq);
+ p->duration = cpu_to_le32(dur);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+/* ath6kl_wmi_send_action_cmd is to be deprecated. Use
+ * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
+ * mgmt operations using station interface.
+ */
+static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
+ u32 freq, u32 wait, const u8 *data,
+ u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_send_action_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
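+ /* Save a copy of the frame for later use (e.g. when reporting TX status) */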
+ kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
+ p = (struct wmi_send_action_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
+ u32 freq, u32 wait, const u8 *data,
+ u16 data_len, u32 no_cck)
+{
+ struct sk_buff *skb;
+ struct wmi_send_mgmt_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
+ p = (struct wmi_send_mgmt_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->no_cck = cpu_to_le32(no_cck);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck)
+{
+ int status;
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If the firmware is capable of P2P mgmt operations on the
+ * station interface, send additional information such as the
+ * supported rates to advertise and the tx rates to use for
+ * probe requests.
+ */
+ status = __ath6kl_wmi_send_mgmt_cmd(ar->wmi, if_idx, id, freq,
+ wait, data, data_len,
+ no_cck);
+ } else {
+ status = ath6kl_wmi_send_action_cmd(ar->wmi, if_idx, id, freq,
+ wait, data, data_len);
+ }
+
+ return status;
+}
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_p2p_probe_response_cmd *p;
+ size_t cmd_len = sizeof(*p) + data_len;
+
+ if (data_len == 0)
+ cmd_len++; /* work around target minimum length requirement */
+
+ skb = ath6kl_wmi_get_new_buf(cmd_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_probe_response_cmd: freq=%u dst=%pM len=%u\n",
+ freq, dst, data_len);
+ p = (struct wmi_p2p_probe_response_cmd *) skb->data;
+ p->freq = cpu_to_le32(freq);
+ memcpy(p->destination_addr, dst, ETH_ALEN);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
+{
+ struct sk_buff *skb;
+ struct wmi_probe_req_report_cmd *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "probe_report_req_cmd: enable=%u\n",
+ enable);
+ p = (struct wmi_probe_req_report_cmd *) skb->data;
+ p->enable = enable ? 1 : 0;
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
+{
+ struct sk_buff *skb;
+ struct wmi_get_p2p_info *p;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*p));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "info_req_cmd: flags=%x\n",
+ info_req_flags);
+ p = (struct wmi_get_p2p_info *) skb->data;
+ p->info_req_flags = cpu_to_le32(info_req_flags);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
+{
+ ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
+ return ath6kl_wmi_simple_cmd(wmi, if_idx,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+}
+
+int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
+{
+ struct sk_buff *skb;
+ struct wmi_set_inact_period_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_inact_period_cmd *) skb->data;
+ cmd->inact_period = cpu_to_le32(inact_timeout);
+ cmd->num_null_func = 0;
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static void ath6kl_wmi_hb_challenge_resp_event(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmix_hb_challenge_resp_cmd *cmd;
+
+ if (len < sizeof(struct wmix_hb_challenge_resp_cmd))
+ return;
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) datap;
+ ath6kl_recovery_hb_event(wmi->parent_dev,
+ le32_to_cpu(cmd->cookie));
+}
+
+static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmix_cmd_hdr *cmd;
+ u32 len;
+ u16 id;
+ u8 *datap;
+ int ret = 0;
+
+ if (skb->len < sizeof(struct wmix_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ return -EINVAL;
+ }
+
+ cmd = (struct wmix_cmd_hdr *) skb->data;
+ id = le32_to_cpu(cmd->cmd_id);
+
+ skb_pull(skb, sizeof(struct wmix_cmd_hdr));
+
+ datap = skb->data;
+ len = skb->len;
+
+ switch (id) {
+ case WMIX_HB_CHALLENGE_RESP_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n");
+ ath6kl_wmi_hb_challenge_resp_event(wmi, datap, len);
+ break;
+ case WMIX_DBGLOG_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len);
+ ath6kl_debug_fwlog_event(wmi->parent_dev, datap, len);
+ break;
+ default:
+ ath6kl_warn("unknown cmd id 0x%x\n", id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
+/* Process interface-specific WMI events; the caller frees datap */
+static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id,
+ u8 *datap, u32 len)
+{
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+ if (!vif) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Wmi event for unavailable vif, vif_index:%d\n",
+ if_idx);
+ return -EINVAL;
+ }
+
+ switch (cmd_id) {
+ case WMI_CONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
+ return ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
+ case WMI_DISCONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
+ return ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
+ case WMI_TKIP_MICERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
+ return ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
+ case WMI_BSSINFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
+ return ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
+ case WMI_NEIGHBOR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
+ return ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+ vif);
+ case WMI_SCAN_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
+ return ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
+ case WMI_REPORT_STATISTICS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
+ return ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
+ case WMI_CAC_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
+ return ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
+ case WMI_PSPOLL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
+ return ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
+ case WMI_DTIMEXPIRY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
+ return ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
+ case WMI_ADDBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
+ return ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
+ case WMI_DELBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
+ return ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
+ case WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID");
+ return ath6kl_wmi_host_sleep_mode_cmd_prcd_evt_rx(wmi, vif);
+ case WMI_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
+ return ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
+ case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
+ return ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
+ len, vif);
+ case WMI_TX_STATUS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
+ return ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
+ case WMI_RX_PROBE_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
+ return ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
+ case WMI_RX_ACTION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
+ return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
+ case WMI_TXE_NOTIFY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TXE_NOTIFY_EVENTID\n");
+ return ath6kl_wmi_txe_notify_event_rx(wmi, datap, len, vif);
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_proc_events(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd;
+ int ret = 0;
+ u32 len;
+ u16 id;
+ u8 if_idx;
+ u8 *datap;
+
+ cmd = (struct wmi_cmd_hdr *) skb->data;
+ id = le16_to_cpu(cmd->cmd_id);
+ if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
+
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ datap = skb->data;
+ len = skb->len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "wmi rx id %d len %d\n", id, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
+ datap, len);
+
+ switch (id) {
+ case WMI_GET_BITRATE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
+ ret = ath6kl_wmi_bitrate_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_CHANNEL_LIST_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n");
+ ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_TX_PWR_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n");
+ ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len);
+ break;
+ case WMI_READY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
+ ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
+ break;
+ case WMI_PEER_NODE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
+ ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
+ break;
+ case WMI_REGDOMAIN_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
+ ath6kl_wmi_regdomain_event(wmi, datap, len);
+ break;
+ case WMI_PSTREAM_TIMEOUT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
+ ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
+ break;
+ case WMI_CMDERROR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
+ ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
+ break;
+ case WMI_RSSI_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_ERROR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n");
+ break;
+ case WMI_OPT_RX_FRAME_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n");
+ /* this event has been deprecated */
+ break;
+ case WMI_REPORT_ROAM_TBL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
+ break;
+ case WMI_EXTENSION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
+ ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
+ break;
+ case WMI_CHANNEL_CHANGE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
+ break;
+ case WMI_REPORT_ROAM_DATA_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n");
+ break;
+ case WMI_TEST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TEST_EVENTID\n");
+ ret = ath6kl_wmi_test_rx(wmi, datap, len);
+ break;
+ case WMI_GET_FIXRATES_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
+ ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len);
+ break;
+ case WMI_TX_RETRY_ERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n");
+ break;
+ case WMI_SNR_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_LQ_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n");
+ break;
+ case WMI_APLIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n");
+ ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len);
+ break;
+ case WMI_GET_KEEPALIVE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n");
+ ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_WOW_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
+ break;
+ case WMI_GET_PMKID_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
+ ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
+ break;
+ case WMI_SET_PARAMS_REPLY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
+ break;
+ case WMI_ADDBA_RESP_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
+ break;
+ case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
+ break;
+ case WMI_REPORT_BTCOEX_STATS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_STATS_EVENTID\n");
+ break;
+ case WMI_TX_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
+ ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
+ break;
+ case WMI_P2P_CAPABILITIES_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
+ ret = ath6kl_wmi_p2p_capabilities_event_rx(datap, len);
+ break;
+ case WMI_P2P_INFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
+ ret = ath6kl_wmi_p2p_info_event_rx(datap, len);
+ break;
+ default:
+ /* the event may be interface specific */
+ ret = ath6kl_wmi_proc_events_vif(wmi, if_idx, id, datap, len);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+/* Control Path */
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
+{
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (skb->len < sizeof(struct wmi_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ trace_ath6kl_wmi_event(skb->data, skb->len);
+
+ return ath6kl_wmi_proc_events(wmi, skb);
+}
+
+void ath6kl_wmi_reset(struct wmi *wmi)
+{
+ spin_lock_bh(&wmi->lock);
+
+ wmi->fat_pipe_exist = 0;
+ memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac));
+
+ spin_unlock_bh(&wmi->lock);
+}
+
+void *ath6kl_wmi_init(struct ath6kl *dev)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ spin_lock_init(&wmi->lock);
+
+ wmi->parent_dev = dev;
+
+ wmi->pwr_mode = REC_POWER;
+
+ ath6kl_wmi_reset(wmi);
+
+ return wmi;
+}
+
+void ath6kl_wmi_shutdown(struct wmi *wmi)
+{
+ if (!wmi)
+ return;
+
+ kfree(wmi->last_mgmt_tx_frame);
+ kfree(wmi);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
new file mode 100644
index 000000000..19f88b4a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -0,0 +1,2731 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the Wireless Module Interface (WMI)
+ * protocol. It includes definitions of all the commands and events.
+ * Commands are messages from the host to the wireless module (WM).
+ * Events and replies are messages from the WM to the host.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+#include <linux/ieee80211.h>
+
+#include "htc.h"
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+#define WMI_CONTROL_MSG_MAX_LEN 256
+#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600)
+
+#define IP_ETHERTYPE 0x0800
+
+#define WMI_IMPLICIT_PSTREAM 0xFF
+#define WMI_MAX_THINSTREAM 15
+
+#define SSID_IE_LEN_INDEX 13
+
+/* Host side link management data structures */
+#define SIG_QUALITY_THRESH_LVLS 6
+#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+
+#define A_BAND_24GHZ 0
+#define A_BAND_5GHZ 1
+#define ATH6KL_NUM_BANDS 2
+
+/* in ms */
+#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
+
+/*
+ * There are no signed versions of __le16 and __le32, so as a temporary
+ * solution define our own versions. The idea is from fs/ntfs/types.h.
+ *
+ * Use the a_ prefix so that it doesn't conflict if proper support is ever
+ * added to linux/types.h.
+ */
+typedef __s16 __bitwise a_sle16;
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32) cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32) val);
+}
+
+static inline a_sle16 a_cpu_to_sle16(s16 val)
+{
+ return (__force a_sle16) cpu_to_le16(val);
+}
+
+static inline s16 a_sle16_to_cpu(a_sle16 val)
+{
+ return le16_to_cpu((__force __le16) val);
+}
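+
+/*
+ * Illustrative sketch, not part of the original patch: the helpers above
+ * are meant for signed values stored little-endian in command structures
+ * (e.g. RSSI thresholds). A host value should survive a round trip
+ * through them unchanged, as this hypothetical check shows.
+ */
+static inline bool a_sle16_round_trips(s16 val)
+{
+ return a_sle16_to_cpu(a_cpu_to_sle16(val)) == val;
+}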
+
+struct sq_threshold_params {
+ s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS];
+ s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS];
+ u32 upper_threshold_valid_count;
+ u32 lower_threshold_valid_count;
+ u32 polling_interval;
+ u8 weight;
+ u8 last_rssi;
+ u8 last_rssi_poll_event;
+};
+
+struct wmi_data_sync_bufs {
+ u8 traffic_class;
+ struct sk_buff *skb;
+};
+
+/* WMM stream classes */
+#define WMM_NUM_AC 4
+#define WMM_AC_BE 0 /* best effort */
+#define WMM_AC_BK 1 /* background */
+#define WMM_AC_VI 2 /* video */
+#define WMM_AC_VO 3 /* voice */
+
+#define WMI_VOICE_USER_PRIORITY 0x7
+
+struct wmi {
+ u16 stream_exist_for_ac[WMM_NUM_AC];
+ u8 fat_pipe_exist;
+ struct ath6kl *parent_dev;
+ u8 pwr_mode;
+
+ /* protects fat_pipe_exist and stream_exist_for_ac */
+ spinlock_t lock;
+ enum htc_endpoint_id ep_id;
+ struct sq_threshold_params
+ sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
+ bool is_wmm_enabled;
+ u8 traffic_class;
+ bool is_probe_ssid;
+
+ u8 *last_mgmt_tx_frame;
+ size_t last_mgmt_tx_frame_len;
+ u8 saved_pwr_mode;
+};
+
+struct host_app_area {
+ __le32 wmi_protocol_ver;
+} __packed;
+
+enum wmi_msg_type {
+ DATA_MSGTYPE = 0x0,
+ CNTL_MSGTYPE,
+ SYNC_MSGTYPE,
+ OPT_MSGTYPE,
+};
+
+/*
+ * Macros for operating on WMI_DATA_HDR (info) field
+ */
+
+#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
+#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
+#define WMI_DATA_HDR_UP_MASK 0x07
+#define WMI_DATA_HDR_UP_SHIFT 2
+
+/* In AP mode, the same bit (b5) is used to indicate the power save state
+ * in the rx direction and the more-data bit state in the tx direction.
+ */
+#define WMI_DATA_HDR_PS_MASK 0x1
+#define WMI_DATA_HDR_PS_SHIFT 5
+
+#define WMI_DATA_HDR_MORE 0x20
+
+enum wmi_data_hdr_data_type {
+ WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
+ WMI_DATA_HDR_DATA_TYPE_802_11,
+
+ /* previously used for the PAL */
+ WMI_DATA_HDR_DATA_TYPE_ACL,
+};
+
+/* Bitmap of data header flags */
+enum wmi_data_hdr_flags {
+ WMI_DATA_HDR_FLAGS_MORE = 0x1,
+ WMI_DATA_HDR_FLAGS_EOSP = 0x2,
+ WMI_DATA_HDR_FLAGS_UAPSD = 0x4,
+};
+
+#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
+#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
+
+/* Macros for operating on WMI_DATA_HDR (info2) field */
+#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
+#define WMI_DATA_HDR_SEQNO_SHIFT 0
+
+#define WMI_DATA_HDR_AMSDU_MASK 0x1
+#define WMI_DATA_HDR_AMSDU_SHIFT 12
+
+#define WMI_DATA_HDR_META_MASK 0x7
+#define WMI_DATA_HDR_META_SHIFT 13
+
+#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF
+#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8
+
+/* Macros for operating on WMI_DATA_HDR (info3) field */
+#define WMI_DATA_HDR_IF_IDX_MASK 0xF
+
+#define WMI_DATA_HDR_TRIG 0x10
+#define WMI_DATA_HDR_EOSP 0x10
+
+struct wmi_data_hdr {
+ s8 rssi;
+
+ /*
+ * usage of 'info' field(8-bit):
+ *
+ * b1:b0 - WMI_MSG_TYPE
+ * b4:b3:b2 - UP(tid)
+ * b5 - Used in AP mode.
+ * More-data in tx dir, PS in rx.
+ * b7:b6 - Dot3 header(0),
+ * Dot11 Header(1),
+ * ACL data(2)
+ */
+ u8 info;
+
+ /*
+ * usage of 'info2' field(16-bit):
+ *
+ * b11:b0 - seq_no
+ * b12 - A-MSDU?
+ * b15:b13 - META_DATA_VERSION 0 - 7
+ */
+ __le16 info2;
+
+ /*
+ * usage of info3, 16-bit:
+ * b3:b0 - Interface index
+ * b4 - uAPSD trigger in rx & EOSP in tx
+ * b15:b5 - Reserved
+ */
+ __le16 info3;
+} __packed;
+
+static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr)
+{
+ return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK;
+}
+
+static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr,
+ u8 usr_pri)
+{
+ dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT);
+ dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT;
+}
+
+static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr)
+{
+ u8 data_type;
+
+ data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) &
+ WMI_DATA_HDR_DATA_TYPE_MASK;
+ return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11);
+}
+
+static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) &
+ WMI_DATA_HDR_SEQNO_MASK;
+}
+
+static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) &
+ WMI_DATA_HDR_AMSDU_MASK;
+}
+
+static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK;
+}
+
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+ return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
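+
+/*
+ * Illustrative sketch, not part of the original patch: the message type
+ * occupies bits b1:b0 of the 'info' field described above and can be read
+ * with the WMI_DATA_HDR_MSG_TYPE_* macros; this hypothetical accessor
+ * mirrors the accessors before it.
+ */
+static inline u8 wmi_data_hdr_get_msg_type(struct wmi_data_hdr *dhdr)
+{
+ return (dhdr->info >> WMI_DATA_HDR_MSG_TYPE_SHIFT) &
+        WMI_DATA_HDR_MSG_TYPE_MASK;
+}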
+
+/* Tx meta version definitions */
+#define WMI_MAX_TX_META_SZ 12
+#define WMI_META_VERSION_1 0x01
+#define WMI_META_VERSION_2 0x02
+
+/* Flag to signal to FW to calculate TCP checksum */
+#define WMI_META_V2_FLAG_CSUM_OFFLOAD 0x01
+
+struct wmi_tx_meta_v1 {
+ /* packet ID to identify the tx request */
+ u8 pkt_id;
+
+ /* rate policy to be used for the tx of this frame */
+ u8 rate_plcy_id;
+} __packed;
+
+struct wmi_tx_meta_v2 {
+ /*
+ * Offset from the start of the WMI header at which csum calculation
+ * begins.
+ */
+ u8 csum_start;
+
+ /* offset from start of WMI header where final csum goes */
+ u8 csum_dest;
+
+ /* no of bytes over which csum is calculated */
+ u8 csum_flags;
+} __packed;
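+
+/*
+ * Illustrative sketch, not part of the original patch: a hypothetical
+ * helper filling the v2 tx meta for checksum offload with the
+ * WMI_META_V2_FLAG_CSUM_OFFLOAD flag defined above; the start and dest
+ * offsets are assumed to be computed by the caller from the frame layout.
+ */
+static inline void wmi_tx_meta_v2_csum_offload(struct wmi_tx_meta_v2 *meta,
+          u8 csum_start, u8 csum_dest)
+{
+ meta->csum_start = csum_start;
+ meta->csum_dest = csum_dest;
+ meta->csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
+}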
+
+struct wmi_rx_meta_v1 {
+ u8 status;
+
+ /* rate index mapped to rate at which this packet was received. */
+ u8 rix;
+
+ /* rssi of packet */
+ u8 rssi;
+
+ /* rf channel during packet reception */
+ u8 channel;
+
+ __le16 flags;
+} __packed;
+
+struct wmi_rx_meta_v2 {
+ __le16 csum;
+
+ /* bit 0 set - partial csum valid, bit 1 set - test mode */
+ u8 csum_flags;
+} __packed;
+
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le16 cmd_id;
+
+ /* info1 - 16 bits
+ * b03:b00 - id
+ * b15:b04 - unused */
+ __le16 info1;
+
+ /* for alignment */
+ __le16 reserved;
+} __packed;
+
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+ return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
+
+/* List of WMI commands */
+enum wmi_cmd_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_RECONNECT_CMDID,
+ WMI_DISCONNECT_CMDID,
+ WMI_SYNCHRONIZE_CMDID,
+ WMI_CREATE_PSTREAM_CMDID,
+ WMI_DELETE_PSTREAM_CMDID,
+ /* WMI_START_SCAN_CMDID is to be deprecated. Use
+ * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
+ WMI_START_SCAN_CMDID,
+ WMI_SET_SCAN_PARAMS_CMDID,
+ WMI_SET_BSS_FILTER_CMDID,
+ WMI_SET_PROBED_SSID_CMDID, /* 10 */
+ WMI_SET_LISTEN_INT_CMDID,
+ WMI_SET_BMISS_TIME_CMDID,
+ WMI_SET_DISC_TIMEOUT_CMDID,
+ WMI_GET_CHANNEL_LIST_CMDID,
+ WMI_SET_BEACON_INT_CMDID,
+ WMI_GET_STATISTICS_CMDID,
+ WMI_SET_CHANNEL_PARAMS_CMDID,
+ WMI_SET_POWER_MODE_CMDID,
+ WMI_SET_IBSS_PM_CAPS_CMDID,
+ WMI_SET_POWER_PARAMS_CMDID, /* 20 */
+ WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
+ WMI_ADD_CIPHER_KEY_CMDID,
+ WMI_DELETE_CIPHER_KEY_CMDID,
+ WMI_ADD_KRK_CMDID,
+ WMI_DELETE_KRK_CMDID,
+ WMI_SET_PMKID_CMDID,
+ WMI_SET_TX_PWR_CMDID,
+ WMI_GET_TX_PWR_CMDID,
+ WMI_SET_ASSOC_INFO_CMDID,
+ WMI_ADD_BAD_AP_CMDID, /* 30 */
+ WMI_DELETE_BAD_AP_CMDID,
+ WMI_SET_TKIP_COUNTERMEASURES_CMDID,
+ WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
+ WMI_SET_ACCESS_PARAMS_CMDID,
+ WMI_SET_RETRY_LIMITS_CMDID,
+ WMI_SET_OPT_MODE_CMDID,
+ WMI_OPT_TX_FRAME_CMDID,
+ WMI_SET_VOICE_PKT_SIZE_CMDID,
+ WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
+ WMI_SET_ROAM_CTRL_CMDID,
+ WMI_GET_ROAM_TBL_CMDID,
+ WMI_GET_ROAM_DATA_CMDID,
+ WMI_ENABLE_RM_CMDID,
+ WMI_SET_MAX_OFFHOME_DURATION_CMDID,
+ WMI_EXTENSION_CMDID, /* Non-wireless extensions */
+ WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ WMI_LQ_THRESHOLD_PARAMS_CMDID,
+ WMI_SET_LPREAMBLE_CMDID,
+ WMI_SET_RTS_CMDID, /* 50 */
+ WMI_CLR_RSSI_SNR_CMDID,
+ WMI_SET_FIXRATES_CMDID,
+ WMI_GET_FIXRATES_CMDID,
+ WMI_SET_AUTH_MODE_CMDID,
+ WMI_SET_REASSOC_MODE_CMDID,
+ WMI_SET_WMM_CMDID,
+ WMI_SET_WMM_TXOP_CMDID,
+ WMI_TEST_CMDID,
+
+ /* COEX AR6002 only */
+ WMI_SET_BT_STATUS_CMDID,
+ WMI_SET_BT_PARAMS_CMDID, /* 60 */
+
+ WMI_SET_KEEPALIVE_CMDID,
+ WMI_GET_KEEPALIVE_CMDID,
+ WMI_SET_APPIE_CMDID,
+ WMI_GET_APPIE_CMDID,
+ WMI_SET_WSC_STATUS_CMDID,
+
+ /* Wake on Wireless */
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ WMI_SET_WOW_MODE_CMDID,
+ WMI_GET_WOW_LIST_CMDID,
+ WMI_ADD_WOW_PATTERN_CMDID,
+ WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
+
+ WMI_SET_FRAMERATES_CMDID,
+ WMI_SET_AP_PS_CMDID,
+ WMI_SET_QOS_SUPP_CMDID,
+ WMI_SET_IE_CMDID,
+
+ /* WMI_THIN_RESERVED_... mark the start and end
+ * values for WMI_THIN_RESERVED command IDs. These
+ * command IDs can be found in wmi_thin.h */
+ WMI_THIN_RESERVED_START = 0x8000,
+ WMI_THIN_RESERVED_END = 0x8fff,
+
+ /* Developer commands starts at 0xF000 */
+ WMI_SET_BITRATE_CMDID = 0xF000,
+ WMI_GET_BITRATE_CMDID,
+ WMI_SET_WHALPARAM_CMDID,
+ WMI_SET_MAC_ADDRESS_CMDID,
+ WMI_SET_AKMP_PARAMS_CMDID,
+ WMI_SET_PMKID_LIST_CMDID,
+ WMI_GET_PMKID_LIST_CMDID,
+ WMI_ABORT_SCAN_CMDID,
+ WMI_SET_TARGET_EVENT_REPORT_CMDID,
+
+ /* Unused */
+ WMI_UNUSED1,
+ WMI_UNUSED2,
+
+ /* AP mode commands */
+ WMI_AP_HIDDEN_SSID_CMDID,
+ WMI_AP_SET_NUM_STA_CMDID,
+ WMI_AP_ACL_POLICY_CMDID,
+ WMI_AP_ACL_MAC_LIST_CMDID,
+ WMI_AP_CONFIG_COMMIT_CMDID,
+ WMI_AP_SET_MLME_CMDID,
+ WMI_AP_SET_PVB_CMDID,
+ WMI_AP_CONN_INACT_CMDID,
+ WMI_AP_PROT_SCAN_TIME_CMDID,
+ WMI_AP_SET_COUNTRY_CMDID,
+ WMI_AP_SET_DTIM_CMDID,
+ WMI_AP_MODE_STAT_CMDID,
+
+ WMI_SET_IP_CMDID,
+ WMI_SET_PARAMS_CMDID,
+ WMI_SET_MCAST_FILTER_CMDID,
+ WMI_DEL_MCAST_FILTER_CMDID,
+
+ WMI_ALLOW_AGGR_CMDID,
+ WMI_ADDBA_REQ_CMDID,
+ WMI_DELBA_REQ_CMDID,
+ WMI_SET_HT_CAP_CMDID,
+ WMI_SET_HT_OP_CMDID,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ WMI_SET_TX_SGI_PARAM_CMDID,
+ WMI_SET_RATE_POLICY_CMDID,
+
+ WMI_HCI_CMD_CMDID,
+ WMI_RX_FRAME_FORMAT_CMDID,
+ WMI_SET_THIN_MODE_CMDID,
+ WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
+
+ WMI_AP_SET_11BG_RATESET_CMDID,
+ WMI_SET_PMK_CMDID,
+ WMI_MCAST_FILTER_CMDID,
+
+ /* COEX CMDID AR6003 */
+ WMI_SET_BTCOEX_FE_ANT_CMDID,
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
+ WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
+ WMI_SET_BTCOEX_DEBUG_CMDID,
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
+ WMI_GET_BTCOEX_STATS_CMDID,
+ WMI_GET_BTCOEX_CONFIG_CMDID,
+
+ WMI_SET_DFS_ENABLE_CMDID, /* F034 */
+ WMI_SET_DFS_MINRSSITHRESH_CMDID,
+ WMI_SET_DFS_MAXPULSEDUR_CMDID,
+ WMI_DFS_RADAR_DETECTED_CMDID,
+
+ /* P2P commands */
+ WMI_P2P_SET_CONFIG_CMDID, /* F038 */
+ WMI_WPS_SET_CONFIG_CMDID,
+ WMI_SET_REQ_DEV_ATTR_CMDID,
+ WMI_P2P_FIND_CMDID,
+ WMI_P2P_STOP_FIND_CMDID,
+ WMI_P2P_GO_NEG_START_CMDID,
+ WMI_P2P_LISTEN_CMDID,
+
+ WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
+ WMI_SET_PROMISCUOUS_MODE_CMDID,
+ WMI_RX_FRAME_FILTER_CMDID,
+ WMI_SET_CHANNEL_CMDID,
+
+ /* WAC commands */
+ WMI_ENABLE_WAC_CMDID,
+ WMI_WAC_SCAN_REPLY_CMDID,
+ WMI_WAC_CTRL_REQ_CMDID,
+ WMI_SET_DIV_PARAMS_CMDID,
+
+ WMI_GET_PMK_CMDID,
+ WMI_SET_PASSPHRASE_CMDID,
+ WMI_SEND_ASSOC_RES_CMDID,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID,
+
+ /* ACS command, consists of sub-commands */
+ WMI_ACS_CTRL_CMDID,
+ WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
+ WMI_SET_TBD_TIME_CMDID, /* added for wmiconfig command for TBD */
+
+ /* Pktlog cmds */
+ WMI_PKTLOG_ENABLE_CMDID,
+ WMI_PKTLOG_DISABLE_CMDID,
+
+ /* More P2P Cmds */
+ WMI_P2P_GO_NEG_REQ_RSP_CMDID,
+ WMI_P2P_GRP_INIT_CMDID,
+ WMI_P2P_GRP_FORMATION_DONE_CMDID,
+ WMI_P2P_INVITE_CMDID,
+ WMI_P2P_INVITE_REQ_RSP_CMDID,
+ WMI_P2P_PROV_DISC_REQ_CMDID,
+ WMI_P2P_SET_CMDID,
+
+ WMI_GET_RFKILL_MODE_CMDID,
+ WMI_SET_RFKILL_MODE_CMDID,
+ WMI_AP_SET_APSD_CMDID,
+ WMI_AP_APSD_BUFFERED_TRAFFIC_CMDID,
+
+ WMI_P2P_SDPD_TX_CMDID, /* F05C */
+ WMI_P2P_STOP_SDPD_CMDID,
+ WMI_P2P_CANCEL_CMDID,
+ /* Ultra low power store / recall commands */
+ WMI_STORERECALL_CONFIGURE_CMDID,
+ WMI_STORERECALL_RECALL_CMDID,
+ WMI_STORERECALL_HOST_READY_CMDID,
+ WMI_FORCE_TARGET_ASSERT_CMDID,
+
+ WMI_SET_PROBED_SSID_EX_CMDID,
+ WMI_SET_NETWORK_LIST_OFFLOAD_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ WMI_ADD_WOW_EXT_PATTERN_CMDID,
+ WMI_GTK_OFFLOAD_OP_CMDID,
+ WMI_REMAIN_ON_CHNL_CMDID,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+ /* WMI_SEND_ACTION_CMDID is to be deprecated. Use
+ * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
+ WMI_SEND_ACTION_CMDID,
+ WMI_PROBE_REQ_REPORT_CMDID,
+ WMI_DISABLE_11B_RATES_CMDID,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
+ WMI_GET_P2P_INFO_CMDID,
+ WMI_AP_JOIN_BSS_CMDID,
+
+ WMI_SMPS_ENABLE_CMDID,
+ WMI_SMPS_CONFIG_CMDID,
+ WMI_SET_RATECTRL_PARM_CMDID,
+ /* LPL specific commands*/
+ WMI_LPL_FORCE_ENABLE_CMDID,
+ WMI_LPL_SET_POLICY_CMDID,
+ WMI_LPL_GET_POLICY_CMDID,
+ WMI_LPL_GET_HWSTATE_CMDID,
+ WMI_LPL_SET_PARAMS_CMDID,
+ WMI_LPL_GET_PARAMS_CMDID,
+
+ WMI_SET_BUNDLE_PARAM_CMDID,
+
+ /*GreenTx specific commands*/
+
+ WMI_GREENTX_PARAMS_CMDID,
+
+ WMI_RTT_MEASREQ_CMDID,
+ WMI_RTT_CAPREQ_CMDID,
+ WMI_RTT_STATUSREQ_CMDID,
+
+ /* WPS Commands */
+ WMI_WPS_START_CMDID,
+ WMI_GET_WPS_STATUS_CMDID,
+
+ /* More P2P commands */
+ WMI_SET_NOA_CMDID,
+ WMI_GET_NOA_CMDID,
+ WMI_SET_OPPPS_CMDID,
+ WMI_GET_OPPPS_CMDID,
+ WMI_ADD_PORT_CMDID,
+ WMI_DEL_PORT_CMDID,
+
+ /* 802.11w cmd */
+ WMI_SET_RSN_CAP_CMDID,
+ WMI_GET_RSN_CAP_CMDID,
+ WMI_SET_IGTK_CMDID,
+
+ WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID,
+ WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID,
+
+ WMI_SEND_MGMT_CMDID,
+ WMI_BEGIN_SCAN_CMDID,
+
+ WMI_SET_BLACK_LIST,
+ WMI_SET_MCASTRATE,
+
+ WMI_STA_BMISS_ENHANCE_CMDID,
+
+ WMI_SET_REGDOMAIN_CMDID,
+
+ WMI_SET_RSSI_FILTER_CMDID,
+
+ WMI_SET_KEEP_ALIVE_EXT,
+
+ WMI_VOICE_DETECTION_ENABLE_CMDID,
+
+ WMI_SET_TXE_NOTIFY_CMDID,
+
+ WMI_SET_RECOVERY_TEST_PARAMETER_CMDID, /*0xf094*/
+
+ WMI_ENABLE_SCHED_SCAN_CMDID,
+};
+
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ,
+ WMI_FRAME_PROBE_RESP,
+ WMI_FRAME_ASSOC_REQ,
+ WMI_FRAME_ASSOC_RESP,
+ WMI_NUM_MGMT_FRAME
+};
+
+enum wmi_ie_field_type {
+ WMI_RSN_IE_CAPB = 0x1,
+ WMI_IE_FULL = 0xFF, /* indicates full IE */
+};
+
+/* WMI_CONNECT_CMDID */
+enum network_type {
+ INFRA_NETWORK = 0x01,
+ ADHOC_NETWORK = 0x02,
+ ADHOC_CREATOR = 0x04,
+ AP_NETWORK = 0x10,
+};
+
+enum network_subtype {
+ SUBTYPE_NONE,
+ SUBTYPE_BT,
+ SUBTYPE_P2PDEV,
+ SUBTYPE_P2PCLIENT,
+ SUBTYPE_P2PGO,
+};
+
+enum dot11_auth_mode {
+ OPEN_AUTH = 0x01,
+ SHARED_AUTH = 0x02,
+
+ /* different from IEEE_AUTH_MODE definitions */
+ LEAP_AUTH = 0x04,
+};
+
+enum auth_mode {
+ NONE_AUTH = 0x01,
+ WPA_AUTH = 0x02,
+ WPA2_AUTH = 0x04,
+ WPA_PSK_AUTH = 0x08,
+ WPA2_PSK_AUTH = 0x10,
+ WPA_AUTH_CCKM = 0x20,
+ WPA2_AUTH_CCKM = 0x40,
+};
+
+#define WMI_MAX_KEY_INDEX 3
+
+#define WMI_MAX_KEY_LEN 32
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * implications in any reordering. In particular beware
+ * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
+ */
+#define ATH6KL_CIPHER_WEP 0
+#define ATH6KL_CIPHER_TKIP 1
+#define ATH6KL_CIPHER_AES_OCB 2
+#define ATH6KL_CIPHER_AES_CCM 3
+#define ATH6KL_CIPHER_CKIP 5
+#define ATH6KL_CIPHER_CCKM_KRK 6
+#define ATH6KL_CIPHER_NONE 7 /* pseudo value */
+
+/*
+ * 802.11 rate set.
+ */
+#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */
+
+#define ATH_OUI_TYPE 0x01
+#define WPA_OUI_TYPE 0x01
+#define WMM_PARAM_OUI_SUBTYPE 0x01
+#define WMM_OUI_TYPE 0x02
+#define WSC_OUT_TYPE 0x04
+
+enum wmi_connect_ctrl_flags_bits {
+ CONNECT_ASSOC_POLICY_USER = 0x0001,
+ CONNECT_SEND_REASSOC = 0x0002,
+ CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ CONNECT_DO_NOT_DEAUTH = 0x0080,
+ CONNECT_WPS_FLAG = 0x0100,
+};
+
+struct wmi_connect_cmd {
+ u8 nw_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto_type;
+ u8 prwise_crypto_len;
+ u8 grp_crypto_type;
+ u8 grp_crypto_len;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le32 ctrl_flags;
+ u8 nw_subtype;
+} __packed;
+
+/* WMI_RECONNECT_CMDID */
+struct wmi_reconnect_cmd {
+ /* channel hint */
+ __le16 channel;
+
+ /* mandatory if set */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* WMI_ADD_CIPHER_KEY_CMDID */
+enum key_usage {
+ PAIRWISE_USAGE = 0x00,
+ GROUP_USAGE = 0x01,
+
+ /* default Tx Key - static WEP only */
+ TX_USAGE = 0x02,
+};
+
+/*
+ * Bit Flag
+ * Bit 0 - Initialise TSC - default is Initialize
+ */
+#define KEY_OP_INIT_TSC 0x01
+#define KEY_OP_INIT_RSC 0x02
+
+/* default initialise the TSC & RSC */
+#define KEY_OP_INIT_VAL 0x03
+#define KEY_OP_VALID_MASK 0x03
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+
+ /* enum key_usage */
+ u8 key_usage;
+
+ u8 key_len;
+
+ /* key replay sequence counter */
+ u8 key_rsc[8];
+
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ /* additional key control info */
+ u8 key_op_ctrl;
+
+ u8 key_mac_addr[ETH_ALEN];
+} __packed;
+
+/* WMI_DELETE_CIPHER_KEY_CMDID */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+} __packed;
+
+#define WMI_KRK_LEN 16
+
+/* WMI_ADD_KRK_CMDID */
+struct wmi_add_krk_cmd {
+ u8 krk[WMI_KRK_LEN];
+} __packed;
+
+/* WMI_SET_PMKID_CMDID */
+
+#define WMI_PMKID_LEN 16
+
+enum pmkid_enable_flg {
+ PMKID_DISABLE = 0,
+ PMKID_ENABLE = 1,
+};
+
+struct wmi_setpmkid_cmd {
+ u8 bssid[ETH_ALEN];
+
+ /* enum pmkid_enable_flg */
+ u8 enable;
+
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_START_SCAN_CMDID */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_supp_rates {
+ u8 nrates;
+ u8 rates[ATH6KL_RATE_MAXSIZE];
+};
+
+struct wmi_begin_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel (msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* no CCK rates */
+ __le32 no_cck;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* Supported rates to advertise in the probe request frames */
+ struct wmi_supp_rates supp_rates[ATH6KL_NUM_BANDS];
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in MHz */
+ __le16 ch_list[1];
+} __packed;
+
+/* wmi_start_scan_cmd is to be deprecated. Use
+ * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel (msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in MHz */
+ __le16 ch_list[1];
+} __packed;
+
+/*
+ * Warning: scan control flag value of 0xFF is used to disable
+ * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
+ * flags here
+ */
+enum wmi_scan_ctrl_flags_bits {
+ /* set if can scan in the connect cmd */
+ CONNECT_SCAN_CTRL_FLAGS = 0x01,
+
+ /* set to scan for the SSID the device is already connected to */
+ SCAN_CONNECTED_CTRL_FLAGS = 0x02,
+
+ /* set to enable active scan */
+ ACTIVE_SCAN_CTRL_FLAGS = 0x04,
+
+ /* set to enable roam scan on beacon miss and low rssi */
+ ROAM_SCAN_CTRL_FLAGS = 0x08,
+
+ /* set if the target follows the customer BSSINFO reporting rule */
+ REPORT_BSSINFO_CTRL_FLAGS = 0x10,
+
+ /* if disabled, target doesn't scan after a disconnect event */
+ ENABLE_AUTO_CTRL_FLAGS = 0x20,
+
+ /*
+ * A scan complete event with canceled status will be generated when
+ * a scan is preempted before it completes.
+ */
+ ENABLE_SCAN_ABORT_EVENT = 0x40
+};
+
+struct wmi_scan_params_cmd {
+ /* sec */
+ __le16 fg_start_period;
+
+ /* sec */
+ __le16 fg_end_period;
+
+ /* sec */
+ __le16 bg_period;
+
+ /* msec */
+ __le16 maxact_chdwell_time;
+
+ /* msec */
+ __le16 pas_chdwell_time;
+
+ /* how many short scans for each long scan */
+ u8 short_scan_ratio;
+
+ u8 scan_ctrl_flags;
+
+ /* msec */
+ __le16 minact_chdwell_time;
+
+ /* max active scans per ssid */
+ __le16 maxact_scan_per_ssid;
+
+ /* msecs */
+ __le32 max_dfsch_act_time;
+} __packed;
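+
+/*
+ * Illustrative sketch, not part of the original patch: the control flags
+ * above are ORed into the scan_ctrl_flags byte (0xFF keeps its special
+ * "disable all flags" meaning). This hypothetical helper requests an
+ * active scan that also scans while connected.
+ */
+static inline void wmi_scan_params_set_active_connected(
+     struct wmi_scan_params_cmd *sc)
+{
+ sc->scan_ctrl_flags = ACTIVE_SCAN_CTRL_FLAGS |
+         SCAN_CONNECTED_CTRL_FLAGS;
+}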
+
+/* WMI_ENABLE_SCHED_SCAN_CMDID */
+struct wmi_enable_sched_scan_cmd {
+ u8 enable;
+} __packed;
+
+/* WMI_SET_BSS_FILTER_CMDID */
+enum wmi_bss_filter {
+ /* no beacons forwarded */
+ NONE_BSS_FILTER = 0x0,
+
+ /* all beacons forwarded */
+ ALL_BSS_FILTER,
+
+ /* only beacons matching profile */
+ PROFILE_FILTER,
+
+ /* all but beacons matching profile */
+ ALL_BUT_PROFILE_FILTER,
+
+ /* only beacons matching current BSS */
+ CURRENT_BSS_FILTER,
+
+ /* all but beacons matching BSS */
+ ALL_BUT_BSS_FILTER,
+
+ /* beacons matching probed ssid */
+ PROBED_SSID_FILTER,
+
+ /* beacons matching matched ssid */
+ MATCHED_SSID_FILTER,
+
+ /* marker only */
+ LAST_BSS_FILTER,
+};
+
+struct wmi_bss_filter_cmd {
+ /* see, enum wmi_bss_filter */
+ u8 bss_filter;
+
+ /* for alignment */
+ u8 reserved1;
+
+ /* for alignment */
+ __le16 reserved2;
+
+ __le32 ie_mask;
+} __packed;
+
+/* WMI_SET_PROBED_SSID_CMDID */
+#define MAX_PROBED_SSIDS 16
+
+enum wmi_ssid_flag {
+ /* disables entry */
+ DISABLE_SSID_FLAG = 0,
+
+ /* probes specified ssid */
+ SPECIFIC_SSID_FLAG = 0x01,
+
+ /* probes for any ssid */
+ ANY_SSID_FLAG = 0x02,
+
+ /* match for ssid */
+ MATCH_SSID_FLAG = 0x08,
+};
+
+struct wmi_probed_ssid_cmd {
+ /* 0 to MAX_PROBED_SSIDS - 1 */
+ u8 entry_index;
+
+ /* see, enum wmi_ssid_flag */
+ u8 flag;
+
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_LISTEN_INT_CMDID
+ * The Listen interval is between 15 and 3000 TUs
+ */
+struct wmi_listen_int_cmd {
+ __le16 listen_intvl;
+ __le16 num_beacons;
+} __packed;
+
+/* WMI_SET_BMISS_TIME_CMDID */
+struct wmi_bmiss_time_cmd {
+ __le16 bmiss_time;
+ __le16 num_beacons;
+};
+
+/* WMI_STA_BMISS_ENHANCE_CMDID */
+struct wmi_sta_bmiss_enhance_cmd {
+ u8 enable;
+} __packed;
+
+struct wmi_set_regdomain_cmd {
+ u8 length;
+ u8 iso_name[2];
+} __packed;
+
+/* WMI_SET_POWER_MODE_CMDID */
+enum wmi_power_mode {
+ REC_POWER = 0x01,
+ MAX_PERF_POWER,
+};
+
+struct wmi_power_mode_cmd {
+ /* see, enum wmi_power_mode */
+ u8 pwr_mode;
+} __packed;
+
+/*
+ * Policy to determine whether power save failure event should be sent to
+ * host during scanning
+ */
+enum power_save_fail_event_policy {
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
+ IGNORE_PS_FAIL_DURING_SCAN = 2,
+};
+
+struct wmi_power_params_cmd {
+ /* msec */
+ __le16 idle_period;
+
+ __le16 pspoll_number;
+ __le16 dtim_policy;
+ __le16 tx_wakeup_policy;
+ __le16 num_tx_to_wakeup;
+ __le16 ps_fail_event_policy;
+} __packed;
+
+/*
+ * Ratemask for below modes should be passed
+ * to WMI_SET_TX_SELECT_RATES_CMDID.
+ * AR6003 has 32 bit mask for each modes.
+ * First 12 bits for legacy rates, 13 to 20
+ * bits for HT 20 rates and 21 to 28 bits for
+ * HT 40 rates
+ */
+enum wmi_mode_phy {
+ WMI_RATES_MODE_11A = 0,
+ WMI_RATES_MODE_11G,
+ WMI_RATES_MODE_11B,
+ WMI_RATES_MODE_11GONLY,
+ WMI_RATES_MODE_11A_HT20,
+ WMI_RATES_MODE_11G_HT20,
+ WMI_RATES_MODE_11A_HT40,
+ WMI_RATES_MODE_11G_HT40,
+ WMI_RATES_MODE_MAX
+};
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates32_cmd {
+ __le32 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates64_cmd {
+ __le64 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
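+
+/*
+ * Illustrative sketch, not part of the original patch: assuming the bit
+ * layout described above is 0-based (bits 0-11 legacy, 12-19 HT20,
+ * 20-27 HT40), a hypothetical helper restricting one PHY mode to legacy
+ * rates only could look like this.
+ */
+static inline void wmi_select_legacy_rates_only(
+     struct wmi_set_tx_select_rates32_cmd *cmd,
+     enum wmi_mode_phy mode)
+{
+ cmd->ratemask[mode] = cpu_to_le32(0xfff); /* legacy rate bits 0-11 */
+}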
+
+/* WMI_SET_DISC_TIMEOUT_CMDID */
+struct wmi_disc_timeout_cmd {
+ /* seconds */
+ u8 discon_timeout;
+} __packed;
+
+enum dir_type {
+ UPLINK_TRAFFIC = 0,
+ DNLINK_TRAFFIC = 1,
+ BIDIR_TRAFFIC = 2,
+};
+
+enum voiceps_cap_type {
+ DISABLE_FOR_THIS_AC = 0,
+ ENABLE_FOR_THIS_AC = 1,
+ ENABLE_FOR_ALL_AC = 2,
+};
+
+enum traffic_type {
+ TRAFFIC_TYPE_APERIODIC = 0,
+ TRAFFIC_TYPE_PERIODIC = 1,
+};
+
+/* WMI_SYNCHRONIZE_CMDID */
+struct wmi_sync_cmd {
+ u8 data_sync_map;
+} __packed;
+
+/* WMI_CREATE_PSTREAM_CMDID */
+struct wmi_create_pstream_cmd {
+ /* msec */
+ __le32 min_service_int;
+
+ /* msec */
+ __le32 max_service_int;
+
+ /* msec */
+ __le32 inactivity_int;
+
+ /* msec */
+ __le32 suspension_int;
+
+ __le32 service_start_time;
+
+ /* in bps */
+ __le32 min_data_rate;
+
+ /* in bps */
+ __le32 mean_data_rate;
+
+ /* in bps */
+ __le32 peak_data_rate;
+
+ __le32 max_burst_size;
+ __le32 delay_bound;
+
+ /* in bps */
+ __le32 min_phy_rate;
+
+ __le32 sba;
+ __le32 medium_time;
+
+ /* in octets */
+ __le16 nominal_msdu;
+
+ /* in octets */
+ __le16 max_msdu;
+
+ u8 traffic_class;
+
+ /* see, enum dir_type */
+ u8 traffic_direc;
+
+ u8 rx_queue_num;
+
+ /* see, enum traffic_type */
+ u8 traffic_type;
+
+ /* see, enum voiceps_cap_type */
+ u8 voice_psc_cap;
+ u8 tsid;
+
+ /* 802.1D user priority */
+ u8 user_pri;
+
+ /* nominal phy rate */
+ u8 nominal_phy;
+} __packed;
+
+/* WMI_DELETE_PSTREAM_CMDID */
+struct wmi_delete_pstream_cmd {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+ u8 tsid;
+} __packed;
+
+/* WMI_SET_CHANNEL_PARAMS_CMDID */
+enum wmi_phy_mode {
+ WMI_11A_MODE = 0x1,
+ WMI_11G_MODE = 0x2,
+ WMI_11AG_MODE = 0x3,
+ WMI_11B_MODE = 0x4,
+ WMI_11GONLY_MODE = 0x5,
+ WMI_11G_HT20 = 0x6,
+};
+
+#define WMI_MAX_CHANNELS 32
+
+/*
+ * WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling. Threshold values are
+ * in ascending order and must satisfy:
+ * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
+ * < highThreshold_upperVal)
+ */
+
+struct wmi_rssi_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* lowest of upper */
+ a_sle16 thresh_above1_val;
+
+ a_sle16 thresh_above2_val;
+ a_sle16 thresh_above3_val;
+ a_sle16 thresh_above4_val;
+ a_sle16 thresh_above5_val;
+
+ /* highest of upper */
+ a_sle16 thresh_above6_val;
+
+ /* lowest of below */
+ a_sle16 thresh_below1_val;
+
+ a_sle16 thresh_below2_val;
+ a_sle16 thresh_below3_val;
+ a_sle16 thresh_below4_val;
+ a_sle16 thresh_below5_val;
+
+ /* highest of below */
+ a_sle16 thresh_below6_val;
+
+ /* "alpha" */
+ u8 weight;
+
+ u8 reserved[3];
+} __packed;
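+
+/*
+ * Illustrative sketch, not part of the original patch: one plausible
+ * reading of the ordering constraint above is that every "below"
+ * threshold sits under every "above" threshold; this hypothetical check
+ * covers the boundary pair.
+ */
+static inline bool wmi_rssi_thresholds_ordered(
+     const struct wmi_rssi_threshold_params_cmd *cmd)
+{
+ return a_sle16_to_cpu(cmd->thresh_below6_val) <
+        a_sle16_to_cpu(cmd->thresh_above1_val);
+}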
+
+/*
+ * WMI_SNR_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ */
+
+struct wmi_snr_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* "alpha" */
+ u8 weight;
+
+ /* lowest of upper */
+ u8 thresh_above1_val;
+
+ u8 thresh_above2_val;
+ u8 thresh_above3_val;
+
+ /* highest of upper */
+ u8 thresh_above4_val;
+
+ /* lowest of below */
+ u8 thresh_below1_val;
+
+ u8 thresh_below2_val;
+ u8 thresh_below3_val;
+
+ /* highest of below */
+ u8 thresh_below4_val;
+
+ u8 reserved[3];
+} __packed;
+
+/* Don't report BSSs with signal (RSSI) below this threshold */
+struct wmi_set_rssi_filter_cmd {
+ s8 rssi;
+} __packed;
+
+enum wmi_preamble_policy {
+ WMI_IGNORE_BARKER_IN_ERP = 0,
+ WMI_FOLLOW_BARKER_IN_ERP,
+};
+
+struct wmi_set_lpreamble_cmd {
+ u8 status;
+ u8 preamble_policy;
+} __packed;
+
+struct wmi_set_rts_cmd {
+ __le16 threshold;
+} __packed;
+
+/* WMI_SET_TX_PWR_CMDID */
+struct wmi_set_tx_pwr_cmd {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_tx_pwr_reply {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_report_sleep_state_event {
+ __le32 sleep_state;
+};
+
+enum wmi_report_sleep_status {
+ WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
+ WMI_REPORT_SLEEP_STATUS_IS_AWAKE
+};
+enum target_event_report_config {
+ /* default */
+ DISCONN_EVT_IN_RECONN = 0,
+
+ NO_DISCONN_EVT_IN_RECONN
+};
+
+struct wmi_mcast_filter_cmd {
+ u8 mcast_all_enable;
+} __packed;
+
+#define ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE 6
+struct wmi_mcast_filter_add_del_cmd {
+ u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
+} __packed;
+
+struct wmi_set_htcap_cmd {
+ u8 band;
+ u8 ht_enable;
+ u8 ht40_supported;
+ u8 ht20_sgi;
+ u8 ht40_sgi;
+ u8 intolerant_40mhz;
+ u8 max_ampdu_len_exp;
+} __packed;
+
+/* Command Replies */
+
+/* WMI_GET_CHANNEL_LIST_CMDID reply */
+struct wmi_channel_list_reply {
+ u8 reserved;
+
+ /* number of channels in reply */
+ u8 num_ch;
+
+ /* channel in MHz */
+ __le16 ch_list[1];
+} __packed;
+
+/* List of Events (target to host) */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID,
+ WMI_DISCONNECT_EVENTID,
+ WMI_BSSINFO_EVENTID,
+ WMI_CMDERROR_EVENTID,
+ WMI_REGDOMAIN_EVENTID,
+ WMI_PSTREAM_TIMEOUT_EVENTID,
+ WMI_NEIGHBOR_REPORT_EVENTID,
+ WMI_TKIP_MICERR_EVENTID,
+ WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
+ WMI_REPORT_STATISTICS_EVENTID,
+ WMI_RSSI_THRESHOLD_EVENTID,
+ WMI_ERROR_REPORT_EVENTID,
+ WMI_OPT_RX_FRAME_EVENTID,
+ WMI_REPORT_ROAM_TBL_EVENTID,
+ WMI_EXTENSION_EVENTID,
+ WMI_CAC_EVENTID,
+ WMI_SNR_THRESHOLD_EVENTID,
+ WMI_LQ_THRESHOLD_EVENTID,
+ WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
+ WMI_REPORT_ROAM_DATA_EVENTID,
+ WMI_TEST_EVENTID,
+ WMI_APLIST_EVENTID,
+ WMI_GET_WOW_LIST_EVENTID,
+ WMI_GET_PMKID_LIST_EVENTID,
+ WMI_CHANNEL_CHANGE_EVENTID,
+ WMI_PEER_NODE_EVENTID,
+ WMI_PSPOLL_EVENTID,
+ WMI_DTIMEXPIRY_EVENTID,
+ WMI_WLAN_VERSION_EVENTID,
+ WMI_SET_PARAMS_REPLY_EVENTID,
+ WMI_ADDBA_REQ_EVENTID, /*0x1020 */
+ WMI_ADDBA_RESP_EVENTID,
+ WMI_DELBA_REQ_EVENTID,
+ WMI_TX_COMPLETE_EVENTID,
+ WMI_HCI_EVENT_EVENTID,
+ WMI_ACL_DATA_EVENTID,
+ WMI_REPORT_SLEEP_STATE_EVENTID,
+ WMI_REPORT_BTCOEX_STATS_EVENTID,
+ WMI_REPORT_BTCOEX_CONFIG_EVENTID,
+ WMI_GET_PMK_EVENTID,
+
+ /* DFS Events */
+ WMI_DFS_HOST_ATTACH_EVENTID,
+ WMI_DFS_HOST_INIT_EVENTID,
+ WMI_DFS_RESET_DELAYLINES_EVENTID,
+ WMI_DFS_RESET_RADARQ_EVENTID,
+ WMI_DFS_RESET_AR_EVENTID,
+ WMI_DFS_RESET_ARQ_EVENTID,
+ WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
+ WMI_DFS_SET_BANGRADAR_EVENTID,
+ WMI_DFS_SET_DEBUGLEVEL_EVENTID,
+ WMI_DFS_PHYERR_EVENTID,
+
+ /* CCX Events */
+ WMI_CCX_RM_STATUS_EVENTID,
+
+ /* P2P Events */
+ WMI_P2P_GO_NEG_RESULT_EVENTID,
+
+ WMI_WAC_SCAN_DONE_EVENTID,
+ WMI_WAC_REPORT_BSS_EVENTID,
+ WMI_WAC_START_WPS_EVENTID,
+ WMI_WAC_CTRL_REQ_REPLY_EVENTID,
+
+ WMI_REPORT_WMM_PARAMS_EVENTID,
+ WMI_WAC_REJECT_WPS_EVENTID,
+
+ /* More P2P Events */
+ WMI_P2P_GO_NEG_REQ_EVENTID,
+ WMI_P2P_INVITE_REQ_EVENTID,
+ WMI_P2P_INVITE_RCVD_RESULT_EVENTID,
+ WMI_P2P_INVITE_SENT_RESULT_EVENTID,
+ WMI_P2P_PROV_DISC_RESP_EVENTID,
+ WMI_P2P_PROV_DISC_REQ_EVENTID,
+
+ /* RFKILL Events */
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_RFKILL_GET_MODE_CMD_EVENTID,
+
+ WMI_P2P_START_SDPD_EVENTID,
+ WMI_P2P_SDPD_RX_EVENTID,
+
+ WMI_SET_HOST_SLEEP_MODE_CMD_PROCESSED_EVENTID = 0x1047,
+
+ WMI_THIN_RESERVED_START_EVENTID = 0x8000,
+ /* Events in this range are reserved for thinmode */
+ WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
+
+ WMI_SET_CHANNEL_EVENTID,
+ WMI_ASSOC_REQ_EVENTID,
+
+ /* Generic ACS event */
+ WMI_ACS_EVENTID,
+ WMI_STORERECALL_STORE_EVENTID,
+ WMI_WOW_EXT_WAKE_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_NETWORK_LIST_OFFLOAD_EVENTID,
+ WMI_REMAIN_ON_CHNL_EVENTID,
+ WMI_CANCEL_REMAIN_ON_CHNL_EVENTID,
+ WMI_TX_STATUS_EVENTID,
+ WMI_RX_PROBE_REQ_EVENTID,
+ WMI_P2P_CAPABILITIES_EVENTID,
+ WMI_RX_ACTION_EVENTID,
+ WMI_P2P_INFO_EVENTID,
+
+ /* WPS Events */
+ WMI_WPS_GET_STATUS_EVENTID,
+ WMI_WPS_PROFILE_EVENTID,
+
+ /* more P2P events */
+ WMI_NOA_INFO_EVENTID,
+ WMI_OPPPS_INFO_EVENTID,
+ WMI_PORT_STATUS_EVENTID,
+
+ /* 802.11w */
+ WMI_GET_RSN_CAP_EVENTID,
+
+ WMI_TXE_NOTIFY_EVENTID,
+};
+
+struct wmi_ready_event_2 {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac_addr[ETH_ALEN];
+ u8 phy_cap;
+} __packed;
+
+/* WMI_PHY_CAPABILITY */
+enum wmi_phy_cap {
+ WMI_11A_CAP = 0x01,
+ WMI_11G_CAP = 0x02,
+ WMI_11AG_CAP = 0x03,
+ WMI_11AN_CAP = 0x04,
+ WMI_11GN_CAP = 0x05,
+ WMI_11AGN_CAP = 0x06,
+};
+
+/* Connect Event */
+struct wmi_connect_event {
+ union {
+ struct {
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le16 listen_intvl;
+ __le16 beacon_intvl;
+ __le32 nw_type;
+ } sta;
+ struct {
+ u8 phymode;
+ u8 aid;
+ u8 mac_addr[ETH_ALEN];
+ u8 auth;
+ u8 keymgmt;
+ __le16 cipher;
+ u8 apsd_info;
+ u8 unused[3];
+ } ap_sta;
+ struct {
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ u8 unused[8];
+ } ap_bss;
+ } u;
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/* Disconnect Event */
+enum wmi_disconnect_reason {
+ NO_NETWORK_AVAIL = 0x01,
+
+ /* bmiss */
+ LOST_LINK = 0x02,
+
+ DISCONNECT_CMD = 0x03,
+ BSS_DISCONNECTED = 0x04,
+ AUTH_FAILED = 0x05,
+ ASSOC_FAILED = 0x06,
+ NO_RESOURCES_AVAIL = 0x07,
+ CSERV_DISCONNECT = 0x08,
+ INVALID_PROFILE = 0x0a,
+ DOT11H_CHANNEL_SWITCH = 0x0b,
+ PROFILE_MISMATCH = 0x0c,
+ CONNECTION_EVICTED = 0x0d,
+ IBSS_MERGE = 0xe,
+};
+
+/* AP mode disconnect proto_reasons */
+enum ap_disconnect_reason {
+ WMI_AP_REASON_STA_LEFT = 101,
+ WMI_AP_REASON_FROM_HOST = 102,
+ WMI_AP_REASON_COMM_TIMEOUT = 103,
+ WMI_AP_REASON_MAX_STA = 104,
+ WMI_AP_REASON_ACL = 105,
+ WMI_AP_REASON_STA_ROAM = 106,
+ WMI_AP_REASON_DFS_CHANNEL = 107,
+};
+
+#define ATH6KL_COUNTRY_RD_SHIFT 16
+
+struct ath6kl_wmi_regdomain {
+ __le32 reg_code;
+};
+
+struct wmi_disconnect_event {
+ /* reason code, see 802.11 spec. */
+ __le16 proto_reason_status;
+
+ /* set if known */
+ u8 bssid[ETH_ALEN];
+
+ /* see WMI_DISCONNECT_REASON */
+ u8 disconn_reason;
+
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/*
+ * BSS Info Event.
+ * Mechanism used to inform the host of the presence and characteristics
+ * of the wireless networks found. Consists of a bss info header followed
+ * by the beacon or probe-response frame body. The 802.11 header is not
+ * included.
+ */
+enum wmi_bi_ftype {
+ BEACON_FTYPE = 0x1,
+ PROBERESP_FTYPE,
+ ACTION_MGMT_FTYPE,
+ PROBEREQ_FTYPE,
+};
+
+#define DEF_LRSSI_SCAN_PERIOD 5
+#define DEF_LRSSI_ROAM_THRESHOLD 20
+#define DEF_LRSSI_ROAM_FLOOR 60
+#define DEF_SCAN_FOR_ROAM_INTVL 2
+
+enum wmi_roam_ctrl {
+ WMI_FORCE_ROAM = 1,
+ WMI_SET_ROAM_MODE,
+ WMI_SET_HOST_BIAS,
+ WMI_SET_LRSSI_SCAN_PARAMS,
+};
+
+enum wmi_roam_mode {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+ WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
+struct bss_bias {
+ u8 bssid[ETH_ALEN];
+ s8 bias;
+} __packed;
+
+struct bss_bias_info {
+ u8 num_bss;
+ struct bss_bias bss_bias[0];
+} __packed;
+
+struct low_rssi_scan_params {
+ __le16 lrssi_scan_period;
+ a_sle16 lrssi_scan_threshold;
+ a_sle16 lrssi_roam_threshold;
+ u8 roam_rssi_floor;
+ u8 reserved[1];
+} __packed;
+
+struct roam_ctrl_cmd {
+ union {
+ u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+ u8 roam_mode; /* WMI_SET_ROAM_MODE */
+ struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+ struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+ */
+ } __packed info;
+ u8 roam_ctrl;
+} __packed;
+
+struct set_beacon_int_cmd {
+ __le32 beacon_intvl;
+} __packed;
+
+struct set_dtim_cmd {
+ __le32 dtim_period;
+} __packed;
+
+/* BSS INFO HDR version 2.0 */
+struct wmi_bss_info_hdr2 {
+ __le16 ch; /* frequency in MHz */
+
+ /* see, enum wmi_bi_ftype */
+ u8 frame_type;
+
+ u8 snr; /* note: rssi = snr - 95 dBm */
+ u8 bssid[ETH_ALEN];
+ __le16 ie_mask;
+} __packed;
+
+/* Command Error Event */
+enum wmi_error_code {
+ INVALID_PARAM = 0x01,
+ ILLEGAL_STATE = 0x02,
+ INTERNAL_ERROR = 0x03,
+};
+
+struct wmi_cmd_error_event {
+ __le16 cmd_id;
+ u8 err_code;
+} __packed;
+
+struct wmi_pstream_timeout_event {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+} __packed;
+
+/*
+ * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
+ * the host of BSSs it has found that match the current profile.
+ * It can be used by the host to cache PMKs and/or to initiate
+ * pre-authentication if the BSS supports it. The first bssid is always
+ * the currently associated BSS.
+ * The bssid and bssFlags information repeats according to the number
+ * of APs reported.
+ */
+enum wmi_bss_flags {
+ WMI_DEFAULT_BSS_FLAGS = 0x00,
+ WMI_PREAUTH_CAPABLE_BSS = 0x01,
+ WMI_PMKID_VALID_BSS = 0x02,
+};
+
+struct wmi_neighbor_info {
+ u8 bssid[ETH_ALEN];
+ u8 bss_flags; /* enum wmi_bss_flags */
+} __packed;
+
+struct wmi_neighbor_report_event {
+ u8 num_neighbors;
+ struct wmi_neighbor_info neighbor[0];
+} __packed;
+
+/* TKIP MIC Error Event */
+struct wmi_tkip_micerr_event {
+ u8 key_id;
+ u8 is_mcast;
+} __packed;
+
+enum wmi_scan_status {
+ WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
+/* WMI_SCAN_COMPLETE_EVENTID */
+struct wmi_scan_complete_event {
+ a_sle32 status;
+} __packed;
+
+#define MAX_OPT_DATA_LEN 1400
+
+/*
+ * Special frame receive Event.
+ * Mechanism used to inform the host of the reception of special frames.
+ * Consists of special frame info header followed by special frame body.
+ * The 802.11 header is not included.
+ */
+struct wmi_opt_rx_info_hdr {
+ __le16 ch;
+ u8 frame_type;
+ s8 snr;
+ u8 src_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* Reporting statistic */
+struct tx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 rts_success_cnt;
+ __le32 pkt_per_ac[4];
+ __le32 err_per_ac[4];
+
+ __le32 err;
+ __le32 fail_cnt;
+ __le32 retry_cnt;
+ __le32 mult_retry_cnt;
+ __le32 rts_fail_cnt;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct rx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 frgment_pkt;
+
+ __le32 err;
+ __le32 crc_err;
+ __le32 key_cache_miss;
+ __le32 decrypt_err;
+ __le32 dupl_frame;
+ a_sle32 ucast_rate;
+} __packed;
+
+#define RATE_INDEX_WITHOUT_SGI_MASK 0x7f
+#define RATE_INDEX_MSB 0x80
+
+struct tkip_ccmp_stats {
+ __le32 tkip_local_mic_fail;
+ __le32 tkip_cnter_measures_invoked;
+ __le32 tkip_replays;
+ __le32 tkip_fmt_err;
+ __le32 ccmp_fmt_err;
+ __le32 ccmp_replays;
+} __packed;
+
+struct pm_stats {
+ __le32 pwr_save_failure_cnt;
+ __le16 stop_tx_failure_cnt;
+ __le16 atim_tx_failure_cnt;
+ __le16 atim_rx_failure_cnt;
+ __le16 bcn_rx_failure_cnt;
+} __packed;
+
+struct cserv_stats {
+ __le32 cs_bmiss_cnt;
+ __le32 cs_low_rssi_cnt;
+ __le16 cs_connect_cnt;
+ __le16 cs_discon_cnt;
+ a_sle16 cs_ave_beacon_rssi;
+ __le16 cs_roam_count;
+ a_sle16 cs_rssi;
+ u8 cs_snr;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+} __packed;
+
+struct wlan_net_stats {
+ struct tx_stats tx;
+ struct rx_stats rx;
+ struct tkip_ccmp_stats tkip_ccmp_stats;
+} __packed;
+
+struct arp_stats {
+ __le32 arp_received;
+ __le32 arp_matched;
+ __le32 arp_replied;
+} __packed;
+
+struct wlan_wow_stats {
+ __le32 wow_pkt_dropped;
+ __le16 wow_evt_discarded;
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+} __packed;
+
+struct wmi_target_stats {
+ __le32 lq_val;
+ a_sle32 noise_floor_calib;
+ struct pm_stats pm_stats;
+ struct wlan_net_stats stats;
+ struct wlan_wow_stats wow_stats;
+ struct arp_stats arp_stats;
+ struct cserv_stats cserv_stats;
+} __packed;
+
+/*
+ * WMI_RSSI_THRESHOLD_EVENTID.
+ * Indicate the RSSI events to the host. Events are indicated when we
+ * breach a threshold value.
+ */
+enum wmi_rssi_threshold_val {
+ WMI_RSSI_THRESHOLD1_ABOVE = 0,
+ WMI_RSSI_THRESHOLD2_ABOVE,
+ WMI_RSSI_THRESHOLD3_ABOVE,
+ WMI_RSSI_THRESHOLD4_ABOVE,
+ WMI_RSSI_THRESHOLD5_ABOVE,
+ WMI_RSSI_THRESHOLD6_ABOVE,
+ WMI_RSSI_THRESHOLD1_BELOW,
+ WMI_RSSI_THRESHOLD2_BELOW,
+ WMI_RSSI_THRESHOLD3_BELOW,
+ WMI_RSSI_THRESHOLD4_BELOW,
+ WMI_RSSI_THRESHOLD5_BELOW,
+ WMI_RSSI_THRESHOLD6_BELOW
+};
+
+struct wmi_rssi_threshold_event {
+ a_sle16 rssi;
+ u8 range;
+} __packed;
+
+enum wmi_snr_threshold_val {
+ WMI_SNR_THRESHOLD1_ABOVE = 1,
+ WMI_SNR_THRESHOLD1_BELOW,
+ WMI_SNR_THRESHOLD2_ABOVE,
+ WMI_SNR_THRESHOLD2_BELOW,
+ WMI_SNR_THRESHOLD3_ABOVE,
+ WMI_SNR_THRESHOLD3_BELOW,
+ WMI_SNR_THRESHOLD4_ABOVE,
+ WMI_SNR_THRESHOLD4_BELOW
+};
+
+struct wmi_snr_threshold_event {
+ /* see, enum wmi_snr_threshold_val */
+ u8 range;
+
+ u8 snr;
+} __packed;
+
+/* WMI_REPORT_ROAM_TBL_EVENTID */
+#define MAX_ROAM_TBL_CAND 5
+
+struct wmi_bss_roam_info {
+ a_sle32 roam_util;
+ u8 bssid[ETH_ALEN];
+ s8 rssi;
+ s8 rssidt;
+ s8 last_rssi;
+ s8 util;
+ s8 bias;
+
+ /* for alignment */
+ u8 reserved;
+} __packed;
+
+struct wmi_target_roam_tbl {
+ __le16 roam_mode;
+ __le16 num_entries;
+ struct wmi_bss_roam_info info[];
+} __packed;
+
+/* WMI_CAC_EVENTID */
+enum cac_indication {
+ CAC_INDICATION_ADMISSION = 0x00,
+ CAC_INDICATION_ADMISSION_RESP = 0x01,
+ CAC_INDICATION_DELETE = 0x02,
+ CAC_INDICATION_NO_RESP = 0x03,
+};
+
+#define WMM_TSPEC_IE_LEN 63
+
+struct wmi_cac_event {
+ u8 ac;
+ u8 cac_indication;
+ u8 status_code;
+ u8 tspec_suggestion[WMM_TSPEC_IE_LEN];
+} __packed;
+
+/* WMI_APLIST_EVENTID */
+
+enum aplist_ver {
+ APLIST_VER1 = 1,
+};
+
+struct wmi_ap_info_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 channel;
+} __packed;
+
+union wmi_ap_info {
+ struct wmi_ap_info_v1 ap_info_v1;
+} __packed;
+
+struct wmi_aplist_event {
+ u8 ap_list_ver;
+ u8 num_ap;
+ union wmi_ap_info ap_list[1];
+} __packed;
+
+/* Developer Commands */
+
+/*
+ * WMI_SET_BITRATE_CMDID
+ *
+ * Get bit rate cmd uses same definition as set bit rate cmd
+ */
+enum wmi_bit_rate {
+ RATE_AUTO = -1,
+ RATE_1Mb = 0,
+ RATE_2Mb = 1,
+ RATE_5_5Mb = 2,
+ RATE_11Mb = 3,
+ RATE_6Mb = 4,
+ RATE_9Mb = 5,
+ RATE_12Mb = 6,
+ RATE_18Mb = 7,
+ RATE_24Mb = 8,
+ RATE_36Mb = 9,
+ RATE_48Mb = 10,
+ RATE_54Mb = 11,
+ RATE_MCS_0_20 = 12,
+ RATE_MCS_1_20 = 13,
+ RATE_MCS_2_20 = 14,
+ RATE_MCS_3_20 = 15,
+ RATE_MCS_4_20 = 16,
+ RATE_MCS_5_20 = 17,
+ RATE_MCS_6_20 = 18,
+ RATE_MCS_7_20 = 19,
+ RATE_MCS_0_40 = 20,
+ RATE_MCS_1_40 = 21,
+ RATE_MCS_2_40 = 22,
+ RATE_MCS_3_40 = 23,
+ RATE_MCS_4_40 = 24,
+ RATE_MCS_5_40 = 25,
+ RATE_MCS_6_40 = 26,
+ RATE_MCS_7_40 = 27,
+};
+
+struct wmi_bit_rate_reply {
+ /* see, enum wmi_bit_rate */
+ s8 rate_index;
+} __packed;
+
+/*
+ * WMI_SET_FIXRATES_CMDID
+ *
+ * Get fix rates cmd uses same definition as set fix rates cmd
+ */
+struct wmi_fix_rates_reply {
+ /* see wmi_bit_rate */
+ __le32 fix_rate_mask;
+} __packed;
+
+enum roam_data_type {
+ /* get the roam time data */
+ ROAM_DATA_TIME = 1,
+};
+
+struct wmi_target_roam_time {
+ __le32 disassoc_time;
+ __le32 no_txrx_time;
+ __le32 assoc_time;
+ __le32 allow_txrx_time;
+ u8 disassoc_bssid[ETH_ALEN];
+ s8 disassoc_bss_rssi;
+ u8 assoc_bssid[ETH_ALEN];
+ s8 assoc_bss_rssi;
+} __packed;
+
+enum wmi_txop_cfg {
+ WMI_TXOP_DISABLED = 0,
+ WMI_TXOP_ENABLED
+};
+
+struct wmi_set_wmm_txop_cmd {
+ u8 txop_enable;
+} __packed;
+
+struct wmi_set_keepalive_cmd {
+ u8 keep_alive_intvl;
+} __packed;
+
+struct wmi_get_keepalive_cmd {
+ __le32 configured;
+ u8 keep_alive_intvl;
+} __packed;
+
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+struct wmi_set_ie_cmd {
+ u8 ie_id;
+ u8 ie_field; /* enum wmi_ie_field_type */
+ u8 ie_len;
+ u8 reserved;
+ u8 ie_info[0];
+} __packed;
+
+/* Notify the target of the WSC registration status */
+#define WSC_REG_ACTIVE 1
+#define WSC_REG_INACTIVE 0
+
+#define WOW_MAX_FILTERS_PER_LIST 4
+#define WOW_PATTERN_SIZE 64
+
+#define MAC_MAX_FILTERS_PER_LIST 4
+
+struct wow_filter {
+ u8 wow_valid_filter;
+ u8 wow_filter_id;
+ u8 wow_filter_size;
+ u8 wow_filter_offset;
+ u8 wow_filter_mask[WOW_PATTERN_SIZE];
+ u8 wow_filter_pattern[WOW_PATTERN_SIZE];
+} __packed;
+
+#define MAX_IP_ADDRS 2
+
+struct wmi_set_ip_cmd {
+ /* IP in network byte order */
+ __be32 ips[MAX_IP_ADDRS];
+} __packed;
+
+enum ath6kl_wow_filters {
+ WOW_FILTER_SSID = BIT(1),
+ WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
+ WOW_FILTER_OPTION_EAP_REQ = BIT(3),
+ WOW_FILTER_OPTION_PATTERNS = BIT(4),
+ WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
+ WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
+ WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
+ WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
+ WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
+ WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
+ WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
+ WOW_FILTER_OPTION_TEST_MODE = BIT(15),
+};
+
+enum ath6kl_host_mode {
+ ATH6KL_HOST_MODE_AWAKE,
+ ATH6KL_HOST_MODE_ASLEEP,
+};
+
+struct wmi_set_host_sleep_mode_cmd {
+ __le32 awake;
+ __le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+ ATH6KL_WOW_MODE_DISABLE,
+ ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+ __le32 enable_wow;
+ __le32 filter;
+ __le16 host_req_delay;
+} __packed;
+
+struct wmi_add_wow_pattern_cmd {
+ u8 filter_list_id;
+ u8 filter_size;
+ u8 filter_offset;
+ u8 filter[0];
+} __packed;
+
+struct wmi_del_wow_pattern_cmd {
+ __le16 filter_list_id;
+ __le16 filter_id;
+} __packed;
+
+/* WMI_SET_TXE_NOTIFY_CMDID */
+struct wmi_txe_notify_cmd {
+ __le32 rate;
+ __le32 pkts;
+ __le32 intvl;
+} __packed;
+
+/* WMI_TXE_NOTIFY_EVENTID */
+struct wmi_txe_notify_event {
+ __le32 rate;
+ __le32 pkts;
+} __packed;
+
+/* WMI_SET_AKMP_PARAMS_CMD */
+
+struct wmi_pmkid {
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_GET_PMKID_LIST_CMD Reply */
+struct wmi_pmkid_list_reply {
+ __le32 num_pmkid;
+ u8 bssid_list[ETH_ALEN][1];
+ struct wmi_pmkid pmkid_list[1];
+} __packed;
+
+/* WMI_ADDBA_REQ_EVENTID */
+struct wmi_addba_req_event {
+ u8 tid;
+ u8 win_sz;
+ __le16 st_seq_no;
+
+ /* f/w response for ADDBA Req; OK (0) or failure (!=0) */
+ u8 status;
+} __packed;
+
+/* WMI_ADDBA_RESP_EVENTID */
+struct wmi_addba_resp_event {
+ u8 tid;
+
+ /* OK (0), failure (!=0) */
+ u8 status;
+
+ /* three values: not supported(0), 3839, 8k */
+ __le16 amsdu_sz;
+} __packed;
+
+/* WMI_DELBA_EVENTID
+ * The firmware received a DELBA from the peer and processed it.
+ * The host is notified of this.
+ */
+struct wmi_delba_event {
+ u8 tid;
+ u8 is_peer_initiator;
+ __le16 reason_code;
+} __packed;
+
+#define PEER_NODE_JOIN_EVENT 0x00
+#define PEER_NODE_LEAVE_EVENT 0x01
+#define PEER_FIRST_NODE_JOIN_EVENT 0x10
+#define PEER_LAST_NODE_LEAVE_EVENT 0x11
+
+struct wmi_peer_node_event {
+ u8 event_code;
+ u8 peer_mac_addr[ETH_ALEN];
+} __packed;
+
+/* Transmit complete event data structure(s) */
+
+/* version 1 of tx complete msg */
+struct tx_complete_msg_v1 {
+#define TX_COMPLETE_STATUS_SUCCESS 0
+#define TX_COMPLETE_STATUS_RETRIES 1
+#define TX_COMPLETE_STATUS_NOLINK 2
+#define TX_COMPLETE_STATUS_TIMEOUT 3
+#define TX_COMPLETE_STATUS_OTHER 4
+
+ u8 status;
+
+ /* packet ID to identify parent packet */
+ u8 pkt_id;
+
+ /* rate index on successful transmission */
+ u8 rate_idx;
+
+ /* number of ACK failures in tx attempt */
+ u8 ack_failures;
+} __packed;
+
+struct wmi_tx_complete_event {
+ /* no of tx comp msgs following this struct */
+ u8 num_msg;
+
+ /* length in bytes for each individual msg following this struct */
+ u8 msg_len;
+
+ /* version of tx complete msg data following this struct */
+ u8 msg_type;
+
+ /* individual messages follow this header */
+ u8 reserved;
+} __packed;
+
+/*
+ * ------- AP Mode definitions --------------
+ */
+
+/*
+ * !!! Warning !!!
+ * Changing the following values requires recompilation of both the driver
+ * and the firmware.
+ */
+#define AP_MAX_NUM_STA 10
+
+/* Special AID used to set the DTIM flag in beacons */
+#define MCAST_AID 0xFF
+
+#define DEF_AP_COUNTRY_CODE "US "
+
+/* Used with WMI_AP_SET_NUM_STA_CMDID */
+
+/*
+ * Used with WMI_AP_SET_MLME_CMDID
+ */
+
+/* MLME Commands */
+#define WMI_AP_MLME_ASSOC 1 /* associate station */
+#define WMI_AP_DISASSOC 2 /* disassociate station */
+#define WMI_AP_DEAUTH 3 /* deauthenticate station */
+#define WMI_AP_MLME_AUTHORIZE 4 /* authorize station */
+#define WMI_AP_MLME_UNAUTHORIZE 5 /* unauthorize station */
+
+struct wmi_ap_set_mlme_cmd {
+ u8 mac[ETH_ALEN];
+ __le16 reason; /* 802.11 reason code */
+ u8 cmd; /* operation to perform (WMI_AP_*) */
+} __packed;
+
+struct wmi_ap_set_pvb_cmd {
+ __le32 flag;
+ __le16 rsvd;
+ __le16 aid;
+} __packed;
+
+struct wmi_rx_frame_format_cmd {
+ /* version of meta data for rx packets <0 = default> (0-7 = valid) */
+ u8 meta_ver;
+
+ /*
+ * 1 == leave .11 header intact,
+ * 0 == replace .11 header with .3 <default>
+ */
+ u8 dot11_hdr;
+
+ /*
+ * 1 == defragmentation is performed by host,
+ * 0 == performed by target <default>
+ */
+ u8 defrag_on_host;
+
+ /* for alignment */
+ u8 reserved[1];
+} __packed;
+
+struct wmi_ap_hidden_ssid_cmd {
+ u8 hidden_ssid;
+} __packed;
+
+struct wmi_set_inact_period_cmd {
+ __le32 inact_period;
+ u8 num_null_func;
+} __packed;
+
+/* AP mode events */
+struct wmi_ap_set_apsd_cmd {
+ u8 enable;
+} __packed;
+
+enum wmi_ap_apsd_buffered_traffic_flags {
+ WMI_AP_APSD_NO_DELIVERY_FRAMES = 0x1,
+};
+
+struct wmi_ap_apsd_buffered_traffic_cmd {
+ __le16 aid;
+ __le16 bitmap;
+ __le32 flags;
+} __packed;
+
+/* WMI_PS_POLL_EVENT */
+struct wmi_pspoll_event {
+ __le16 aid;
+} __packed;
+
+struct wmi_per_sta_stat {
+ __le32 tx_bytes;
+ __le32 tx_pkts;
+ __le32 tx_error;
+ __le32 tx_discard;
+ __le32 rx_bytes;
+ __le32 rx_pkts;
+ __le32 rx_error;
+ __le32 rx_discard;
+ __le32 aid;
+} __packed;
+
+struct wmi_ap_mode_stat {
+ __le32 action;
+ struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
+} __packed;
+
+/* End of AP mode definitions */
+
+struct wmi_remain_on_chnl_cmd {
+ __le32 freq;
+ __le32 duration;
+} __packed;
+
+/* wmi_send_action_cmd is to be deprecated. Use
+ * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt
+ * operations using the station interface.
+ */
+struct wmi_send_action_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_send_mgmt_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le32 no_cck;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_tx_status_event {
+ __le32 id;
+ u8 ack_status;
+} __packed;
+
+struct wmi_probe_req_report_cmd {
+ u8 enable;
+} __packed;
+
+struct wmi_disable_11b_rates_cmd {
+ u8 disable;
+} __packed;
+
+struct wmi_set_appie_extended_cmd {
+ u8 role_id;
+ u8 mgmt_frm_type;
+ u8 ie_len;
+ u8 ie_info[0];
+} __packed;
+
+struct wmi_remain_on_chnl_event {
+ __le32 freq;
+ __le32 duration;
+} __packed;
+
+struct wmi_cancel_remain_on_chnl_event {
+ __le32 freq;
+ __le32 duration;
+ u8 status;
+} __packed;
+
+struct wmi_rx_action_event {
+ __le32 freq;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_capabilities_event {
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_rx_probe_req_event {
+ __le32 freq;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
+#define P2P_FLAG_MACADDR_REQ (0x00000002)
+#define P2P_FLAG_HMODEL_REQ (0x00000002)
+
+struct wmi_get_p2p_info {
+ __le32 info_req_flags;
+} __packed;
+
+struct wmi_p2p_info_event {
+ __le32 info_req_flags;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+struct wmi_p2p_capabilities {
+ u8 go_power_save;
+} __packed;
+
+struct wmi_p2p_macaddr {
+ u8 mac_addr[ETH_ALEN];
+} __packed;
+
+struct wmi_p2p_hmodel {
+ u8 p2p_model;
+} __packed;
+
+struct wmi_p2p_probe_response_cmd {
+ __le32 freq;
+ u8 destination_addr[ETH_ALEN];
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/* Extended WMI (WMIX)
+ *
+ * Extended WMIX commands are encapsulated in a WMI message with
+ * cmd=WMI_EXTENSION_CMD.
+ *
+ * Extended WMI commands are those that are needed during wireless
+ * operation, but which are not really wireless commands. This allows,
+ * for instance, platform-specific commands. Extended WMI commands are
+ * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
+ * Extended WMI events are similarly embedded in a WMI event message with
+ * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
+ */
+struct wmix_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
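+
+/*
+ * Layout note (per the description above): a WMIX message is an ordinary
+ * WMI message whose command id is WMI_EXTENSION_CMDID, followed by a
+ * struct wmix_cmd_hdr carrying one of the WMIX command ids below, followed
+ * by the WMIX command specific payload.
+ */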
+
+enum wmix_command_id {
+ WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
+ WMIX_DSETDATA_REPLY_CMDID,
+ WMIX_GPIO_OUTPUT_SET_CMDID,
+ WMIX_GPIO_INPUT_GET_CMDID,
+ WMIX_GPIO_REGISTER_SET_CMDID,
+ WMIX_GPIO_REGISTER_GET_CMDID,
+ WMIX_GPIO_INTR_ACK_CMDID,
+ WMIX_HB_CHALLENGE_RESP_CMDID,
+ WMIX_DBGLOG_CFG_MODULE_CMDID,
+ WMIX_PROF_CFG_CMDID, /* 0x200a */
+ WMIX_PROF_ADDR_SET_CMDID,
+ WMIX_PROF_START_CMDID,
+ WMIX_PROF_STOP_CMDID,
+ WMIX_PROF_COUNT_GET_CMDID,
+};
+
+enum wmix_event_id {
+ WMIX_DSETOPENREQ_EVENTID = 0x3001,
+ WMIX_DSETCLOSE_EVENTID,
+ WMIX_DSETDATAREQ_EVENTID,
+ WMIX_GPIO_INTR_EVENTID,
+ WMIX_GPIO_DATA_EVENTID,
+ WMIX_GPIO_ACK_EVENTID,
+ WMIX_HB_CHALLENGE_RESP_EVENTID,
+ WMIX_DBGLOG_EVENTID,
+ WMIX_PROF_COUNT_EVENTID,
+};
+
+/*
+ * ------Error Detection support-------
+ */
+
+/*
+ * WMIX_HB_CHALLENGE_RESP_CMDID
+ * Heartbeat Challenge Response command
+ */
+struct wmix_hb_challenge_resp_cmd {
+ __le32 cookie;
+ __le32 source;
+} __packed;
+
+struct ath6kl_wmix_dbglog_cfg_module_cmd {
+ __le32 valid;
+ __le32 config;
+} __packed;
+
+/* End of Extended WMI (WMIX) */
+
+enum wmi_sync_flag {
+ NO_SYNC_WMIFLAG = 0,
+
+ /* transmit all queued data before cmd */
+ SYNC_BEFORE_WMIFLAG,
+
+ /* any new data waits until cmd execs */
+ SYNC_AFTER_WMIFLAG,
+
+ SYNC_BOTH_WMIFLAG,
+
+ /* end marker */
+ END_WMIFLAG
+};
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, u32 flags,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info, u8 if_idx);
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb, u32 layer2_priority,
+ bool wmm_enabled, u8 *ac);
+
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck,
+ u32 *rates);
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid);
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+ u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid);
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
+ u16 listen_beacons);
+int ath6kl_wmi_bmisstime_cmd(struct wmi *wmi, u8 if_idx,
+ u16 bmiss_time, u16 num_beacons);
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy);
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
+ struct wmi_create_pstream_cmd *pstream);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+ u8 preamble_policy);
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
+ const u8 *pmkid, bool set);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl);
+int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
+ enum ieee80211_band band,
+ struct ath6kl_htcap *htcap);
+int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
+
+s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index);
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
+ __be32 ips0, __be32 ips1);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, const u8 *filter,
+ const u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id);
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
+int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_interval);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
+int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
+int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
+ u8 *filter, bool add_filter);
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+ u32 rate, u32 pkts, u32 intvl);
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2);
+
+/* AP mode uAPSD */
+int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
+
+int ath6kl_wmi_set_apsd_bfrd_traf(struct wmi *wmi,
+ u8 if_idx, u16 aid,
+ u16 bitmap, u32 flags);
+
+u8 ath6kl_wmi_get_traffic_class(u8 user_priority);
+
+u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri);
+/* AP mode */
+int ath6kl_wmi_ap_hidden_ssid(struct wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p);
+
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+ const u8 *mac, u16 reason);
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_version,
+ bool rx_dot11_hdr, bool defrag_on_host);
+
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
+
+int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
+ const u8 *ie_info, u8 ie_len);
+
+/* P2P */
+int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
+
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ u32 dur);
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck);
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len);
+
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
+
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
+
+int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
+
+void ath6kl_wmi_sscan_timer(unsigned long ptr);
+
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
+void *ath6kl_wmi_init(struct ath6kl *devt);
+void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
+
+#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
new file mode 100644
index 000000000..fee0cadb0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -0,0 +1,178 @@
+config ATH9K_HW
+ tristate
+
+config ATH9K_COMMON
+ tristate
+ select ATH_COMMON
+ select DEBUG_FS
+ select RELAY
+
+config ATH9K_DFS_DEBUGFS
+ def_bool y
+ depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
+
+config ATH9K_BTCOEX_SUPPORT
+ bool "Atheros bluetooth coexistence support"
+ depends on (ATH9K || ATH9K_HTC)
+ default y
+ ---help---
+ Say Y, if you want to use the ath9k/ath9k_htc radios together with
+ Bluetooth modules in the same system.
+
+config ATH9K
+ tristate "Atheros 802.11n wireless cards support"
+ depends on MAC80211 && HAS_DMA
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ This module adds support for wireless adapters based on
+ Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
+ of chipsets. For a specific list of supported external
+ cards, laptops that already ship with these cards and
+ APs that come with these cards refer to ath9k wiki
+ products page:
+
+ http://wireless.kernel.org/en/users/Drivers/ath9k/products
+
+ If you choose to build a module, it'll be called ath9k.
+
+config ATH9K_PCI
+ bool "Atheros ath9k PCI/PCIe bus support"
+ default y
+ depends on ATH9K && PCI
+ ---help---
+ This option enables the PCI bus support in ath9k.
+
+ Say Y, if you have a compatible PCI/PCIe wireless card.
+
+config ATH9K_AHB
+ bool "Atheros ath9k AHB bus support"
+ depends on ATH9K
+ default n
+ ---help---
+ This option enables the AHB bus support in ath9k.
+
+ Say Y, if you have a SoC with a compatible built-in
+ wireless MAC. Say N if unsure.
+
+config ATH9K_DEBUGFS
+ bool "Atheros ath9k debugging"
+ depends on ATH9K && DEBUG_FS
+ select MAC80211_DEBUGFS
+ select RELAY
+ ---help---
+ Say Y, if you need access to ath9k's statistics for
+ interrupts, rate control, etc.
+
+ Also required for changing debug message flags at run time.
+
+config ATH9K_STATION_STATISTICS
+ bool "Detailed station statistics"
+ depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
+ select MAC80211_DEBUGFS
+ default n
+ ---help---
+ This option enables detailed statistics for association stations.
+
+config ATH9K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH9K && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath9k. There is no way to dynamically detect if a card was DFS
+ certified and as such this is left as a build time option. This
+ option should only be enabled by system integrators that can
+ guarantee that all the platforms that their kernel will run on
+ have obtained appropriate regulatory body certification for a
+ respective Atheros card by using ath9k on the target shipping
+ platforms.
+
+ This is currently only a placeholder for future DFS support,
+ as DFS support requires more components that still need to be
+ developed. At this point enabling this option won't do anything
+ except increase code size.
+
+config ATH9K_DYNACK
+ bool "Atheros ath9k ACK timeout estimation algorithm (EXPERIMENTAL)"
+ depends on ATH9K
+ default n
+ ---help---
+ This option enables the ath9k dynamic ACK timeout estimation algorithm,
+ based on the ACK frame RX timestamp, TX frame timestamp and frame
+ duration.
+
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+ Enabling this will only enable TX99 support; all other modes of
+ operation will be disabled.
+
+ TX99 support enables Specific Absorption Rate (SAR) testing.
+ SAR is the unit of measurement for the amount of radio frequency (RF)
+ absorbed by the body when using a wireless device. The RF exposure
+ limits used are expressed in terms of SAR, which is a measure
+ of the electric and magnetic field strength and power density for
+ transmitters operating at frequencies from 300 kHz to 100 GHz.
+ Regulatory bodies around the world require that wireless devices
+ be evaluated to meet the RF exposure limits set forth in the
+ governmental SAR regulations.
+
+config ATH9K_WOW
+ bool "Wake on Wireless LAN support (EXPERIMENTAL)"
+ depends on ATH9K && PM
+ default n
+ ---help---
+ This option enables Wake on Wireless LAN support for certain cards.
+ Currently, AR9462 is supported.
+
+config ATH9K_RFKILL
+ bool "Atheros ath9k rfkill support" if EXPERT
+ depends on ATH9K
+ depends on RFKILL=y || RFKILL=ATH9K
+ default y
+ help
+ Say Y to have ath9k poll the RF-Kill GPIO every couple of
+ seconds. Turn off to save power, but enable it if you have
+ a platform that can toggle the RF-Kill GPIO.
+
+config ATH9K_CHANNEL_CONTEXT
+ bool "Channel Context support"
+ depends on ATH9K
+ default n
+ ---help---
+ This option enables channel context support in ath9k, which is needed
+ for multi-channel concurrency. Enable this if P2P PowerSave support
+ is required.
+
+config ATH9K_PCOEM
+ bool "Atheros ath9k support for PC OEM cards" if EXPERT
+ depends on ATH9K
+ default y
+
+config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
+
+config ATH9K_HTC_DEBUGFS
+ bool "Atheros ath9k_htc debugging"
+ depends on ATH9K_HTC && DEBUG_FS
+ ---help---
+ Say Y, if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
new file mode 100644
index 000000000..ecda613c2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -0,0 +1,76 @@
+ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
+ main.o \
+ recv.o \
+ xmit.o \
+ link.o \
+ antenna.o \
+ channel.o
+
+ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
+ath9k-$(CONFIG_ATH9K_PCI) += pci.o
+ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
+ath9k-$(CONFIG_ATH9K_WOW) += wow.o
+
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+
+ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
+
+obj-$(CONFIG_ATH9K) += ath9k.o
+
+ath9k_hw-y:= \
+ ar9002_hw.o \
+ ar9003_hw.o \
+ hw.o \
+ ar9003_phy.o \
+ ar9002_phy.o \
+ ar5008_phy.o \
+ ar9002_calib.o \
+ ar9003_calib.o \
+ calib.o \
+ eeprom.o \
+ eeprom_def.o \
+ eeprom_4k.o \
+ eeprom_9287.o \
+ ani.o \
+ mac.o \
+ ar9002_mac.o \
+ ar9003_mac.o \
+ ar9003_eeprom.o \
+ ar9003_paprd.o
+
+ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
+
+ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
+ ar9003_mci.o \
+ ar9003_aic.o
+
+ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
+
+ath9k_hw-$(CONFIG_ATH9K_DYNACK) += dynack.o
+
+obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
+
+obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
+ath9k_common-y:= common.o \
+ common-init.o \
+ common-beacon.o \
+ common-debug.o \
+ common-spectral.o
+
+ath9k_htc-y += htc_hst.o \
+ hif_usb.o \
+ wmi.o \
+ htc_drv_txrx.o \
+ htc_drv_main.o \
+ htc_drv_beacon.o \
+ htc_drv_init.o \
+ htc_drv_gpio.o
+
+ath9k_htc-$(CONFIG_ATH9K_HTC_DEBUGFS) += htc_drv_debug.o
+
+obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
new file mode 100644
index 000000000..bd4a1a655
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/platform_device.h>
+#include <linux/ath9k_platform.h>
+#include <linux/module.h>
+#include "ath9k.h"
+
+static const struct platform_device_id ath9k_platform_id_table[] = {
+ {
+ .name = "ath9k",
+ .driver_data = AR5416_AR9100_DEVID,
+ },
+ {
+ .name = "ar933x_wmac",
+ .driver_data = AR9300_DEVID_AR9330,
+ },
+ {
+ .name = "ar934x_wmac",
+ .driver_data = AR9300_DEVID_AR9340,
+ },
+ {
+ .name = "qca955x_wmac",
+ .driver_data = AR9300_DEVID_QCA955X,
+ },
+ {
+ .name = "qca953x_wmac",
+ .driver_data = AR9300_DEVID_AR953X,
+ },
+ {
+ .name = "qca956x_wmac",
+ .driver_data = AR9300_DEVID_QCA956X,
+ },
+ {},
+};
+
+/* return bus cachesize in 4B word units */
+static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
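+/*
+ * EEPROM data for these devices is supplied through the ath9k platform
+ * data (typically loaded from flash); read one 16-bit word from it.
+ */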
+static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_softc *sc = (struct ath_softc *)common->priv;
+ struct platform_device *pdev = to_platform_device(sc->dev);
+ struct ath9k_platform_data *pdata;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
+ ath_err(common,
+ "%s: flash read failed, offset %08x is out of range\n",
+ __func__, off);
+ return false;
+ }
+
+ *data = pdata->eeprom_data[off];
+ return true;
+}
+
+static struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
+ .read_cachesize = ath_ahb_read_cachesize,
+ .eeprom_read = ath_ahb_eeprom_read,
+};
+
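+/*
+ * Probe an AHB-attached wireless MAC: map the register space, hook up the
+ * IRQ and register the device with mac80211 via ath9k_init_device().
+ */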
+static int ath_ahb_probe(struct platform_device *pdev)
+{
+ void __iomem *mem;
+ struct ath_softc *sc;
+ struct ieee80211_hw *hw;
+ struct resource *res;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ int irq;
+ int ret = 0;
+ struct ath_hw *ah;
+ char hw_name[64];
+
+ if (!dev_get_platdata(&pdev->dev)) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource found\n");
+ return -ENXIO;
+ }
+
+ mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ irq = res->start;
+
+ ath9k_fill_chanctx_ops();
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ return -ENOMEM;
+ }
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ platform_set_drvdata(pdev, hw);
+
+ sc = hw->priv;
+ sc->hw = hw;
+ sc->dev = &pdev->dev;
+ sc->mem = mem;
+ sc->irq = irq;
+
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_free_hw;
+ }
+
+ ret = ath9k_init_device(id->driver_data, sc, &ath_ahb_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
+ }
+
+ ah = sc->sc_ah;
+ ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+ hw_name, (unsigned long)mem, irq);
+
+ return 0;
+
+ err_irq:
+ free_irq(irq, sc);
+ err_free_hw:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+static int ath_ahb_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+
+ if (hw) {
+ struct ath_softc *sc = hw->priv;
+
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ath_ahb_driver = {
+ .probe = ath_ahb_probe,
+ .remove = ath_ahb_remove,
+ .driver = {
+ .name = "ath9k",
+ },
+ .id_table = ath9k_platform_id_table,
+};
+
+MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table);
+
+int ath_ahb_init(void)
+{
+ return platform_driver_register(&ath_ahb_driver);
+}
+
+void ath_ahb_exit(void)
+{
+ platform_driver_unregister(&ath_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
new file mode 100644
index 000000000..25e45e4d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include "hw.h"
+#include "hw-ops.h"
+
+struct ani_ofdm_level_entry {
+ int spur_immunity_level;
+ int fir_step_level;
+ int ofdm_weak_signal_on;
+};
+
+/* values here are relative to the INI */
+
+/*
+ * Legend:
+ *
+ * SI: Spur immunity
+ * FS: FIR Step
+ * WS: OFDM / CCK Weak Signal detection
+ * MRC-CCK: Maximal Ratio Combining for CCK
+ */
+
+static const struct ani_ofdm_level_entry ofdm_level_table[] = {
+ /* SI FS WS */
+ { 0, 0, 1 }, /* lvl 0 */
+ { 1, 1, 1 }, /* lvl 1 */
+ { 2, 2, 1 }, /* lvl 2 */
+ { 3, 2, 1 }, /* lvl 3 (default) */
+ { 4, 3, 1 }, /* lvl 4 */
+ { 5, 4, 1 }, /* lvl 5 */
+ { 6, 5, 1 }, /* lvl 6 */
+ { 7, 6, 1 }, /* lvl 7 */
+ { 7, 7, 1 }, /* lvl 8 */
+ { 7, 8, 0 } /* lvl 9 */
+};
+#define ATH9K_ANI_OFDM_NUM_LEVEL \
+ ARRAY_SIZE(ofdm_level_table)
+#define ATH9K_ANI_OFDM_MAX_LEVEL \
+ (ATH9K_ANI_OFDM_NUM_LEVEL-1)
+#define ATH9K_ANI_OFDM_DEF_LEVEL \
+ 3 /* default level - matches the INI settings */
+
+/*
+ * MRC (Maximal Ratio Combining) has always been used with multi-antenna OFDM.
+ * With OFDM for a single stream you just add up all antenna inputs; you're
+ * only interested in what you get after the FFT. Signal alignment is also not
+ * required for OFDM because any phase difference adds up in the frequency
+ * domain.
+ *
+ * MRC requires extra work for use with CCK. You need to align the antenna
+ * signals from the different antennas before you can add the signals
+ * together. You need alignment of the signals as CCK is in the time domain,
+ * so addition can cancel your signal completely if the phase is 180 degrees
+ * (think of adding sine waves). You also need to remove noise before the
+ * addition and this is where ANI MRC CCK comes into play. One of the antenna
+ * inputs may be stronger but have a lower SNR, so just adding after alignment
+ * can be dangerous.
+ *
+ * Regardless of alignment in time, the antenna signals add constructively after
+ * FFT and improve your reception. For more information:
+ *
+ * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ */
+
+struct ani_cck_level_entry {
+ int fir_step_level;
+ int mrc_cck_on;
+};
+
+static const struct ani_cck_level_entry cck_level_table[] = {
+ /* FS MRC-CCK */
+ { 0, 1 }, /* lvl 0 */
+ { 1, 1 }, /* lvl 1 */
+ { 2, 1 }, /* lvl 2 (default) */
+ { 3, 1 }, /* lvl 3 */
+ { 4, 0 }, /* lvl 4 */
+ { 5, 0 }, /* lvl 5 */
+ { 6, 0 }, /* lvl 6 */
+ { 7, 0 }, /* lvl 7 (only for high rssi) */
+ { 8, 0 } /* lvl 8 (only for high rssi) */
+};
+
+#define ATH9K_ANI_CCK_NUM_LEVEL \
+ ARRAY_SIZE(cck_level_table)
+#define ATH9K_ANI_CCK_MAX_LEVEL \
+ (ATH9K_ANI_CCK_NUM_LEVEL-1)
+#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
+ (ATH9K_ANI_CCK_NUM_LEVEL-3)
+#define ATH9K_ANI_CCK_DEF_LEVEL \
+ 2 /* default level - matches the INI settings */
+
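+/*
+ * Accumulate the hardware MIB counters (RTS ok/fail, ACK fail, FCS fail,
+ * beacon count) into the driver's software statistics.
+ */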
+static void ath9k_hw_update_mibstats(struct ath_hw *ah,
+ struct ath9k_mib_stats *stats)
+{
+ u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
+ AR_FCS_FAIL, AR_BEACON_CNT};
+ u32 data[5];
+
+ REG_READ_MULTI(ah, &addr[0], &data[0], 5);
+ /* AR_RTS_OK */
+ stats->rts_good += data[0];
+ /* AR_RTS_FAIL */
+ stats->rts_bad += data[1];
+ /* AR_ACK_FAIL */
+ stats->ackrcv_bad += data[2];
+ /* AR_FCS_FAIL */
+ stats->fcs_bad += data[3];
+ /* AR_BEACON_CNT */
+ stats->beacons += data[4];
+}
+
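+/*
+ * Clear the PHY error counters and restart the listen-time accounting for
+ * the current channel.
+ */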
+static void ath9k_ani_restart(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!ah->curchan)
+ return;
+
+ aniState = &ah->ani;
+ aniState->listenTime = 0;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ aniState->ofdmPhyErrCount = 0;
+ aniState->cckPhyErrCount = 0;
+}
+
+/* Adjust the OFDM Noise Immunity Level */
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
+ bool scan)
+{
+ struct ar5416AniState *aniState = &ah->ani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+ bool weak_sig;
+
+ ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->ofdmNoiseImmunityLevel,
+ immunityLevel, BEACON_RSSI(ah),
+ ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
+
+ if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+ immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
+ if (!scan)
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ entry_ofdm->spur_immunity_level);
+
+ if (aniState->firstepLevel != entry_ofdm->fir_step_level &&
+ entry_ofdm->fir_step_level >= entry_cck->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_ofdm->fir_step_level);
+
+ weak_sig = entry_ofdm->ofdm_weak_signal_on;
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
+ weak_sig = true;
+ /*
+ * Newer chipsets are better at dealing with high PHY error counts -
+ * keep weak signal detection enabled when no RSSI threshold is
+ * available to determine if it is needed (mode != STA)
+ */
+ else if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_STATION)
+ weak_sig = true;
+
+ /* Older chipsets are more sensitive to high PHY error counts */
+ else if (!AR_SREV_9300_20_OR_LATER(ah) &&
+ aniState->ofdmNoiseImmunityLevel >= 8)
+ weak_sig = false;
+
+ if (aniState->ofdmWeakSigDetect != weak_sig)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ weak_sig);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return;
+
+ if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
+ }
+}
+
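+/* The OFDM PHY error rate is too high: raise the OFDM noise immunity level */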
+static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!ah->curchan)
+ return;
+
+ aniState = &ah->ani;
+
+ if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
+}
+
+/*
+ * Set the ANI settings to match a CCK level.
+ */
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
+ bool scan)
+{
+ struct ar5416AniState *aniState = &ah->ani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->cckNoiseImmunityLevel, immunityLevel,
+ BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
+
+ if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+ immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
+ immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
+ immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
+
+ if (!scan)
+ aniState->cckNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->firstepLevel != entry_cck->fir_step_level &&
+ entry_cck->fir_step_level >= entry_ofdm->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_cck->fir_step_level);
+
+ /* Skip MRC CCK for pre AR9003 families */
+ if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) ||
+ AR_SREV_9565(ah) || AR_SREV_9561(ah))
+ return;
+
+ if (aniState->mrcCCK != entry_cck->mrc_cck_on)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_MRC_CCK,
+ entry_cck->mrc_cck_on);
+}
+
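+/* The CCK PHY error rate is too high: raise the CCK noise immunity level */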
+static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!ah->curchan)
+ return;
+
+ aniState = &ah->ani;
+
+ if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
+ false);
+}
+
+/*
+ * Only lower either the OFDM or the CCK noise immunity level per turn;
+ * we lower the other one next time.
+ */
+static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ aniState = &ah->ani;
+
+ /* lower OFDM noise immunity */
+ if (aniState->ofdmNoiseImmunityLevel > 0 &&
+ (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1,
+ false);
+ return;
+ }
+
+ /* lower CCK noise immunity */
+ if (aniState->cckNoiseImmunityLevel > 0)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1,
+ false);
+}
+
+/*
+ * Restore the ANI parameters in the HAL and reset the statistics.
+ * This routine should be called for every hardware reset and for
+ * every channel change.
+ */
+void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
+{
+ struct ar5416AniState *aniState = &ah->ani;
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ofdm_nil, cck_nil;
+
+ if (!ah->curchan)
+ return;
+
+ BUG_ON(aniState == NULL);
+ ah->stats.ast_ani_reset++;
+
+ ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
+ aniState->ofdmNoiseImmunityLevel);
+ cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
+ aniState->cckNoiseImmunityLevel);
+
+ if (is_scanning ||
+ (ah->opmode != NL80211_IFTYPE_STATION &&
+ ah->opmode != NL80211_IFTYPE_ADHOC)) {
+ /*
+ * If we're scanning or in AP mode, the defaults (ini)
+ * should be in place. For an AP we assume the historical
+ * levels for this channel are probably outdated so start
+ * from defaults instead.
+ */
+ if (aniState->ofdmNoiseImmunityLevel !=
+ ATH9K_ANI_OFDM_DEF_LEVEL ||
+ aniState->cckNoiseImmunityLevel !=
+ ATH9K_ANI_CCK_DEF_LEVEL) {
+ ath_dbg(common, ANI,
+ "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+
+ ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL;
+ cck_nil = ATH9K_ANI_CCK_DEF_LEVEL;
+ }
+ } else {
+ /*
+ * restore historical levels for this channel
+ */
+ ath_dbg(common, ANI,
+ "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+ }
+ ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
+ ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
+
+ ath9k_ani_restart(ah);
+}
+
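+/*
+ * Update the accumulated listen time and the OFDM/CCK PHY error counters.
+ * Returns false (and restarts ANI) if the measured listen time is unusable.
+ */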
+static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar5416AniState *aniState = &ah->ani;
+ u32 phyCnt1, phyCnt2;
+ int32_t listenTime;
+
+ ath_hw_cycle_counters_update(common);
+ listenTime = ath_hw_get_listen_time(common);
+
+ if (listenTime <= 0) {
+ ah->stats.ast_ani_lneg_or_lzero++;
+ ath9k_ani_restart(ah);
+ return false;
+ }
+
+ aniState->listenTime += listenTime;
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = phyCnt1;
+
+ ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = phyCnt2;
+
+ return true;
+}
+
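+/*
+ * Periodic ANI check: compute the OFDM and CCK PHY error rates over the
+ * accumulated listen time and raise or lower the noise immunity accordingly.
+ */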
+void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 ofdmPhyErrRate, cckPhyErrRate;
+
+ if (!ah->curchan)
+ return;
+
+ aniState = &ah->ani;
+ if (!ath9k_hw_ani_read_counters(ah))
+ return;
+
+ ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
+ aniState->listenTime;
+ cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
+ aniState->listenTime;
+
+ ath_dbg(common, ANI,
+ "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate, aniState->ofdmsTurn);
+
+ if (aniState->listenTime > ah->aniperiod) {
+ if (cckPhyErrRate < ah->config.cck_trig_low &&
+ ofdmPhyErrRate < ah->config.ofdm_trig_low) {
+ ath9k_hw_ani_lower_immunity(ah);
+ aniState->ofdmsTurn = !aniState->ofdmsTurn;
+ } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) {
+ ath9k_hw_ani_ofdm_err_trigger(ah);
+ aniState->ofdmsTurn = false;
+ } else if (cckPhyErrRate > ah->config.cck_trig_high) {
+ ath9k_hw_ani_cck_err_trigger(ah);
+ aniState->ofdmsTurn = true;
+ }
+ ath9k_ani_restart(ah);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_ani_monitor);
+
+void ath9k_enable_mib_counters(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_dbg(common, ANI, "Enable MIB counters\n");
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+ REG_WRITE(ah, AR_MIBC,
+ ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
+ & 0x0f);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+/* Freeze the MIB counters, get the stats and then clear them */
+void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_dbg(common, ANI, "Disable MIB counters\n");
+
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC);
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
+
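+/*
+ * Set the default ANI trigger thresholds and noise immunity levels for this
+ * chip generation and enable the MIB counters.
+ */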
+void ath9k_hw_ani_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar5416AniState *ani = &ah->ani;
+
+ ath_dbg(common, ANI, "Initialize ANI\n");
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+ }
+
+ ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
+ ani->ofdmsTurn = true;
+ ani->ofdmWeakSigDetect = true;
+ ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+ ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
+ /*
+ * Since we expect some ongoing maintenance on the tables, sanity
+ * check here: the default level should not modify the INI setting.
+ */
+ ah->aniperiod = ATH9K_ANI_PERIOD;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
+
+ ath9k_ani_restart(ah);
+ ath9k_enable_mib_counters(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
new file mode 100644
index 000000000..c40965b4c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ANI_H
+#define ANI_H
+
+#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
+
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
+
+#define ATH9K_ANI_OFDM_TRIG_LOW 400
+#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
+
+#define ATH9K_ANI_CCK_TRIG_HIGH 600
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
+#define ATH9K_ANI_CCK_TRIG_LOW 300
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
+
+#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
+#define ATH9K_ANI_FIRSTEP_LVL 2
+
+#define ATH9K_ANI_RSSI_THR_HIGH 40
+#define ATH9K_ANI_RSSI_THR_LOW 7
+
+#define ATH9K_ANI_PERIOD 300
+
+/* in ms */
+#define ATH9K_ANI_POLLINTERVAL 1000
+
+#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
+#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
+#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
+#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
+
+/* values here are relative to the INI */
+
+enum ath9k_ani_cmd {
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
+ ATH9K_ANI_FIRSTEP_LEVEL = 0x2,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4,
+ ATH9K_ANI_MRC_CCK = 0x8,
+ ATH9K_ANI_ALL = 0xfff
+};
+
+struct ath9k_mib_stats {
+ u32 ackrcv_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 beacons;
+};
+
+/* INI default values for ANI registers */
+struct ath9k_ani_default {
+ u16 m1ThreshLow;
+ u16 m2ThreshLow;
+ u16 m1Thresh;
+ u16 m2Thresh;
+ u16 m2CountThr;
+ u16 m2CountThrLow;
+ u16 m1ThreshLowExt;
+ u16 m2ThreshLowExt;
+ u16 m1ThreshExt;
+ u16 m2ThreshExt;
+ u16 firstep;
+ u16 firstepLow;
+ u16 cycpwrThr1;
+ u16 cycpwrThr1Ext;
+};
+
+struct ar5416AniState {
+ u8 noiseImmunityLevel;
+ u8 ofdmNoiseImmunityLevel;
+ u8 cckNoiseImmunityLevel;
+ bool ofdmsTurn;
+ u8 mrcCCK;
+ u8 spurImmunityLevel;
+ u8 firstepLevel;
+ bool ofdmWeakSigDetect;
+ u32 listenTime;
+ u32 ofdmPhyErrCount;
+ u32 cckPhyErrCount;
+ struct ath9k_ani_default iniDef;
+};
+
+struct ar5416Stats {
+ u32 ast_ani_spurup;
+ u32 ast_ani_spurdown;
+ u32 ast_ani_ofdmon;
+ u32 ast_ani_ofdmoff;
+ u32 ast_ani_cckhigh;
+ u32 ast_ani_ccklow;
+ u32 ast_ani_stepup;
+ u32 ast_ani_stepdown;
+ u32 ast_ani_ofdmerrs;
+ u32 ast_ani_cckerrs;
+ u32 ast_ani_reset;
+ u32 ast_ani_lneg_or_lzero;
+ u32 avgbrssi;
+ struct ath9k_mib_stats ast_mibstats;
+};
+#define ah_mibStats stats.ast_mibstats
+
+void ath9k_enable_mib_counters(struct ath_hw *ah);
+void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
+void ath9k_hw_ani_init(struct ath_hw *ah);
+
+#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
new file mode 100644
index 000000000..a3668433d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*
+ * AR9285
+ * ======
+ *
+ * EEPROM has 2 4-bit fields containing the card configuration.
+ *
+ * antdiv_ctl1:
+ * ------------
+ * bb_enable_ant_div_lnadiv : 1
+ * bb_ant_div_alt_gaintb : 1
+ * bb_ant_div_main_gaintb : 1
+ * bb_enable_ant_fast_div : 1
+ *
+ * antdiv_ctl2:
+ * -----------
+ * bb_ant_div_alt_lnaconf : 2
+ * bb_ant_div_main_lnaconf : 2
+ *
+ * The EEPROM bits are used as follows:
+ * ------------------------------------
+ *
+ * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0
+ * 1 -> Antenna config Alt/Main uses gaintable 1
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_enable_ant_fast_div - Enable fast antenna diversity.
+ * Set in AR_PHY_CCK_DETECT.
+ *
+ * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ * 10=LNA1
+ * 01=LNA2
+ * 11=LNA1+LNA2
+ * 00=LNA1-LNA2
+ *
+ * AR9485 / AR9565 / AR9331
+ * ========================
+ *
+ * The same bits are present in the EEPROM, but the location in the
+ * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
+ *
+ * ant_div_alt_lnaconf ==> bit 0~1
+ * ant_div_main_lnaconf ==> bit 2~3
+ * ant_div_alt_gaintb ==> bit 4
+ * ant_div_main_gaintb ==> bit 5
+ * enable_ant_div_lnadiv ==> bit 6
+ * enable_ant_fast_div ==> bit 7
+ */
+
+static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
+ int alt_ratio, int maxdelta,
+ int mindelta, int main_rssi_avg,
+ int alt_rssi_avg, int pkt_count)
+{
+ if (pkt_count <= 50)
+ return false;
+
+ if (alt_rssi_avg > main_rssi_avg + mindelta)
+ return true;
+
+ if (alt_ratio >= antcomb->ant_ratio2 &&
+ alt_rssi_avg >= antcomb->low_rssi_thresh &&
+ (alt_rssi_avg > main_rssi_avg + maxdelta))
+ return true;
+
+ return false;
+}
+
+static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg)
+{
+ bool result, set1, set2;
+
+ result = set1 = set2 = false;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ set1 = true;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ set2 = true;
+
+ switch (conf->div_group) {
+ case 0:
+ if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
+ result = true;
+ break;
+ case 1:
+ case 2:
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
+ (alt_ratio > antcomb->ant_ratio))
+ result = true;
+
+ break;
+ case 3:
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
+ (alt_ratio > antcomb->ant_ratio))
+ result = true;
+
+ break;
+ }
+
+ return result;
+}
+
+static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf ant_conf,
+ int main_rssi_avg)
+{
+ antcomb->quick_scan_cnt = 0;
+
+ if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+
+ switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
+ case 0x10: /* LNA2 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x20: /* LNA1 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x23: /* LNA1 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf)
+{
+	/* set alt to the conf with maximum ratio */
+ if (antcomb->first_ratio && antcomb->second_ratio) {
+ if (antcomb->rssi_second > antcomb->rssi_third) {
+			/* first alt */
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2*/
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ } else {
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ }
+ } else if (antcomb->first_ratio) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ } else if (antcomb->second_ratio) {
+ /* second alt */
+ if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ } else {
+ /* main is largest */
+ if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->main_conf;
+ }
+}
+
+static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg,
+ int alt_ratio)
+{
+ /* alt_good */
+ switch (antcomb->quick_scan_cnt) {
+ case 0:
+		/* set main to main conf, and alt to first conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ break;
+ case 1:
+		/* set main to main conf, and alt to second conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_second = alt_rssi_avg;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ /* main is LNA1 */
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else {
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ }
+ break;
+ case 2:
+ antcomb->alt_good = false;
+ antcomb->scan_not_start = false;
+ antcomb->scan = false;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_third = alt_rssi_avg;
+
+		switch (antcomb->second_quick_scan_conf) {
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ break;
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+ break;
+ default:
+ break;
+ }
+
+ if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
+ div_ant_conf->lna1_lna2_switch_delta)
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else {
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ }
+
+ ath_ant_set_alt_ratio(antcomb, div_ant_conf);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio)
+{
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+
+ if (ant_conf->div_group == 0) {
+ /* Adjust the fast_div_bias based on main and alt lna conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x20: /* LNA1 A-B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 1) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 2) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ default:
+ break;
+ }
+
+ if (antcomb->fast_div_bias)
+ ant_conf->fast_div_bias = antcomb->fast_div_bias;
+ } else if (ant_conf->div_group == 3) {
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x39;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x3f;
+ break;
+ case 0x13: /* LNA2 A+B */
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x20: /* LNA1 A-B */
+ ant_conf->fast_div_bias = 0x3;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x3;
+ break;
+ case 0x23: /* LNA1 A+B */
+ ant_conf->fast_div_bias = 0x3;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf,
+ int curr_alt_set, int alt_rssi_avg,
+ int main_rssi_avg)
+{
+ switch (curr_alt_set) {
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ antcomb->rssi_lna1 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ antcomb->rssi_lna2 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ antcomb->rssi_add = alt_rssi_avg;
+ antcomb->scan = true;
+ /* set to A-B */
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+ antcomb->rssi_sub = alt_rssi_avg;
+ antcomb->scan = false;
+ if (antcomb->rssi_lna2 >
+ (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
+ /* use LNA2 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA1 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ }
+ } else {
+ /* use LNA1 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA2 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg, int curr_main_set,
+ int curr_alt_set)
+{
+ bool ret = false;
+
+ if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg)) {
+ if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+ /*
+ * Switch main and alt LNA.
+ */
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+
+ ret = true;
+ } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+		/*
+		 * Set alt to another LNA.
+		 */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
+{
+ int alt_ratio;
+
+ if (!antcomb->scan || !antcomb->alt_good)
+ return false;
+
+ if (time_after(jiffies, antcomb->scan_start_time +
+ msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+ return true;
+
+ if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ if (alt_ratio < antcomb->ant_ratio)
+ return true;
+ }
+
+ return false;
+}
+
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
+{
+ struct ath_hw_antcomb_conf div_ant_conf;
+ struct ath_ant_comb *antcomb = &sc->ant_comb;
+ int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
+ int curr_main_set;
+ int main_rssi = rs->rs_rssi_ctl[0];
+ int alt_rssi = rs->rs_rssi_ctl[1];
+ int rx_ant_conf, main_ant_conf;
+ bool short_scan = false, ret;
+
+ rx_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_CURRENT_SHIFT) &
+ ATH_ANT_RX_MASK;
+ main_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_MAIN_SHIFT) &
+ ATH_ANT_RX_MASK;
+
+ if (alt_rssi >= antcomb->low_rssi_thresh) {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
+ } else {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
+ }
+
+	/* Record packet only when both main_rssi and alt_rssi are positive */
+ if (main_rssi > 0 && alt_rssi > 0) {
+ antcomb->total_pkt_count++;
+ antcomb->main_total_rssi += main_rssi;
+ antcomb->alt_total_rssi += alt_rssi;
+
+ if (main_ant_conf == rx_ant_conf)
+ antcomb->main_recv_cnt++;
+ else
+ antcomb->alt_recv_cnt++;
+ }
+
+ if (main_ant_conf == rx_ant_conf) {
+ ANT_STAT_INC(ANT_MAIN, recv_cnt);
+ ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+ } else {
+ ANT_STAT_INC(ANT_ALT, recv_cnt);
+ ANT_LNA_INC(ANT_ALT, rx_ant_conf);
+ }
+
+ /* Short scan check */
+ short_scan = ath_ant_short_scan_check(antcomb);
+
+ if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
+ rs->rs_moreaggr) && !short_scan)
+ return;
+
+ if (antcomb->total_pkt_count) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ main_rssi_avg = (antcomb->main_total_rssi /
+ antcomb->total_pkt_count);
+ alt_rssi_avg = (antcomb->alt_total_rssi /
+ antcomb->total_pkt_count);
+ }
+
+ ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
+ curr_alt_set = div_ant_conf.alt_lna_conf;
+ curr_main_set = div_ant_conf.main_lna_conf;
+ antcomb->count++;
+
+ if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
+ if (alt_ratio > antcomb->ant_ratio) {
+ ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
+ main_rssi_avg);
+ antcomb->alt_good = true;
+ } else {
+ antcomb->alt_good = false;
+ }
+
+ antcomb->count = 0;
+ antcomb->scan = true;
+ antcomb->scan_not_start = true;
+ }
+
+ if (!antcomb->scan) {
+ ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg,
+ curr_main_set, curr_alt_set);
+ if (ret)
+ goto div_comb_done;
+ }
+
+ if (!antcomb->scan &&
+ (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
+ goto div_comb_done;
+
+ if (!antcomb->scan_not_start) {
+ ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
+ alt_rssi_avg, main_rssi_avg);
+ } else {
+ if (!antcomb->alt_good) {
+ antcomb->scan_not_start = false;
+ /* Set alt to another LNA */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+ goto div_comb_done;
+ }
+ ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+ main_rssi_avg, alt_rssi_avg,
+ alt_ratio);
+ antcomb->quick_scan_cnt++;
+ }
+
+div_comb_done:
+ ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
+ ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+ ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
+
+ antcomb->scan_start_time = jiffies;
+ antcomb->total_pkt_count = 0;
+ antcomb->main_total_rssi = 0;
+ antcomb->alt_total_rssi = 0;
+ antcomb->main_recv_cnt = 0;
+ antcomb->alt_recv_cnt = 0;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 000000000..467ccfae2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+static const u32 ar5416Modes[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134},
+ {0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b},
+ {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
+ {0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80},
+ {0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120},
+ {0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar5416Common[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00007010, 0x00000000},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x000080c0, 0x2a82301a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88000010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x00008300, 0x00000000},
+ {0x00008304, 0x00000000},
+ {0x00008308, 0x00000000},
+ {0x0000830c, 0x00000000},
+ {0x00008310, 0x00000000},
+ {0x00008314, 0x00000000},
+ {0x00008318, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00070000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xad848e19},
+ {0x00009810, 0x7d14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x00009840, 0x206a002e},
+ {0x0000984c, 0x1284233c},
+ {0x00009854, 0x00000859},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x05100000},
+ {0x0000a920, 0x05100000},
+ {0x0000b920, 0x05100000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280b212},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5d50e188},
+ {0x00009958, 0x00081fff},
+ {0x0000c95c, 0x004b6a8e},
+ {0x0000c968, 0x000003ce},
+ {0x00009970, 0x190fb515},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x001fff00},
+ {0x000099ac, 0x00000000},
+ {0x000099b0, 0x03051000},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000200},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x000000aa},
+ {0x000099fc, 0x00001042},
+ {0x00009b00, 0x00000000},
+ {0x00009b04, 0x00000001},
+ {0x00009b08, 0x00000002},
+ {0x00009b0c, 0x00000003},
+ {0x00009b10, 0x00000004},
+ {0x00009b14, 0x00000005},
+ {0x00009b18, 0x00000008},
+ {0x00009b1c, 0x00000009},
+ {0x00009b20, 0x0000000a},
+ {0x00009b24, 0x0000000b},
+ {0x00009b28, 0x0000000c},
+ {0x00009b2c, 0x0000000d},
+ {0x00009b30, 0x00000010},
+ {0x00009b34, 0x00000011},
+ {0x00009b38, 0x00000012},
+ {0x00009b3c, 0x00000013},
+ {0x00009b40, 0x00000014},
+ {0x00009b44, 0x00000015},
+ {0x00009b48, 0x00000018},
+ {0x00009b4c, 0x00000019},
+ {0x00009b50, 0x0000001a},
+ {0x00009b54, 0x0000001b},
+ {0x00009b58, 0x0000001c},
+ {0x00009b5c, 0x0000001d},
+ {0x00009b60, 0x00000020},
+ {0x00009b64, 0x00000021},
+ {0x00009b68, 0x00000022},
+ {0x00009b6c, 0x00000023},
+ {0x00009b70, 0x00000024},
+ {0x00009b74, 0x00000025},
+ {0x00009b78, 0x00000028},
+ {0x00009b7c, 0x00000029},
+ {0x00009b80, 0x0000002a},
+ {0x00009b84, 0x0000002b},
+ {0x00009b88, 0x0000002c},
+ {0x00009b8c, 0x0000002d},
+ {0x00009b90, 0x00000030},
+ {0x00009b94, 0x00000031},
+ {0x00009b98, 0x00000032},
+ {0x00009b9c, 0x00000033},
+ {0x00009ba0, 0x00000034},
+ {0x00009ba4, 0x00000035},
+ {0x00009ba8, 0x00000035},
+ {0x00009bac, 0x00000035},
+ {0x00009bb0, 0x00000035},
+ {0x00009bb4, 0x00000035},
+ {0x00009bb8, 0x00000035},
+ {0x00009bbc, 0x00000035},
+ {0x00009bc0, 0x00000035},
+ {0x00009bc4, 0x00000035},
+ {0x00009bc8, 0x00000035},
+ {0x00009bcc, 0x00000035},
+ {0x00009bd0, 0x00000035},
+ {0x00009bd4, 0x00000035},
+ {0x00009bd8, 0x00000035},
+ {0x00009bdc, 0x00000035},
+ {0x00009be0, 0x00000035},
+ {0x00009be4, 0x00000035},
+ {0x00009be8, 0x00000035},
+ {0x00009bec, 0x00000035},
+ {0x00009bf0, 0x00000035},
+ {0x00009bf4, 0x00000035},
+ {0x00009bf8, 0x00000010},
+ {0x00009bfc, 0x0000001a},
+ {0x0000a210, 0x40806333},
+ {0x0000a214, 0x00106c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x018830c6},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x00000bb5},
+ {0x0000a22c, 0x00000011},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a23c, 0x13c889af},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00007bb6},
+ {0x0000a248, 0x0fff3ffc},
+ {0x0000a24c, 0x00000001},
+ {0x0000a250, 0x0000a000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cc75380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0e79e5c6},
+ {0x0000b26c, 0x0e79e5c6},
+ {0x0000c26c, 0x0e79e5c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000a27c, 0x051701ce},
+ {0x0000a338, 0x00000000},
+ {0x0000a33c, 0x00000000},
+ {0x0000a340, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a348, 0x3fffffff},
+ {0x0000a34c, 0x3fffffff},
+ {0x0000a350, 0x3fffffff},
+ {0x0000a354, 0x0003ffff},
+ {0x0000a358, 0x79a8aa1f},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x08000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+};
+
+static const u32 ar5416Bank0[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x1e5795e5},
+ {0x000098e0, 0x02008020},
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ /* Addr 5G 2G */
+ {0x00009a00, 0x00000000, 0x00000000},
+ {0x00009a04, 0x00000040, 0x00000040},
+ {0x00009a08, 0x00000080, 0x00000080},
+ {0x00009a0c, 0x000001a1, 0x00000141},
+ {0x00009a10, 0x000001e1, 0x00000181},
+ {0x00009a14, 0x00000021, 0x000001c1},
+ {0x00009a18, 0x00000061, 0x00000001},
+ {0x00009a1c, 0x00000168, 0x00000041},
+ {0x00009a20, 0x000001a8, 0x000001a8},
+ {0x00009a24, 0x000001e8, 0x000001e8},
+ {0x00009a28, 0x00000028, 0x00000028},
+ {0x00009a2c, 0x00000068, 0x00000068},
+ {0x00009a30, 0x00000189, 0x000000a8},
+ {0x00009a34, 0x000001c9, 0x00000169},
+ {0x00009a38, 0x00000009, 0x000001a9},
+ {0x00009a3c, 0x00000049, 0x000001e9},
+ {0x00009a40, 0x00000089, 0x00000029},
+ {0x00009a44, 0x00000170, 0x00000069},
+ {0x00009a48, 0x000001b0, 0x00000190},
+ {0x00009a4c, 0x000001f0, 0x000001d0},
+ {0x00009a50, 0x00000030, 0x00000010},
+ {0x00009a54, 0x00000070, 0x00000050},
+ {0x00009a58, 0x00000191, 0x00000090},
+ {0x00009a5c, 0x000001d1, 0x00000151},
+ {0x00009a60, 0x00000011, 0x00000191},
+ {0x00009a64, 0x00000051, 0x000001d1},
+ {0x00009a68, 0x00000091, 0x00000011},
+ {0x00009a6c, 0x000001b8, 0x00000051},
+ {0x00009a70, 0x000001f8, 0x00000198},
+ {0x00009a74, 0x00000038, 0x000001d8},
+ {0x00009a78, 0x00000078, 0x00000018},
+ {0x00009a7c, 0x00000199, 0x00000058},
+ {0x00009a80, 0x000001d9, 0x00000098},
+ {0x00009a84, 0x00000019, 0x00000159},
+ {0x00009a88, 0x00000059, 0x00000199},
+ {0x00009a8c, 0x00000099, 0x000001d9},
+ {0x00009a90, 0x000000d9, 0x00000019},
+ {0x00009a94, 0x000000f9, 0x00000059},
+ {0x00009a98, 0x000000f9, 0x00000099},
+ {0x00009a9c, 0x000000f9, 0x000000d9},
+ {0x00009aa0, 0x000000f9, 0x000000f9},
+ {0x00009aa4, 0x000000f9, 0x000000f9},
+ {0x00009aa8, 0x000000f9, 0x000000f9},
+ {0x00009aac, 0x000000f9, 0x000000f9},
+ {0x00009ab0, 0x000000f9, 0x000000f9},
+ {0x00009ab4, 0x000000f9, 0x000000f9},
+ {0x00009ab8, 0x000000f9, 0x000000f9},
+ {0x00009abc, 0x000000f9, 0x000000f9},
+ {0x00009ac0, 0x000000f9, 0x000000f9},
+ {0x00009ac4, 0x000000f9, 0x000000f9},
+ {0x00009ac8, 0x000000f9, 0x000000f9},
+ {0x00009acc, 0x000000f9, 0x000000f9},
+ {0x00009ad0, 0x000000f9, 0x000000f9},
+ {0x00009ad4, 0x000000f9, 0x000000f9},
+ {0x00009ad8, 0x000000f9, 0x000000f9},
+ {0x00009adc, 0x000000f9, 0x000000f9},
+ {0x00009ae0, 0x000000f9, 0x000000f9},
+ {0x00009ae4, 0x000000f9, 0x000000f9},
+ {0x00009ae8, 0x000000f9, 0x000000f9},
+ {0x00009aec, 0x000000f9, 0x000000f9},
+ {0x00009af0, 0x000000f9, 0x000000f9},
+ {0x00009af4, 0x000000f9, 0x000000f9},
+ {0x00009af8, 0x000000f9, 0x000000f9},
+ {0x00009afc, 0x000000f9, 0x000000f9},
+};
+
+static const u32 ar5416Bank1[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x02108421},
+ {0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x0e73ff17},
+ {0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3[][3] = {
+ /* Addr 5G 2G */
+ {0x000098f0, 0x01400018, 0x01c00018},
+};
+
+static const u32 ar5416Bank6[][3] = {
+ /* Addr 5G 2G */
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00e00000, 0x00e00000},
+ {0x0000989c, 0x005e0000, 0x005e0000},
+ {0x0000989c, 0x00120000, 0x00120000},
+ {0x0000989c, 0x00620000, 0x00620000},
+ {0x0000989c, 0x00020000, 0x00020000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x40ff0000, 0x40ff0000},
+ {0x0000989c, 0x005f0000, 0x005f0000},
+ {0x0000989c, 0x00870000, 0x00870000},
+ {0x0000989c, 0x00f90000, 0x00f90000},
+ {0x0000989c, 0x007b0000, 0x007b0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00f50000, 0x00f50000},
+ {0x0000989c, 0x00dc0000, 0x00dc0000},
+ {0x0000989c, 0x00110000, 0x00110000},
+ {0x0000989c, 0x006100a8, 0x006100a8},
+ {0x0000989c, 0x004210a2, 0x004210a2},
+ {0x0000989c, 0x0014008f, 0x0014008f},
+ {0x0000989c, 0x00c40003, 0x00c40003},
+ {0x0000989c, 0x003000f2, 0x003000f2},
+ {0x0000989c, 0x00440016, 0x00440016},
+ {0x0000989c, 0x00410040, 0x00410040},
+ {0x0000989c, 0x0001805e, 0x0001805e},
+ {0x0000989c, 0x0000c0ab, 0x0000c0ab},
+ {0x0000989c, 0x000000f1, 0x000000f1},
+ {0x0000989c, 0x00002081, 0x00002081},
+ {0x0000989c, 0x000000d4, 0x000000d4},
+ {0x000098d0, 0x0000000f, 0x0010000f},
+};
+
+static const u32 ar5416Bank6TPC[][3] = {
+ /* Addr 5G 2G */
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00e00000, 0x00e00000},
+ {0x0000989c, 0x005e0000, 0x005e0000},
+ {0x0000989c, 0x00120000, 0x00120000},
+ {0x0000989c, 0x00620000, 0x00620000},
+ {0x0000989c, 0x00020000, 0x00020000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x40ff0000, 0x40ff0000},
+ {0x0000989c, 0x005f0000, 0x005f0000},
+ {0x0000989c, 0x00870000, 0x00870000},
+ {0x0000989c, 0x00f90000, 0x00f90000},
+ {0x0000989c, 0x007b0000, 0x007b0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00f50000, 0x00f50000},
+ {0x0000989c, 0x00dc0000, 0x00dc0000},
+ {0x0000989c, 0x00110000, 0x00110000},
+ {0x0000989c, 0x006100a8, 0x006100a8},
+ {0x0000989c, 0x00423022, 0x00423022},
+ {0x0000989c, 0x201400df, 0x201400df},
+ {0x0000989c, 0x00c40002, 0x00c40002},
+ {0x0000989c, 0x003000f2, 0x003000f2},
+ {0x0000989c, 0x00440016, 0x00440016},
+ {0x0000989c, 0x00410040, 0x00410040},
+ {0x0000989c, 0x0001805e, 0x0001805e},
+ {0x0000989c, 0x0000c0ab, 0x0000c0ab},
+ {0x0000989c, 0x000000e1, 0x000000e1},
+ {0x0000989c, 0x00007081, 0x00007081},
+ {0x0000989c, 0x000000d4, 0x000000d4},
+ {0x000098d0, 0x0000000f, 0x0010000f},
+};
+
+static const u32 ar5416Bank7[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000500},
+ {0x0000989c, 0x00000800},
+ {0x000098cc, 0x0000000e},
+};
+
+static const u32 ar5416Addac[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000003},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x0000000c},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000030},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000060},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000058},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x000098c4, 0x00000000},
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 000000000..6c23d2795
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1364 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "../regd.h"
+#include "ar9002_phy.h"
+#include "ar5008_initvals.h"
+
+/* All code below is for AR5008, AR9001, AR9002 */
+
+#define AR5008_OFDM_RATES 8
+#define AR5008_HT_SS_RATES 8
+#define AR5008_HT_DS_RATES 8
+
+#define AR5008_HT20_SHIFT 16
+#define AR5008_HT40_SHIFT 24
+
+#define AR5008_11NA_OFDM_SHIFT 0
+#define AR5008_11NA_HT_SS_SHIFT 8
+#define AR5008_11NA_HT_DS_SHIFT 16
+
+#define AR5008_11NG_OFDM_SHIFT 4
+#define AR5008_11NG_HT_SS_SHIFT 12
+#define AR5008_11NG_HT_DS_SHIFT 20
+
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
+static const struct ar5416IniArray bank0 = STATIC_INI_ARRAY(ar5416Bank0);
+static const struct ar5416IniArray bank1 = STATIC_INI_ARRAY(ar5416Bank1);
+static const struct ar5416IniArray bank2 = STATIC_INI_ARRAY(ar5416Bank2);
+static const struct ar5416IniArray bank3 = STATIC_INI_ARRAY(ar5416Bank3);
+static const struct ar5416IniArray bank7 = STATIC_INI_ARRAY(ar5416Bank7);
+
+static void ar5008_write_bank6(struct ath_hw *ah, unsigned int *writecnt)
+{
+ struct ar5416IniArray *array = &ah->iniBank6;
+ u32 *data = ah->analogBank6Data;
+ int r;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (r = 0; r < array->ia_rows; r++) {
+ REG_WRITE(ah, INI_RA(array, r, 0), data[r]);
+ DO_DELAY(*writecnt);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+/**
+ * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf: RF bank register buffer to modify
+ * @reg32: field value to pack into the buffer
+ * @numBits: width of the field in bits
+ * @firstBit: 1-based position of the field's first bit within the bank
+ * @column: byte lane within each 32-bit buffer entry
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
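For reference, a worked example of the index arithmetic above (a sketch, not part of the patch): ar5008_hw_force_bias() below packs a 3-bit field starting at bit 181 into byte lane 3.

/* Sketch of where a 3-bit field at firstBit = 181, column = 3 lands. */
static void example_swizzle_indices(void)
{
	unsigned int firstBit = 181, column = 3, numBits = 3;
	unsigned int arrayEntry  = (firstBit - 1) / 8;	/* = 22, the 23rd u32 entry */
	unsigned int bitPosition = (firstBit - 1) % 8;	/* = 4 */

	/*
	 * The (bit-reversed) value occupies bits bitPosition..bitPosition + 2
	 * of byte lane 'column', i.e. bits 28..30 of analogBank6Data[22];
	 * since bitPosition + numBits <= 8, nothing spills into entry 23.
	 */
	(void)arrayEntry;
	(void)bitPosition;
	(void)column;
	(void)numBits;
}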
+
+/*
+ * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
+ * rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ * <approved>
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000)
+ return;
+
+ BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ ar5008_write_bank6(ah, &reg_writes);
+}
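To make the rule table above concrete, here is the bias selection as actually coded, with sample frequencies (a sketch, not part of the patch; the implementation appears to follow the original rules plus the 1st Mod, whereas the 2nd Mod wording would give 2 rather than 1 for 2412-2421 MHz):

/* Sketch of ar5008_hw_force_bias()'s bias choice, as written above. */
static unsigned int example_icsyndiv_bias(unsigned int synth_freq)
{
	if (synth_freq < 2412)
		return 0;	/* any frequency below 2412 MHz */
	if (synth_freq < 2422)
		return 1;	/* e.g. 2412, 2417 MHz */
	return 2;		/* e.g. 2422, 2437, 2484 MHz */
}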
+
+/**
+ * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the synthesizer channel. Assumes writes to the analog bus are enabled and
+ * that the bank6 register cache in ah->analogBank6Data is valid.
+ */
+static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_err(common, "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_err(common, "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ar5008_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+
+ return 0;
+}
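As a worked example of the channelSel arithmetic above (a sketch, not part of the patch), consider one 2 GHz and one 5 GHz channel:

/*
 * 2412 MHz (2 GHz channel 1): (2412 - 2192) % 5 == 0, so
 *   channelSel = ((2412 - 672) * 2 - 3040) / 10 = 44 and bModeSynth = 0;
 *   after the shift, mask and bit-reversal this becomes
 *   ath9k_hw_reverse_bits(0xb0, 8) = 0x0d.
 *
 * 5180 MHz (5 GHz channel 36): 5180 % 20 == 0 and 5180 >= 5120, so
 *   channelSel = ath9k_hw_reverse_bits(19 << 2, 8)
 *              = ath9k_hw_reverse_bits(0x4c, 8) = 0x32,
 *   with aModeRefSel = ath9k_hw_reverse_bits(1, 2) = 2.
 */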
+
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: channel being configured
+ *
+ * For non-single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ static int pilot_mask_reg[4] = {
+ AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ static int chan_mask_reg[4] = {
+ AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ static int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
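+
+/*
+ * For reference, the two fixed-point conversions above reduce to the
+ * arithmetic below. This is an illustrative, self-contained sketch rather
+ * than driver code: the 0xfffff mask stands in for the
+ * AR_PHY_TIMING11_SPUR_DELTA_PHASE field (assumed 20 bits wide here), and
+ * 524288 == 2^19 and 2048 == 2^11 are the fixed-point scale factors.
+ */
+#if 0 /* illustrative sketch only */
+static void spur_fixed_point_example(int bb_spur, int is_2ghz)
+{
+ int denominator = is_2ghz ? 440 : 400;
+ unsigned int spur_delta_phase, spur_freq_sd;
+
+ /* Q19 fixed point: (bb_spur / 100) * 2^19, truncated to the field */
+ spur_delta_phase = ((bb_spur * 524288) / 100) & 0xfffff;
+
+ /* Q11 fixed point: (bb_spur / denominator) * 2^11, 10-bit field */
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+}
+#endif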
+
+/**
+ * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+ int size = ah->iniBank6.ia_rows * sizeof(u32);
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ return 0;
+
+ ah->analogBank6Data = devm_kzalloc(ah->dev, size, GFP_KERNEL);
+ if (!ah->analogBank6Data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan: channel to program
+ * @modesIndex: mode column to use from the mode-specific initvals
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+ int i;
+
+ /*
+ * Software does not need to program bank data for
+ * single-chip devices, i.e. AR9280 and later.
+ */
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ return true;
+
+ /* Setup rf parameters */
+ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+
+ for (i = 0; i < ah->iniBank6.ia_rows; i++)
+ ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex);
+
+ /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
+ db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
+ db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+ /* Write Analog registers */
+ REG_WRITE_ARRAY(&bank0, 1, regWrites);
+ REG_WRITE_ARRAY(&bank1, 1, regWrites);
+ REG_WRITE_ARRAY(&bank2, 1, regWrites);
+ REG_WRITE_ARRAY(&bank3, modesIndex, regWrites);
+ ar5008_write_bank6(ah, &regWrites);
+ REG_WRITE_ARRAY(&bank7, 1, regWrites);
+
+ return true;
+}
+
+static void ar5008_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ ath9k_hw_synth_delay(ah, chan, synthDelay);
+}
+
+static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+{
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ah->rxchainmask;
+ tx_chainmask = ah->txchainmask;
+
+ switch (rx_chainmask) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ ENABLE_REGWRITE_BUFFER(ah);
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ ENABLE_REGWRITE_BUFFER(ah);
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
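+
+/*
+ * The chainmask values handled above are plain bitmaps: bit i set means RF
+ * chain i is in use, so 0x3 selects chains 0+1, 0x5 selects chains 0+2
+ * (the combination that needs AR_PHY_SWAP_ALT_CHAIN) and 0x7 selects all
+ * three. A minimal sketch of decoding such a mask (illustration only, not
+ * part of the driver):
+ */
+#if 0 /* illustrative sketch only */
+static int chains_in_mask(unsigned int chainmask)
+{
+ int i, nchains = 0;
+
+ for (i = 0; i < 3; i++)
+ if (chainmask & (1 << i))
+ nchains++;
+
+ return nchains;
+}
+#endif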
+
+static void ar5008_hw_override_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS bits and clear them only after
+ * RXE is set for the MAC. This prevents frames with corrupted
+ * descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID.
+ * By default, this feature is enabled. But since the driver
+ * is not using this feature, we switch it off; otherwise
+ * multicast search based on MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_ADHOC_MCAST_KEYID_ENABLE);
+
+ if (!AR_SREV_9271(ah))
+ val &= ~AR_PCU_MISC_MODE2_HWWAR1;
+
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ return;
+ /*
+ * Disable BB clock gating
+ * Necessary to avoid issues on AR5416 2.0
+ */
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
+}
+
+static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
+ AR_PHY_FC_ENABLE_DAC_FIFO);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if (IS_CHAN_HT40PLUS(chan))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ }
+ ENABLE_REGWRITE_BUFFER(ah);
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ /* This function only performs REG_WRITEs, so
+ * we can include it in the REGWRITE_BUFFER. */
+ ath9k_hw_set11nmac2040(ah, chan);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+static int ar5008_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i, regWrites = 0;
+ u32 modesIndex, freqIndex;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ freqIndex = 1;
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ } else {
+ freqIndex = 2;
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
+ }
+
+ /*
+ * Set the correct baseband-to-analog shift setting to
+ * access the analog chips.
+ */
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Write ADDAC shifts */
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+ if (ah->eep_ops->set_addac)
+ ah->eep_ops->set_addac(ah, chan);
+
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes, i, 0);
+ u32 val = INI_RA(&ah->iniModes, i, modesIndex);
+
+ if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
+ val &= ~AR_AN_TOP2_PWDCLKIND;
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg
+ && (common->bus_ops->ath_bus_type != ATH_USB)) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9287_11_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
+ AR_SREV_9287_11_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9271_10(ah)) {
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENA);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_ADC_ON, 0xa);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write common array parameters */
+ for (i = 0; i < ah->iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniCommon, i, 0);
+ u32 val = INI_RA(&ah->iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg
+ && (common->bus_ops->ath_bus_type != ATH_USB)) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex,
+ regWrites);
+
+ ar5008_hw_override_ini(ah, chan);
+ ar5008_hw_set_channel_regs(ah, chan);
+ ar5008_hw_init_chain_masks(ah);
+ ath9k_olc_init(ah);
+ ath9k_hw_apply_txpower(ah, chan, false);
+
+ /* Write analog registers */
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ ath_err(ath9k_hw_common(ah), "ar5416SetRfRegs failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_20_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ?
+ AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
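+
+/*
+ * The delta-slope coefficient above is a Q24 fixed-point ratio:
+ * 0x64000000 == 100 * 2^24, so coef_scaled is (100 MHz / synth_center) in
+ * Q24, halved or quartered for half/quarter-rate channels, with the
+ * half-GI coefficient taken as 9/10 of that. A standalone sketch of the
+ * same arithmetic (illustration only; the mantissa/exponent split done by
+ * ath9k_hw_get_delta_slope_vals() is not reproduced here):
+ */
+#if 0 /* illustrative sketch only */
+static unsigned int delta_slope_coef_example(unsigned int synth_center_mhz)
+{
+ unsigned int clock_scaled = 100u << 24; /* == 0x64000000 */
+
+ /* e.g. 5180 MHz -> (100 << 24) / 5180 == 323884 in Q24 */
+ return clock_scaled / synth_center_mhz;
+}
+#endif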
+
+static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+static void ar5008_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+
+ ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static void ar5008_restore_chainmask(struct ath_hw *ah)
+{
+ int rx_chainmask = ah->rxchainmask;
+
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+}
+
+static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+
+ return pll;
+}
+
+static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+
+ return pll;
+}
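+
+/*
+ * SM() as used above is the driver's shift-then-mask helper for placing a
+ * value into a register field; MS(), used in the noise-floor code below,
+ * is the matching extractor. A generic, self-contained sketch of the idiom
+ * (EX_FIELD/EX_FIELD_S are invented example definitions, not the real
+ * AR_RTC_PLL macros):
+ */
+#if 0 /* illustrative sketch only */
+#define EX_FIELD 0x000003e0 /* bits 5..9 of the register */
+#define EX_FIELD_S 5
+#define EX_SM(v) (((v) << EX_FIELD_S) & EX_FIELD) /* pack a field value */
+#define EX_MS(r) (((r) & EX_FIELD) >> EX_FIELD_S) /* unpack it again */
+#endif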
+
+static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd,
+ int param)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ar5416AniState *aniState = &ah->ani;
+ s32 value;
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ /*
+ * on == 1 means ofdm weak signal detection is ON
+ * on == 1 is the default, for less noise immunity
+ *
+ * on == 0 means ofdm weak signal detection is OFF
+ * on == 0 means more noise immunity
+ */
+ u32 on = param ? 1 : 0;
+ /*
+ * Make the register settings for the default case
+ * (weak signal detection ON) come from the INI file.
+ */
+ int m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ int m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ int m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ int m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ int m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ int m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ int m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ int m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ int m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ int m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (on != aniState->ofdmWeakSigDetect) {
+ ath_dbg(common, ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ aniState->ofdmWeakSigDetect ?
+ "on" : "off",
+ on ? "on" : "off");
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetect = on;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ u32 level = param;
+
+ value = level * 2;
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP, value);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
+
+ if (level != aniState->firstepLevel) {
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL,
+ value,
+ aniState->iniDef.firstep);
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL,
+ value,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ value = (level + 1) * 2;
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1, value);
+
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1, value - 1);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
+ value,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_MRC_CCK:
+ /*
+ * You should not see this, as AR5008, AR9001 and AR9002
+ * do not have hardware support for MRC CCK.
+ */
+ WARN_ON(1);
+ break;
+ default:
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_dbg(common, ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ aniState->ofdmWeakSigDetect ? "on" : "off",
+ aniState->firstepLevel,
+ aniState->mrcCCK ? "on" : "off",
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+ return true;
+}
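+
+/*
+ * The two ANI level commands above use simple linear mappings: firstep
+ * level N programs FIRSTEP (and FIRSTEP_LOW) with 2 * N, while
+ * spur-immunity level N programs cycpwrThr1 with 2 * (N + 1) and the
+ * extension-channel threshold with one less. For example, spur-immunity
+ * level 2 yields cycpwrThr1 = 6 and cycpwrThr1Ext = 5.
+ */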
+
+static void ar5008_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+ nfarray[0] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
+ nfarray[1] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
+ nfarray[2] = sign_extend32(nf, 8);
+
+ if (!IS_CHAN_HT40(ah->curchan))
+ return;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ nfarray[3] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
+ nfarray[4] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
+ nfarray[5] = sign_extend32(nf, 8);
+}
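+
+/*
+ * Each minCCApwr field read above is a 9-bit two's-complement value packed
+ * into its CCA register: MS() masks and shifts the field out and
+ * sign_extend32(nf, 8) treats bit 8 as the sign bit. A minimal standalone
+ * equivalent of that sign extension (illustration only):
+ */
+#if 0 /* illustrative sketch only */
+static int sign_extend_9bit(unsigned int field)
+{
+ /* e.g. the 9-bit reading 0x1a5 decodes to -91 */
+ return (int)(field & 0x1ff) - ((field & 0x100) ? 0x200 : 0);
+}
+#endif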
+
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialised from the INI.
+ */
+static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ar5416AniState *aniState = &ah->ani;
+ struct ath9k_ani_default *iniDef;
+ u32 val;
+
+ iniDef = &aniState->iniDef;
+
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d MHz\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = true;
+ aniState->mrcCCK = false; /* not available on pre AR9003 */
+}
+
+static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_5416_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ;
+ ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ;
+ ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
+}
+
+static void ar5008_hw_set_radar_params(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf)
+{
+ u32 radar_0 = 0, radar_1;
+
+ if (!conf) {
+ REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
+ return;
+ }
+
+ radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
+ radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
+ radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
+ radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
+ radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
+ radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
+ radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
+ radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
+ radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
+ radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
+
+ REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
+ REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
+ if (conf->ext_channel)
+ REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+ else
+ REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+}
+
+static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
+{
+ struct ath_hw_radar_conf *conf = &ah->radar_conf;
+
+ conf->fir_power = -33;
+ conf->radar_rssi = 20;
+ conf->pulse_height = 10;
+ conf->pulse_rssi = 15;
+ conf->pulse_inband = 15;
+ conf->pulse_maxlen = 255;
+ conf->pulse_inband_step = 12;
+ conf->radar_inband = 8;
+}
+
+static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array)
+{
+#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x))
+ ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]);
+ ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l],
+ rate_array[rate2s]));
+ ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l],
+ rate_array[rate5_5s]));
+ ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l],
+ rate_array[rate11s]));
+#undef CCK_DELTA
+}
+
+static void ar5008_hw_init_txpower_ofdm(struct ath_hw *ah, int16_t *rate_array,
+ int offset)
+{
+ int i, idx = 0;
+
+ for (i = offset; i < offset + AR5008_OFDM_RATES; i++) {
+ ah->tx_power[i] = rate_array[idx];
+ idx++;
+ }
+}
+
+static void ar5008_hw_init_txpower_ht(struct ath_hw *ah, int16_t *rate_array,
+ int ss_offset, int ds_offset,
+ bool is_40, int ht40_delta)
+{
+ int i, mcs_idx = (is_40) ? AR5008_HT40_SHIFT : AR5008_HT20_SHIFT;
+
+ for (i = ss_offset; i < ss_offset + AR5008_HT_SS_RATES; i++) {
+ ah->tx_power[i] = rate_array[mcs_idx] + ht40_delta;
+ mcs_idx++;
+ }
+ memcpy(&ah->tx_power[ds_offset], &ah->tx_power[ss_offset],
+ AR5008_HT_SS_RATES);
+}
+
+void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
+ struct ath9k_channel *chan, int ht40_delta)
+{
+ if (IS_CHAN_5GHZ(chan)) {
+ ar5008_hw_init_txpower_ofdm(ah, rate_array,
+ AR5008_11NA_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar5008_hw_init_txpower_ht(ah, rate_array,
+ AR5008_11NA_HT_SS_SHIFT,
+ AR5008_11NA_HT_DS_SHIFT,
+ IS_CHAN_HT40(chan),
+ ht40_delta);
+ }
+ } else {
+ ar5008_hw_init_txpower_cck(ah, rate_array);
+ ar5008_hw_init_txpower_ofdm(ah, rate_array,
+ AR5008_11NG_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar5008_hw_init_txpower_ht(ah, rate_array,
+ AR5008_11NG_HT_SS_SHIFT,
+ AR5008_11NG_HT_DS_SHIFT,
+ IS_CHAN_HT40(chan),
+ ht40_delta);
+ }
+ }
+}
+
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ static const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ int ret;
+
+ ret = ar5008_hw_rf_alloc_ext_banks(ah);
+ if (ret)
+ return ret;
+
+ priv_ops->rf_set_freq = ar5008_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
+
+ priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
+ priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
+ priv_ops->init_bb = ar5008_hw_init_bb;
+ priv_ops->process_ini = ar5008_hw_process_ini;
+ priv_ops->set_rfmode = ar5008_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar5008_hw_rfbus_req;
+ priv_ops->rfbus_done = ar5008_hw_rfbus_done;
+ priv_ops->restore_chainmask = ar5008_restore_chainmask;
+ priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->set_radar_params = ar5008_hw_set_radar_params;
+
+ priv_ops->ani_control = ar5008_hw_ani_control_new;
+ priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
+
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
+ else
+ priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
+
+ ar5008_hw_set_nf_limits(ah);
+ ar5008_hw_set_radar_conf(ah);
+ memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
+ return 0;
+}
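+
+/*
+ * ar5008_hw_attach_phy_ops() follows the usual ath9k pattern of populating
+ * a table of function pointers so the generic reset and calibration paths
+ * stay chip-agnostic. A minimal, self-contained sketch of the same idea
+ * (all names below are invented for illustration, not ath9k symbols):
+ */
+#if 0 /* illustrative sketch only */
+struct phy_ops_example {
+ int (*set_freq)(void *hw, int freq_mhz);
+ void (*init_bb)(void *hw);
+};
+
+static int example_set_freq(void *hw, int freq_mhz) { return 0; }
+static void example_init_bb(void *hw) { }
+
+static void example_attach_phy_ops(struct phy_ops_example *ops)
+{
+ ops->set_freq = example_set_freq;
+ ops->init_bb = example_init_bb;
+}
+#endif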
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 000000000..59524e1d4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1089 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+static const u32 ar5416Modes_9100[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009940, 0x00750604, 0x00754604, 0xfff81204, 0xfff81204},
+ {0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020},
+ {0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e},
+ {0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff},
+ {0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0},
+ {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
+ {0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
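+
+/*
+ * Tables such as ar5416Modes_9100[] are applied through the modesIndex
+ * column lookup shown in ar5008_hw_process_ini() in ar5008_phy.c: column 0
+ * holds the register address and columns 1-4 hold the value for the
+ * selected mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20, per the header row).
+ * A minimal sketch of that consumption loop (plain C illustration;
+ * reg_write() is a stand-in for the driver's REG_WRITE()):
+ */
+#if 0 /* illustrative sketch only */
+static void apply_mode_table(const u32 table[][5], int rows, int modes_index,
+ void (*reg_write)(u32 addr, u32 val))
+{
+ int i;
+
+ for (i = 0; i < rows; i++)
+ reg_write(table[i][0], table[i][modes_index]);
+}
+#endif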
+
+static const u32 ar5416Common_9100[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00020010, 0x00000003},
+ {0x00020038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00004000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x000080c0, 0x2a82301a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008120, 0x08f04800},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0x00000000},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c4, 0x00000000},
+ {0x000081d0, 0x00003210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x00008300, 0x00000000},
+ {0x00008304, 0x00000000},
+ {0x00008308, 0x00000000},
+ {0x0000830c, 0x00000000},
+ {0x00008310, 0x00000000},
+ {0x00008314, 0x00000000},
+ {0x00008318, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00000000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xad848e19},
+ {0x00009810, 0x7d14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x00009840, 0x206a01ae},
+ {0x0000984c, 0x1284233c},
+ {0x00009854, 0x00000859},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x05100000},
+ {0x0000a920, 0x05100000},
+ {0x0000b920, 0x05100000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280b212},
+ {0x0000994c, 0x00020028},
+ {0x0000c95c, 0x004b6a8e},
+ {0x0000c968, 0x000003ce},
+ {0x00009970, 0x190fb515},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x006f0000},
+ {0x000099b0, 0x03051000},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000200},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099fc, 0x00001042},
+ {0x00009b00, 0x00000000},
+ {0x00009b04, 0x00000001},
+ {0x00009b08, 0x00000002},
+ {0x00009b0c, 0x00000003},
+ {0x00009b10, 0x00000004},
+ {0x00009b14, 0x00000005},
+ {0x00009b18, 0x00000008},
+ {0x00009b1c, 0x00000009},
+ {0x00009b20, 0x0000000a},
+ {0x00009b24, 0x0000000b},
+ {0x00009b28, 0x0000000c},
+ {0x00009b2c, 0x0000000d},
+ {0x00009b30, 0x00000010},
+ {0x00009b34, 0x00000011},
+ {0x00009b38, 0x00000012},
+ {0x00009b3c, 0x00000013},
+ {0x00009b40, 0x00000014},
+ {0x00009b44, 0x00000015},
+ {0x00009b48, 0x00000018},
+ {0x00009b4c, 0x00000019},
+ {0x00009b50, 0x0000001a},
+ {0x00009b54, 0x0000001b},
+ {0x00009b58, 0x0000001c},
+ {0x00009b5c, 0x0000001d},
+ {0x00009b60, 0x00000020},
+ {0x00009b64, 0x00000021},
+ {0x00009b68, 0x00000022},
+ {0x00009b6c, 0x00000023},
+ {0x00009b70, 0x00000024},
+ {0x00009b74, 0x00000025},
+ {0x00009b78, 0x00000028},
+ {0x00009b7c, 0x00000029},
+ {0x00009b80, 0x0000002a},
+ {0x00009b84, 0x0000002b},
+ {0x00009b88, 0x0000002c},
+ {0x00009b8c, 0x0000002d},
+ {0x00009b90, 0x00000030},
+ {0x00009b94, 0x00000031},
+ {0x00009b98, 0x00000032},
+ {0x00009b9c, 0x00000033},
+ {0x00009ba0, 0x00000034},
+ {0x00009ba4, 0x00000035},
+ {0x00009ba8, 0x00000035},
+ {0x00009bac, 0x00000035},
+ {0x00009bb0, 0x00000035},
+ {0x00009bb4, 0x00000035},
+ {0x00009bb8, 0x00000035},
+ {0x00009bbc, 0x00000035},
+ {0x00009bc0, 0x00000035},
+ {0x00009bc4, 0x00000035},
+ {0x00009bc8, 0x00000035},
+ {0x00009bcc, 0x00000035},
+ {0x00009bd0, 0x00000035},
+ {0x00009bd4, 0x00000035},
+ {0x00009bd8, 0x00000035},
+ {0x00009bdc, 0x00000035},
+ {0x00009be0, 0x00000035},
+ {0x00009be4, 0x00000035},
+ {0x00009be8, 0x00000035},
+ {0x00009bec, 0x00000035},
+ {0x00009bf0, 0x00000035},
+ {0x00009bf4, 0x00000035},
+ {0x00009bf8, 0x00000010},
+ {0x00009bfc, 0x0000001a},
+ {0x0000a210, 0x40806333},
+ {0x0000a214, 0x00106c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x018830c6},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x001a0bb5},
+ {0x0000a22c, 0x00000000},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a23c, 0x13c889af},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00007bb6},
+ {0x0000a248, 0x0fff3ffc},
+ {0x0000a24c, 0x00000001},
+ {0x0000a250, 0x0000e000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cc75380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a268, 0x00000001},
+ {0x0000a26c, 0x0ebae9c6},
+ {0x0000b26c, 0x0ebae9c6},
+ {0x0000c26c, 0x0ebae9c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000a27c, 0x050701ce},
+ {0x0000a338, 0x00000000},
+ {0x0000a33c, 0x00000000},
+ {0x0000a340, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a348, 0x3fffffff},
+ {0x0000a34c, 0x3fffffff},
+ {0x0000a350, 0x3fffffff},
+ {0x0000a354, 0x0003ffff},
+ {0x0000a358, 0x79a8aa33},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+ /* Addr 5G 2G */
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00e00000, 0x00e00000},
+ {0x0000989c, 0x005e0000, 0x005e0000},
+ {0x0000989c, 0x00120000, 0x00120000},
+ {0x0000989c, 0x00620000, 0x00620000},
+ {0x0000989c, 0x00020000, 0x00020000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x005f0000, 0x005f0000},
+ {0x0000989c, 0x00870000, 0x00870000},
+ {0x0000989c, 0x00f90000, 0x00f90000},
+ {0x0000989c, 0x007b0000, 0x007b0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00f50000, 0x00f50000},
+ {0x0000989c, 0x00dc0000, 0x00dc0000},
+ {0x0000989c, 0x00110000, 0x00110000},
+ {0x0000989c, 0x006100a8, 0x006100a8},
+ {0x0000989c, 0x004210a2, 0x004210a2},
+ {0x0000989c, 0x0014000f, 0x0014000f},
+ {0x0000989c, 0x00c40002, 0x00c40002},
+ {0x0000989c, 0x003000f2, 0x003000f2},
+ {0x0000989c, 0x00440016, 0x00440016},
+ {0x0000989c, 0x00410040, 0x00410040},
+ {0x0000989c, 0x000180d6, 0x000180d6},
+ {0x0000989c, 0x0000c0aa, 0x0000c0aa},
+ {0x0000989c, 0x000000b1, 0x000000b1},
+ {0x0000989c, 0x00002000, 0x00002000},
+ {0x0000989c, 0x000000d4, 0x000000d4},
+ {0x000098d0, 0x0000000f, 0x0010000f},
+};
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+ /* Addr 5G 2G */
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00000000, 0x00000000},
+ {0x0000989c, 0x00e00000, 0x00e00000},
+ {0x0000989c, 0x005e0000, 0x005e0000},
+ {0x0000989c, 0x00120000, 0x00120000},
+ {0x0000989c, 0x00620000, 0x00620000},
+ {0x0000989c, 0x00020000, 0x00020000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x40ff0000, 0x40ff0000},
+ {0x0000989c, 0x005f0000, 0x005f0000},
+ {0x0000989c, 0x00870000, 0x00870000},
+ {0x0000989c, 0x00f90000, 0x00f90000},
+ {0x0000989c, 0x007b0000, 0x007b0000},
+ {0x0000989c, 0x00ff0000, 0x00ff0000},
+ {0x0000989c, 0x00f50000, 0x00f50000},
+ {0x0000989c, 0x00dc0000, 0x00dc0000},
+ {0x0000989c, 0x00110000, 0x00110000},
+ {0x0000989c, 0x006100a8, 0x006100a8},
+ {0x0000989c, 0x00423022, 0x00423022},
+ {0x0000989c, 0x2014008f, 0x2014008f},
+ {0x0000989c, 0x00c40002, 0x00c40002},
+ {0x0000989c, 0x003000f2, 0x003000f2},
+ {0x0000989c, 0x00440016, 0x00440016},
+ {0x0000989c, 0x00410040, 0x00410040},
+ {0x0000989c, 0x0001805e, 0x0001805e},
+ {0x0000989c, 0x0000c0ab, 0x0000c0ab},
+ {0x0000989c, 0x000000e1, 0x000000e1},
+ {0x0000989c, 0x00007080, 0x00007080},
+ {0x0000989c, 0x000000d4, 0x000000d4},
+ {0x000098d0, 0x0000000f, 0x0010000f},
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000010},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x000000c0},
+ {0x0000989c, 0x00000015},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x000098cc, 0x00000000},
+};
+
+static const u32 ar5416Modes_9160[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68},
+ {0x00009850, 0x6c48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6c48b0e2},
+ {0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0},
+ {0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020},
+ {0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40},
+ {0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120},
+ {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x000099bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00},
+ {0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880},
+ {0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788},
+ {0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa},
+ {0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000},
+ {0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402},
+ {0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06},
+ {0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b},
+ {0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b},
+ {0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a},
+ {0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf},
+ {0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f},
+ {0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f},
+ {0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f},
+ {0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00007010, 0x00000020},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x000080c0, 0x2a82301a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x00008300, 0x00000000},
+ {0x00008304, 0x00000000},
+ {0x00008308, 0x00000000},
+ {0x0000830c, 0x00000000},
+ {0x00008310, 0x00000000},
+ {0x00008314, 0x00000000},
+ {0x00008318, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xad848e19},
+ {0x00009810, 0x7d14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x00009840, 0x206a01ae},
+ {0x0000984c, 0x1284233c},
+ {0x00009854, 0x00000859},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x05100000},
+ {0x0000a920, 0x05100000},
+ {0x0000b920, 0x05100000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280b212},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x2108ecff},
+ {0x00009940, 0x00750604},
+ {0x0000c95c, 0x004b6a8e},
+ {0x00009970, 0x190fb515},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x006f0000},
+ {0x000099b0, 0x03051000},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000200},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099fc, 0x00001042},
+ {0x00009b00, 0x00000000},
+ {0x00009b04, 0x00000001},
+ {0x00009b08, 0x00000002},
+ {0x00009b0c, 0x00000003},
+ {0x00009b10, 0x00000004},
+ {0x00009b14, 0x00000005},
+ {0x00009b18, 0x00000008},
+ {0x00009b1c, 0x00000009},
+ {0x00009b20, 0x0000000a},
+ {0x00009b24, 0x0000000b},
+ {0x00009b28, 0x0000000c},
+ {0x00009b2c, 0x0000000d},
+ {0x00009b30, 0x00000010},
+ {0x00009b34, 0x00000011},
+ {0x00009b38, 0x00000012},
+ {0x00009b3c, 0x00000013},
+ {0x00009b40, 0x00000014},
+ {0x00009b44, 0x00000015},
+ {0x00009b48, 0x00000018},
+ {0x00009b4c, 0x00000019},
+ {0x00009b50, 0x0000001a},
+ {0x00009b54, 0x0000001b},
+ {0x00009b58, 0x0000001c},
+ {0x00009b5c, 0x0000001d},
+ {0x00009b60, 0x00000020},
+ {0x00009b64, 0x00000021},
+ {0x00009b68, 0x00000022},
+ {0x00009b6c, 0x00000023},
+ {0x00009b70, 0x00000024},
+ {0x00009b74, 0x00000025},
+ {0x00009b78, 0x00000028},
+ {0x00009b7c, 0x00000029},
+ {0x00009b80, 0x0000002a},
+ {0x00009b84, 0x0000002b},
+ {0x00009b88, 0x0000002c},
+ {0x00009b8c, 0x0000002d},
+ {0x00009b90, 0x00000030},
+ {0x00009b94, 0x00000031},
+ {0x00009b98, 0x00000032},
+ {0x00009b9c, 0x00000033},
+ {0x00009ba0, 0x00000034},
+ {0x00009ba4, 0x00000035},
+ {0x00009ba8, 0x00000035},
+ {0x00009bac, 0x00000035},
+ {0x00009bb0, 0x00000035},
+ {0x00009bb4, 0x00000035},
+ {0x00009bb8, 0x00000035},
+ {0x00009bbc, 0x00000035},
+ {0x00009bc0, 0x00000035},
+ {0x00009bc4, 0x00000035},
+ {0x00009bc8, 0x00000035},
+ {0x00009bcc, 0x00000035},
+ {0x00009bd0, 0x00000035},
+ {0x00009bd4, 0x00000035},
+ {0x00009bd8, 0x00000035},
+ {0x00009bdc, 0x00000035},
+ {0x00009be0, 0x00000035},
+ {0x00009be4, 0x00000035},
+ {0x00009be8, 0x00000035},
+ {0x00009bec, 0x00000035},
+ {0x00009bf0, 0x00000035},
+ {0x00009bf4, 0x00000035},
+ {0x00009bf8, 0x00000010},
+ {0x00009bfc, 0x0000001a},
+ {0x0000a210, 0x40806333},
+ {0x0000a214, 0x00106c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x018830c6},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x001a0bb5},
+ {0x0000a22c, 0x00000000},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a23c, 0x13c889af},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00007bb6},
+ {0x0000a248, 0x0fff3ffc},
+ {0x0000a24c, 0x00000001},
+ {0x0000a250, 0x0000e000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cc75380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a268, 0x00000001},
+ {0x0000a26c, 0x0e79e5c6},
+ {0x0000b26c, 0x0e79e5c6},
+ {0x0000c26c, 0x0e79e5c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000a27c, 0x050701ce},
+ {0x0000a338, 0x00000000},
+ {0x0000a33c, 0x00000000},
+ {0x0000a340, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a348, 0x3fffffff},
+ {0x0000a34c, 0x3fffffff},
+ {0x0000a350, 0x3fffffff},
+ {0x0000a354, 0x0003ffff},
+ {0x0000a358, 0x79bfaa03},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+};
+
+static const u32 ar5416Addac_9160[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x000000c0},
+ {0x0000989c, 0x00000018},
+ {0x0000989c, 0x00000004},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x000000c0},
+ {0x0000989c, 0x00000019},
+ {0x0000989c, 0x00000004},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000004},
+ {0x0000989c, 0x00000003},
+ {0x0000989c, 0x00000008},
+ {0x0000989c, 0x00000000},
+ {0x000098cc, 0x00000000},
+};
+
+static const u32 ar5416Addac_9160_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x000000c0},
+ {0x0000989c, 0x00000018},
+ {0x0000989c, 0x00000004},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x000000c0},
+ {0x0000989c, 0x00000019},
+ {0x0000989c, 0x00000004},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x0000989c, 0x00000000},
+ {0x000098cc, 0x00000000},
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 000000000..50fcd343c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9002_phy.h"
+
+#define AR9285_CLCAL_REDO_THRESH 1
+
+enum ar9002_cal_types {
+ ADC_GAIN_CAL = BIT(0),
+ ADC_DC_CAL = BIT(1),
+ IQ_MISMATCH_CAL = BIT(2),
+};
+
+static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ enum ar9002_cal_types cal_type)
+{
+ bool supported = false;
+ switch (ah->supp_cals & cal_type) {
+ case IQ_MISMATCH_CAL:
+ supported = true;
+ break;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
+ if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ IS_CHAN_HT20(chan)))
+ supported = true;
+ break;
+ }
+ return supported;
+}
+
+static void ar9002_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ ath_dbg(common, CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n");
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n");
+ break;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static bool ar9002_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool iscaldone = false;
+
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah, numChains);
+ caldata->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ ar9002_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(caldata->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_dbg(common, CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
+
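+ /*
+ * The denominators scale the accumulated power measurements down so
+ * the computed corrections fit the hardware fields: iCoff is masked
+ * to 6 bits and qCoff is clamped to a signed 5-bit range below.
+ */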
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+ (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
+
+ iCoff = iCoff & 0x3f;
+ ath_dbg(common, CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+ else if (qCoff <= -16)
+ qCoff = -16;
+
+ ath_dbg(common, CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_dbg(common, CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
+
+ ath_dbg(common, CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n",
+ i, qEvenMeasOffset);
+
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n",
+ i, iGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n",
+ i, qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_dbg(common, CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct ath9k_percal_data *calData =
+ ah->cal_list_curr->calData;
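+ /*
+ * numSamples scales with the configured log count and the number of
+ * calibration passes; it is the divisor used below to average the
+ * accumulated DC offset measurements.
+ */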
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
+
+ ath_dbg(common, CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n",
+ i, qEvenMeasOffset);
+
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n",
+ i, iDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n",
+ i, qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_dbg(common, CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0) {
+ /*
+ * A zero value indicates that no frames have been transmitted
+ * yet; temperature compensation can't be done until frames
+ * have been transmitted.
+ */
+ return;
+ } else {
+ slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - ah->initPDADC)*4) / slope;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ }
+}
+
+static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata, i;
+ int delta, currPDADC, regval;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0)
+ return;
+
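+ /*
+ * Rounded division: one gain-table step per 8 PDADC counts of drift
+ * with the 5 GHz high-power DAC setting, per 10 counts otherwise.
+ */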
+ if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+ delta = (currPDADC - ah->initPDADC + 4) / 8;
+ else
+ delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+ if (delta != ah->PDADCdelta) {
+ ah->PDADCdelta = delta;
+ for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+ regval = ah->originalGain[i] - delta;
+ if (regval < 0)
+ regval = 0;
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_TX_GAIN_TBL1 + i * 4,
+ AR_PHY_TX_GAIN, regval);
+ }
+ }
+}
+
+static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ u32 regVal;
+ unsigned int i;
+ u32 regList[][2] = {
+ { AR9285_AN_TOP3, 0 },
+ { AR9285_AN_RXTXBB1, 0 },
+ { AR9285_AN_RF2G1, 0 },
+ { AR9285_AN_RF2G2, 0 },
+ { AR9285_AN_TOP2, 0 },
+ { AR9285_AN_RF2G8, 0 },
+ { AR9285_AN_RF2G7, 0 },
+ { AR9285_AN_RF2G3, 0 },
+ };
+
+ REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ /* 7834, b1=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+ /* 9808, b27=1 */
+ REG_SET_BIT(ah, 0x9808, 1 << 27);
+ /* 786c,b23,1, pwddac=1 */
+ REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
+ /* 7854, b5,1, pdrxtxbb=1 */
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
+ /* 7854, b7,1, pdv2i=1 */
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
+ /* 7854, b8,1, pddacinterface=1 */
+ REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
+ /* 7824,b12,0, offcal=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
+ /* 7838, b1,0, pwddb=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
+ /* 7820,b11,0, enpacal=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
+ /* 7820,b25,1, pdpadrv1=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
+ /* 7820,b24,0, pdpadrv2=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
+ /* 7820,b23,0, pdpaout=0 */
+ REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
+ /* 783c,b14-16,7, padrvgn2tab_0=7 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ /*
+ * 7838,b29-31,0, padrvgn1tab_0=0
+ * does not matter since we turn it off
+ */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ /* 7828, b0-11, ccomp=fff */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ /* Set:
+ * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
+ * txon=1,paon=1,oscon=1,synthon_force=1
+ */
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
+
+ /* find off_6_1; */
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, AR9285_AN_RF2G6);
+ regVal |= (1 << (20 + i));
+ REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
+ udelay(1);
+ /* regVal = REG_READ(ah, 0x7834); */
+ regVal &= (~(0x1 << (20 + i)));
+ regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
+ AR9285_AN_RXTXBB1_SPARE9)
+ << (20 + i));
+ REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
+ }
+
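+ /* Bits 20-26 now hold the offset found by the search loop above. */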
+ regVal = (regVal >> 20) & 0x7f;
+
+ /* Update PA cal info */
+ if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = regVal;
+ }
+
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ /* 7834, b1=1 */
+ REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
+ /* 9808, b27=0 */
+ REG_CLR_BIT(ah, 0x9808, 1 << 27);
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regVal;
+ int i, offset, offs_6_1, offs_0;
+ u32 ccomp_org, reg_field;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ };
+
+ ath_dbg(common, CALIBRATE, "Running PA Calibration\n");
+
+ /* PA CAL is not needed for high power solution */
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
+ AR5416_EEP_TXGAIN_HIGH_POWER)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
+
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
+
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1 << (19 + i)));
+ reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
+ regVal |= (reg_field << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
+ udelay(1);
+ reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
+ offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
+ offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
+
+ offset = (offs_6_1<<1) | offs_0;
+ offset = offset - 0;
+ offs_6_1 = offset>>1;
+ offs_0 = offset & 1;
+
+ if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = offset;
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
+}
+
+static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ if (AR_SREV_9271(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9271_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9285_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ }
+}
+
+static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ if (OLC_FOR_AR9287_10_LATER)
+ ar9287_hw_olc_temp_compensation(ah);
+ else if (OLC_FOR_AR9280_20_LATER)
+ ar9280_hw_olc_temp_compensation(ah);
+}
+
+static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
+{
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+ bool nfcal, nfcal_pending = false, percal_pending;
+ int ret;
+
+ nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
+ if (ah->caldata)
+ nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+
+ percal_pending = (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING));
+
+ if (percal_pending && !nfcal) {
+ if (!ar9002_hw_per_calibration(ah, chan, rxchainmask, currCal))
+ return 0;
+
+ ah->cal_list_curr = currCal = currCal->calNext;
+ if (currCal->calState == CAL_WAITING) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ return 0;
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal || nfcal_pending) {
+ /*
+ * Get the value from the previous NF cal and update
+ * history buffer.
+ */
+ if (ath9k_hw_getnf(ah, chan)) {
+ /*
+ * Load the NF from history buffer of the current
+ * channel.
+ * NF is slow time-variant, so it is OK to use a
+ * historical value.
+ */
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (longcal) {
+ ath9k_hw_start_nfcal(ah, false);
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+ }
+ }
+
+ return !percal_pending;
+}
+
+/* Carrier leakage Calibration fix */
+static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ if (IS_CHAN_HT20(chan)) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+ REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ }
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return true;
+}
+
+static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u_int32_t txgain_max;
+ u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
+ u_int32_t reg_clc_I0, reg_clc_Q0;
+ u_int32_t i0_num = 0;
+ u_int32_t q0_num = 0;
+ u_int32_t total_num = 0;
+ u_int32_t reg_rf2g5_org;
+ bool retv = true;
+
+ if (!(ar9285_hw_cl_cal(ah, chan)))
+ return false;
+
+ txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
+ AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
+
+ for (i = 0; i < (txgain_max+1); i++) {
+ clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
+ AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
+ if (!(gain_mask & (1 << clc_gain))) {
+ gain_mask |= (1 << clc_gain);
+ clc_num++;
+ }
+ }
+
+ for (i = 0; i < clc_num; i++) {
+ reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
+ reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
+ if (reg_clc_I0 == 0)
+ i0_num++;
+
+ if (reg_clc_Q0 == 0)
+ q0_num++;
+ }
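+ /*
+ * If too many CLC table entries came back as zero, raise the TX bias
+ * (IC50TX), rerun the carrier leakage cal once and restore the
+ * original RF2G5 value.
+ */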
+ total_num = i0_num + q0_num;
+ if (total_num > AR9285_CLCAL_REDO_THRESH) {
+ reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
+ if (AR_SREV_9285E_20(ah)) {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_XE_SET);
+ } else {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_SET);
+ }
+ retv = ar9285_hw_cl_cal(ah, chan);
+ REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
+ }
+ return retv;
+}
+
+static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9271(ah)) {
+ if (!ar9285_hw_cl_cal(ah, chan))
+ return false;
+ } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
+ if (!ar9285_hw_clc(ah, chan))
+ return false;
+ } else {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (!AR_SREV_9287_11_OR_LATER(ah))
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (!AR_SREV_9287_11_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+ }
+
+ /* Do PA Calibration */
+ ar9002_hw_pa_cal(ah, true);
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+
+ if (ah->caldata)
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ ah->supp_cals = IQ_MISMATCH_CAL;
+
+ if (AR_SREV_9160_10_OR_LATER(ah))
+ ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL;
+
+ if (AR_SREV_9287(ah))
+ ah->supp_cals &= ~ADC_GAIN_CAL;
+
+ if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
+ INIT_CAL(&ah->adcgain_caldata);
+ INSERT_CAL(ah, &ah->adcgain_caldata);
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC Gain Calibration\n");
+ }
+
+ if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
+ INIT_CAL(&ah->adcdc_caldata);
+ INSERT_CAL(ah, &ah->adcdc_caldata);
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC DC Calibration\n");
+ }
+
+ if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+ }
+
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+ }
+
+ if (ah->caldata)
+ ah->caldata->CalValid = 0;
+
+ return true;
+}
+
+static const struct ath9k_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+
+static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah)) {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+ return;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_single_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_single_sample;
+ } else {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_multi_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_multi_sample;
+ }
+ ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+ if (AR_SREV_9287(ah))
+ ah->supp_cals &= ~ADC_GAIN_CAL;
+ }
+}
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
+ priv_ops->init_cal = ar9002_hw_init_cal;
+ priv_ops->setup_calibration = ar9002_hw_setup_calibration;
+
+ ops->calibrate = ar9002_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 000000000..d480d2f3e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/moduleparam.h>
+#include "hw.h"
+#include "ar5008_initvals.h"
+#include "ar9001_initvals.h"
+#include "ar9002_initvals.h"
+#include "ar9002_phy.h"
+
+/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
+
+static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9271(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg);
+ return 0;
+ }
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2);
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9280Modes_fast_clock_9280_2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniAddac,
+ ar5416Addac_9160_1_1);
+ } else {
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac);
+ }
+
+ if (!AR_SREV_9280_20_OR_LATER(ah)) {
+ /* Common for AR5416, AR913x, AR9160 */
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain);
+
+ /* Common for AR913x, AR9160 */
+ if (!AR_SREV_5416(ah))
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6TPC_9100);
+ else
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6TPC);
+ }
+
+ /* iniAddac needs to be modified for these chips */
+ if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
+ struct ar5416IniArray *addac = &ah->iniAddac;
+ u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
+ u32 *data;
+
+ data = devm_kzalloc(ah->dev, size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data, addac->ia_array, size);
+ addac->ia_array = data;
+
+ if (!AR_SREV_5416_22_OR_LATER(ah)) {
+ /* override CLKDRV value */
+ INI_RA(addac, 31, 1) = 0;
+ }
+ }
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniCckfirNormal,
+ ar9287Common_normal_cck_fir_coeff_9287_1_1);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_9287_1_1);
+ }
+ return 0;
+}
+
+static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
+{
+ u32 rxgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_17) {
+ rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
+
+ if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_13db_rxgain_9280_2);
+ else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_23db_rxgain_9280_2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2);
+ }
+}
+
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
+{
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_19) {
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_high_power_tx_gain_9280_2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2);
+ }
+}
+
+static void ar9271_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
+{
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9271Modes_high_power_tx_gain_9271);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9271Modes_normal_power_tx_gain_9271);
+}
+
+static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_1);
+ else if (AR_SREV_9280_20(ah))
+ ar9280_20_hw_init_rxgain_ini(ah);
+
+ if (AR_SREV_9271(ah)) {
+ ar9271_hw_init_txgain_ini(ah, txgain_type);
+ } else if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_1);
+ } else if (AR_SREV_9280_20(ah)) {
+ ar9280_20_hw_init_txgain_ini(ah, txgain_type);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ /* txgain table */
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_high_power);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_high_power_tx_gain_9285_1_2);
+ }
+ } else {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_normal_power);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_original_tx_gain_9285_1_2);
+ }
+ }
+ }
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288 bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
+ bool power_off)
+{
+ u8 i;
+ u32 val;
+
+ /* Nothing to do on restore for 11N */
+ if (!power_off /* !restore */) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 and later chips use the SerDes values from
+ * initvals.h, selected per chipset during
+ * __ath9k_hw_init().
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ }
+
+ udelay(1000);
+ }
+
+ if (power_off) {
+ /* clear bit 19 to disable L1 */
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ val = REG_READ(ah, AR_WA);
+
+ /*
+ * Set PCIe workaround bits.
+ * In AR9280 and AR9285, bit 14 in the WA register (disable L1)
+ * should only be set when the device enters D3 and cleared
+ * when the device comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
+ if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ } else if (AR_SREV_9280(ah)) {
+ if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ }
+ }
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+ /*
+ * Disable bits 6 and 7 before entering D3 to
+ * prevent a system hang.
+ */
+ val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
+ }
+
+ if (AR_SREV_9280(ah))
+ val |= AR_WA_BIT22;
+
+ if (AR_SREV_9285E_20(ah))
+ val |= AR_WA_BIT23;
+
+ REG_WRITE(ah, AR_WA, val);
+ } else {
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * For AR9280 chips, bit 22 of 0x4004
+ * needs to be set.
+ */
+ val = AR9280_WA_DEFAULT;
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ val = AR_WA_DEFAULT;
+ }
+ }
+
+ /* WAR for ASPM system hang */
+ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
+ val |= (AR_WA_BIT6 | AR_WA_BIT7);
+
+ if (AR_SREV_9285E_20(ah))
+ val |= AR_WA_BIT23;
+
+ REG_WRITE(ah, AR_WA, val);
+
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ }
+}
+
+static int ar9002_hw_get_radiorev(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
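+ /* Swap nibbles and bit-reverse to recover the radio revision byte. */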
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+int ar9002_hw_rf_claim(struct ath_hw *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ar9002_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ ath_err(ath9k_hw_common(ah),
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ ah->hw_version.analog5GhzRev = val;
+
+ return 0;
+}
+
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_13_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+}
+
+static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ ah->config.hw_hang_checks |= HW_BB_RIFS_HANG;
+ ah->config.hw_hang_checks |= HW_BB_DFS_HANG;
+ }
+
+ if (AR_SREV_9280(ah))
+ ah->config.hw_hang_checks |= HW_BB_RX_CLEAR_STUCK_HANG;
+
+ if (AR_SREV_5416(ah) || AR_SREV_9100(ah) || AR_SREV_9160(ah))
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+}
+
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
+int ar9002_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ int ret;
+
+ ret = ar9002_hw_init_mode_regs(ah);
+ if (ret)
+ return ret;
+
+ priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9002_hw_init_hang_checks;
+
+ ops->config_pci_powersave = ar9002_hw_configpcipowersave;
+
+ ret = ar5008_hw_attach_phy_ops(ah);
+ if (ret)
+ return ret;
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ ar9002_hw_attach_phy_ops(ah);
+
+ ar9002_hw_attach_calib_ops(ah);
+ ar9002_hw_attach_mac_ops(ah);
+ return 0;
+}
+
+void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 modesIndex;
+ int i;
+
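+ /*
+ * Pick the ini-table column, following the modes-table layout:
+ * 1 = 5G HT20, 2 = 5G HT40, 3 = 2G HT40, 4 = 2G HT20.
+ */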
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes_9271_ANI_reg.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes_9271_ANI_reg, i, 0);
+ u32 val = INI_RA(&ah->iniModes_9271_ANI_reg, i, modesIndex);
+ u32 val_orig;
+
+ if (reg == AR_PHY_CCK_DETECT) {
+ val_orig = REG_READ(ah, reg);
+ val &= AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
+ val_orig &= ~AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
+
+ REG_WRITE(ah, reg, val|val_orig);
+ } else
+ REG_WRITE(ah, reg, val);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
new file mode 100644
index 000000000..4d18c66a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -0,0 +1,3180 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+static const u32 ar9280Modes_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20},
+ {0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b},
+ {0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010},
+ {0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
+ {0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010},
+ {0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210},
+ {0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c},
+ {0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000},
+ {0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+ {0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000},
+ {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000},
+};
+
+static const u32 ar9280Common_9280_2[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00004024, 0x0000001f},
+ {0x00004060, 0x00000000},
+ {0x00004064, 0x00000000},
+ {0x00007010, 0x00000033},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000000},
+ {0x000080c0, 0x2a80001a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000040},
+ {0x00008314, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0x00481043},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xafa68e30},
+ {0x00009810, 0xfd14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x0000984c, 0x0040233c},
+ {0x0000a84c, 0x0040233c},
+ {0x00009854, 0x00000044},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x00009910, 0x01002310},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x04900000},
+ {0x0000a920, 0x04900000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280c00a},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x2108ecff},
+ {0x00009940, 0x14750604},
+ {0x0000c95c, 0x004b6a8e},
+ {0x00009970, 0x190fb514},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x006f0000},
+ {0x000099b0, 0x03051000},
+ {0x000099b4, 0x00000820},
+ {0x000099c4, 0x06336f77},
+ {0x000099c8, 0x6af6532f},
+ {0x000099cc, 0x08f186c8},
+ {0x000099d0, 0x00046384},
+ {0x000099d4, 0x00000000},
+ {0x000099d8, 0x00000000},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000000},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099f0, 0x00000000},
+ {0x000099fc, 0x00001042},
+ {0x0000a208, 0x803e4788},
+ {0x0000a210, 0x4080a333},
+ {0x0000a214, 0x40206c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x01834061},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x000003b5},
+ {0x0000a22c, 0x233f7180},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00007bb6},
+ {0x0000a248, 0x0fff3ffc},
+ {0x0000a24c, 0x00000000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cdbd380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0e79e5c6},
+ {0x0000b26c, 0x0e79e5c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+ {0x0000a3e4, 0x00000000},
+ {0x0000a3e8, 0x18c43433},
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007818, 0x07e41000},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x0000783c, 0x07e40000},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x9259269a},
+ {0x00007860, 0x52802000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007898, 0x2a850160},
+};
+
+static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009820, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000f0f, 0x01000f0f},
+ {0x00009828, 0x0b020001, 0x0b020001},
+ {0x00009834, 0x00000f0f, 0x00000f0f},
+ {0x00009844, 0x03721821, 0x03721821},
+ {0x00009914, 0x00000898, 0x00001130},
+ {0x00009918, 0x0000000b, 0x00000016},
+};
+
+static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308},
+ {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b80, 0x00008b80},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b84, 0x00008b84},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b88, 0x00008b88},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b8c, 0x00008b8c},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b90, 0x00008b90},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b94, 0x00008b94},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008b98, 0x00008b98},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008ba4, 0x00008ba4},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008ba8, 0x00008ba8},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x00008bac, 0x00008bac},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00008bb0, 0x00008bb0},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00008bb4, 0x00008bb4},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008ba1, 0x00008ba1},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00008ba5, 0x00008ba5},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x00008ba9, 0x00008ba9},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x00008bad, 0x00008bad},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x00008bb1, 0x00008bb1},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00008bb5, 0x00008bb5},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008ba2, 0x00008ba2},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00008ba6, 0x00008ba6},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x00008baa, 0x00008baa},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00008bae, 0x00008bae},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x00008bb2, 0x00008bb2},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008bb6, 0x00008bb6},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x00008ba3, 0x00008ba3},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x00008ba7, 0x00008ba7},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008bab, 0x00008bab},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008baf, 0x00008baf},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008bb3, 0x00008bb3},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008bb7, 0x00008bb7},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008bc3, 0x00008bc3},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008bc7, 0x00008bc7},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008bcb, 0x00008bcb},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008bcf, 0x00008bcf},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008bd3, 0x00008bd3},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008bd7, 0x00008bd7},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008bdb, 0x00008bdb},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008bdb, 0x00008bdb},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008bdb, 0x00008bdb},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008bdb, 0x00008bdb},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008bdb, 0x00008bdb},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008bdb, 0x00008bdb},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008bdb, 0x00008bdb},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008bdb, 0x00008bdb},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008bdb, 0x00008bdb},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008bdb, 0x00008bdb},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008bdb, 0x00008bdb},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008bdb, 0x00008bdb},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008bdb, 0x00008bdb},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008bdb, 0x00008bdb},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008bdb, 0x00008bdb},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008bdb, 0x00008bdb},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008bdb, 0x00008bdb},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008bdb, 0x00008bdb},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001055, 0x00001055},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001055, 0x00001055},
+};
+
+static const u32 ar9280Modes_original_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00008000, 0x00008000},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00008000, 0x00008000},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00008000, 0x00008000},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00008000, 0x00008000},
+ {0x00009a10, 0x00008194, 0x00008194, 0x00008000, 0x00008000},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063},
+};
+
+static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290},
+ {0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300},
+ {0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304},
+ {0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308},
+ {0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c},
+ {0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000},
+ {0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004},
+ {0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008},
+ {0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c},
+ {0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080},
+ {0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084},
+ {0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088},
+ {0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c},
+ {0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100},
+ {0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104},
+ {0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108},
+ {0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c},
+ {0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110},
+ {0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114},
+ {0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180},
+ {0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184},
+ {0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188},
+ {0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c},
+ {0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190},
+ {0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194},
+ {0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0},
+ {0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c},
+ {0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8},
+ {0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284},
+ {0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288},
+ {0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224},
+ {0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290},
+ {0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300},
+ {0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304},
+ {0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308},
+ {0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c},
+ {0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380},
+ {0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384},
+ {0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700},
+ {0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704},
+ {0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708},
+ {0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c},
+ {0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780},
+ {0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784},
+ {0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00},
+ {0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04},
+ {0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08},
+ {0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c},
+ {0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80},
+ {0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84},
+ {0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88},
+ {0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c},
+ {0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90},
+ {0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80},
+ {0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84},
+ {0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88},
+ {0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c},
+ {0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90},
+ {0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310},
+ {0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314},
+ {0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320},
+ {0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324},
+ {0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328},
+ {0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c},
+ {0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330},
+ {0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334},
+ {0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321},
+ {0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325},
+ {0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329},
+ {0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d},
+ {0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331},
+ {0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335},
+ {0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322},
+ {0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326},
+ {0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a},
+ {0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e},
+ {0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332},
+ {0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336},
+ {0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323},
+ {0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327},
+ {0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b},
+ {0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f},
+ {0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333},
+ {0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337},
+ {0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343},
+ {0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347},
+ {0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b},
+ {0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f},
+ {0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353},
+ {0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357},
+ {0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b},
+ {0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b},
+ {0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b},
+ {0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b},
+ {0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b},
+ {0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b},
+ {0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b},
+ {0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b},
+ {0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b},
+ {0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b},
+ {0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b},
+ {0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b},
+ {0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b},
+ {0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a},
+ {0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a},
+};
+
+static const u32 ar9280Modes_high_power_tx_gain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652},
+ {0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce},
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002},
+ {0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008},
+ {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010},
+ {0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012},
+ {0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014},
+ {0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a},
+ {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211},
+ {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213},
+ {0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411},
+ {0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413},
+ {0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811},
+ {0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813},
+ {0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14},
+ {0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50},
+ {0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c},
+ {0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a},
+ {0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92},
+ {0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2},
+ {0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5},
+ {0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54},
+ {0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5},
+ {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
+ {0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
+ {0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff},
+ {0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
+ {0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000},
+ {0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
+ {0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480},
+};
+
+static const u32 ar9280Modes_original_tx_gain_9280_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652},
+ {0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce},
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002},
+ {0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009},
+ {0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b},
+ {0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012},
+ {0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048},
+ {0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a},
+ {0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211},
+ {0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213},
+ {0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b},
+ {0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412},
+ {0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414},
+ {0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a},
+ {0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649},
+ {0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b},
+ {0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49},
+ {0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48},
+ {0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a},
+ {0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88},
+ {0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a},
+ {0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9},
+ {0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42},
+ {0x0000a3ec, 0x00f70081, 0x00f70081, 0x00f70081, 0x00f70081},
+ {0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
+ {0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff},
+ {0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
+ {0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000},
+ {0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
+ {0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
+};
+
+static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffc},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffd},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9285Modes_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20},
+ {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
+ {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+};
+
+static const u32 ar9285Common_9285_1_2[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020045},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00004024, 0x0000001f},
+ {0x00004060, 0x00000000},
+ {0x00004064, 0x00000000},
+ {0x00007010, 0x00000031},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000000},
+ {0x000080c0, 0x2a80001a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008120, 0x08f04810},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081d0, 0x0000320a},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000040},
+ {0x00008314, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000001},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x00010380},
+ {0x00008344, 0x00481043},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xafe68e30},
+ {0x00009810, 0xfd14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x0000984c, 0x0040233c},
+ {0x00009854, 0x00000044},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x00009910, 0x01002310},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x04900000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009940, 0x14750604},
+ {0x00009948, 0x9280c00a},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x2108ecff},
+ {0x00009968, 0x000003ce},
+ {0x00009970, 0x192bb514},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x2def0400},
+ {0x000099b0, 0x03051000},
+ {0x000099b4, 0x00000820},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000000},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099f0, 0x00000000},
+ {0x0000a208, 0x803e68c8},
+ {0x0000a210, 0x4080a333},
+ {0x0000a214, 0x00206c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x01834061},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x000003b5},
+ {0x0000a22c, 0x00000000},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a244, 0x00000000},
+ {0x0000a248, 0xfffffffc},
+ {0x0000a24c, 0x00000000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0ccb5380},
+ {0x0000a25c, 0x15151501},
+ {0x0000a260, 0xdfa90f01},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0ebae9e6},
+ {0x0000d270, 0x0d820820},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3e4, 0x00000000},
+ {0x0000a3e8, 0x18c43433},
+ {0x0000a3ec, 0x00f70081},
+ {0x00007800, 0x00140000},
+ {0x00007804, 0x0e4548d8},
+ {0x00007808, 0x54214514},
+ {0x0000780c, 0x02025830},
+ {0x00007810, 0x71c0d388},
+ {0x0000781c, 0x00000000},
+ {0x00007824, 0x00d86fff},
+ {0x0000782c, 0x6e36d97b},
+ {0x00007834, 0x71400087},
+ {0x00007844, 0x000c0db6},
+ {0x00007848, 0x6db6246f},
+ {0x0000784c, 0x6d9b66db},
+ {0x00007850, 0x6d8c6dba},
+ {0x00007854, 0x00040000},
+ {0x00007858, 0xdb003012},
+ {0x0000785c, 0x04924914},
+ {0x00007860, 0x21084210},
+ {0x00007864, 0xf7d7ffde},
+ {0x00007868, 0xc2034080},
+ {0x00007870, 0x10142c00},
+};
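
For context only (this is not part of the patch): the tables above come in two shapes, `[][2]` rows of `{address, value}` applied in all modes, and `[][5]` rows of `{address, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}` where one per-mode column is selected. A minimal standalone sketch of how such a mode table could be applied is shown below; `reg_write()` and `write_mode_table()` are hypothetical stand-ins, not the driver's actual API.

/*
 * Sketch (assumption, not driver code): walk a [][5] initval table and
 * write the chosen per-mode column to each register address.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Hypothetical register write; a real driver would do MMIO/USB I/O here. */
static void reg_write(u32 addr, u32 val)
{
	printf("REG 0x%08x <= 0x%08x\n", addr, val);
}

/* Column index follows the table comments: 1=5G_HT20 ... 4=2G_HT20. */
static void write_mode_table(const u32 (*tbl)[5], size_t rows, unsigned column)
{
	size_t i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][column]);
}

int main(void)
{
	/* Two sample rows in the same format as the tables in this patch. */
	static const u32 sample[][5] = {
		{0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
		{0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200},
	};

	/* Apply the 2G_HT20 column (index 4). */
	write_mode_table(sample, sizeof(sample) / sizeof(sample[0]), 4);
	return 0;
}

In the same spirit, an allmodes `[][2]` table would simply write `tbl[i][1]` to `tbl[i][0]` for every row, with no column selection.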
+
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
+ {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
+ {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
+ {0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803},
+ {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
+ {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
+ {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
+ {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+ {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+};
+
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f},
+ {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8},
+ {0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b},
+ {0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e},
+ {0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801},
+ {0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe},
+ {0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652},
+ {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
+ {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+ {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+};
+
+static const u32 ar9285Modes_XE2_0_normal_power[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f},
+ {0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
+ {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b},
+ {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
+ {0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441},
+ {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
+ {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652},
+ {0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c},
+ {0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+ {0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c},
+ {0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c},
+};
+
+static const u32 ar9285Modes_XE2_0_high_power[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8},
+ {0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b},
+ {0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e},
+ {0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443},
+ {0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe},
+ {0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c},
+ {0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7},
+ {0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+ {0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7},
+};
+
+static const u32 ar9287Modes_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300},
+ {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001},
+ {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0},
+ {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2},
+ {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e},
+ {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20},
+ {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881},
+ {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898},
+ {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210},
+ {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce},
+ {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c},
+ {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4},
+ {0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444},
+ {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+ {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9287Common_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00004024, 0x0000001f},
+ {0x00004060, 0x00000000},
+ {0x00004064, 0x00000000},
+ {0x00007010, 0x00000033},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000000},
+ {0x000080c0, 0x2a80001a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18487320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000040},
+ {0x00008314, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0x01c81043},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0x0fffffff},
+ {0x00008394, 0x0fffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xafe68e30},
+ {0x00009810, 0xfd14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x0000984c, 0x0040233c},
+ {0x0000a84c, 0x0040233c},
+ {0x00009854, 0x00000044},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x00009910, 0x10002310},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x04900000},
+ {0x0000a920, 0x04900000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009930, 0x00000000},
+ {0x0000a930, 0x00000000},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280c00a},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x0108ecff},
+ {0x00009940, 0x14750604},
+ {0x0000c95c, 0x004b6a8e},
+ {0x00009970, 0x990bb514},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x0c6f0000},
+ {0x000099b0, 0x03051000},
+ {0x000099b4, 0x00000820},
+ {0x000099c4, 0x06336f77},
+ {0x000099c8, 0x6af6532f},
+ {0x000099cc, 0x08f186c8},
+ {0x000099d0, 0x00046384},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000000},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099f0, 0x00000000},
+ {0x000099fc, 0x00001042},
+ {0x0000a208, 0x803e4788},
+ {0x0000a210, 0x4080a333},
+ {0x0000a214, 0x40206c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x01834061},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x000003b5},
+ {0x0000a22c, 0x233f7180},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a23c, 0x13c889af},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00000000},
+ {0x0000a248, 0xfffffffc},
+ {0x0000a24c, 0x00000000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cdbd380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a264, 0x00418a11},
+ {0x0000b264, 0x00418a11},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0e79e5c6},
+ {0x0000b26c, 0x0e79e5c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000a27c, 0x050701ce},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000b398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+ {0x0000a3e4, 0x00000000},
+ {0x0000a3e8, 0x18c43433},
+ {0x0000a3ec, 0x00f70081},
+ {0x0000a3f0, 0x01036a1e},
+ {0x0000a3f4, 0x00000000},
+ {0x0000b3f4, 0x00000000},
+ {0x0000a7d8, 0x000003f1},
+ {0x00007800, 0x00000800},
+ {0x00007804, 0x6c35ffd2},
+ {0x00007808, 0x6db6c000},
+ {0x0000780c, 0x6db6cb30},
+ {0x00007810, 0x6db6cb6c},
+ {0x00007814, 0x0501e200},
+ {0x00007818, 0x0094128d},
+ {0x0000781c, 0x976ee392},
+ {0x00007820, 0xf75ff6fc},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb003012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x00140000},
+ {0x00007838, 0x0e4548d8},
+ {0x0000783c, 0x54214514},
+ {0x00007840, 0x02025830},
+ {0x00007844, 0x71c0d388},
+ {0x00007848, 0x934934a8},
+ {0x00007850, 0x00000000},
+ {0x00007854, 0x00000800},
+ {0x00007858, 0x6c35ffd2},
+ {0x0000785c, 0x6db6c000},
+ {0x00007860, 0x6db6cb30},
+ {0x00007864, 0x6db6cb6c},
+ {0x00007868, 0x0501e200},
+ {0x0000786c, 0x0094128d},
+ {0x00007870, 0x976ee392},
+ {0x00007874, 0xf75ff6fc},
+ {0x00007878, 0x00040000},
+ {0x0000787c, 0xdb003012},
+ {0x00007880, 0x04924914},
+ {0x00007884, 0x21084210},
+ {0x00007888, 0x001b6db0},
+ {0x0000788c, 0x00376b63},
+ {0x00007890, 0x06db6db6},
+ {0x00007894, 0x006d8000},
+ {0x00007898, 0x48100000},
+ {0x0000789c, 0x00000000},
+ {0x000078a0, 0x08000000},
+ {0x000078a4, 0x0007ffd8},
+ {0x000078a8, 0x0007ffd8},
+ {0x000078ac, 0x001c0020},
+ {0x000078b0, 0x00060aeb},
+ {0x000078b4, 0x40008080},
+ {0x000078b8, 0x2a850160},
+};
+
+static const u32 ar9287Common_normal_cck_fir_coeff_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a1f4, 0x00fffeff},
+ {0x0000a1f8, 0x00f5f9ff},
+ {0x0000a1fc, 0xb79f6427},
+};
+
+static const u32 ar9287Common_japan_2484_cck_fir_coeff_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a1f4, 0x00000000},
+ {0x0000a1f8, 0xefff0301},
+ {0x0000a1fc, 0xca9228ee},
+};
+
+static const u32 ar9287Modes_tx_gain_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b},
+ {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a},
+ {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c},
+ {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc},
+ {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4},
+ {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc},
+ {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede},
+ {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e},
+ {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e},
+ {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e},
+ {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062},
+ {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064},
+ {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4},
+ {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa},
+ {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac},
+ {0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4},
+ {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4},
+ {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134},
+ {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174},
+ {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c},
+ {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e},
+ {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be},
+ {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe},
+ {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000},
+};
+
+static const u32 ar9287Modes_rx_gain_9287_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120},
+ {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c},
+ {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130},
+ {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c},
+ {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210},
+ {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284},
+ {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290},
+ {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294},
+ {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8},
+ {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac},
+ {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4},
+ {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8},
+ {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4},
+ {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c},
+ {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04},
+ {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c},
+ {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10},
+ {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14},
+ {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90},
+ {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94},
+ {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4},
+ {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8},
+ {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04},
+ {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18},
+ {0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24},
+ {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c},
+ {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380},
+ {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384},
+ {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388},
+ {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710},
+ {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714},
+ {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718},
+ {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10},
+ {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14},
+ {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c},
+ {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90},
+ {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94},
+ {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90},
+ {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94},
+ {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0},
+ {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8},
+ {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac},
+ {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0},
+ {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1},
+ {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5},
+ {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9},
+ {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1},
+ {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5},
+ {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9},
+ {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9},
+ {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd},
+ {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1},
+ {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2},
+ {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6},
+ {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca},
+ {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2},
+ {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6},
+ {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda},
+ {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb},
+ {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf},
+ {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3},
+ {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb},
+ {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067},
+ {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067},
+};
+
+static const u32 ar9271Modes_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+ {0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300},
+ {0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001},
+ {0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620},
+ {0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053},
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310},
+ {0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c},
+ {0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77},
+ {0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f},
+ {0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8},
+ {0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384},
+ {0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db},
+ {0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004},
+ {0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000},
+ {0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e},
+};
+
+static const u32 ar9271Common_9271[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020045},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00004024, 0x0000001f},
+ {0x00004060, 0x00000000},
+ {0x00004064, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000000},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a80001a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008120, 0x08f04810},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x32143320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081d0, 0x0000320a},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x00000000},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000040},
+ {0x00008314, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000001},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x00010380},
+ {0x00008344, 0x00581043},
+ {0x00007010, 0x00000030},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007800, 0x00140000},
+ {0x00007804, 0x0e4548d8},
+ {0x00007808, 0x54214514},
+ {0x0000780c, 0x02025820},
+ {0x00007810, 0x71c0d388},
+ {0x00007814, 0x924934a8},
+ {0x0000781c, 0x00000000},
+ {0x00007828, 0x66964300},
+ {0x0000782c, 0x8db6d961},
+ {0x00007830, 0x8db6d96c},
+ {0x00007834, 0x6140008b},
+ {0x0000783c, 0x72ee0a72},
+ {0x00007840, 0xbbfffffc},
+ {0x00007844, 0x000c0db6},
+ {0x00007848, 0x6db6246f},
+ {0x0000784c, 0x6d9b66db},
+ {0x00007850, 0x6d8c6dba},
+ {0x00007854, 0x00040000},
+ {0x00007858, 0xdb003012},
+ {0x0000785c, 0x04924914},
+ {0x00007860, 0x21084210},
+ {0x00007864, 0xf7d7ffde},
+ {0x00007868, 0xc2034080},
+ {0x00007870, 0x10142c00},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xafe68e30},
+ {0x00009810, 0xfd14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x0000984c, 0x0040233c},
+ {0x00009854, 0x00000044},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x04900000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009940, 0x14750604},
+ {0x00009948, 0x9280c00a},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x0108ecff},
+ {0x00009968, 0x000003ce},
+ {0x00009970, 0x192bb514},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x00009980, 0x00000000},
+ {0x00009984, 0x00000000},
+ {0x00009988, 0x00000000},
+ {0x0000998c, 0x00000000},
+ {0x00009990, 0x00000000},
+ {0x00009994, 0x00000000},
+ {0x00009998, 0x00000000},
+ {0x0000999c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x2def0400},
+ {0x000099b0, 0x03051000},
+ {0x000099b4, 0x00000820},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000000},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099f0, 0x00000000},
+ {0x0000a208, 0x803e68c8},
+ {0x0000a210, 0x4080a333},
+ {0x0000a214, 0x00206c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x01834061},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x000003b5},
+ {0x0000a22c, 0x00000000},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a244, 0x00000000},
+ {0x0000a248, 0xfffffffc},
+ {0x0000a24c, 0x00000000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0ccb5380},
+ {0x0000a25c, 0x15151501},
+ {0x0000a260, 0xdfa90f01},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0ebae9e6},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3a0, 0x00000000},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0x00000000},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x00000000},
+ {0x0000a3b4, 0x00000000},
+ {0x0000a3b8, 0x00000000},
+ {0x0000a3bc, 0x00000000},
+ {0x0000a3c0, 0x00000000},
+ {0x0000a3c4, 0x00000000},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3e4, 0x00000000},
+ {0x0000a3e8, 0x18c43433},
+ {0x0000a3ec, 0x00f70081},
+ {0x0000a3f0, 0x01036a2f},
+ {0x0000a3f4, 0x00000000},
+ {0x0000d270, 0x0d820820},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+};
+
+static const u32 ar9271Modes_9271_ANI_reg[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2},
+ {0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e},
+ {0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881},
+ {0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8},
+ {0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d},
+ {0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+};
+
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610},
+ {0x0000a314, 0x00000000, 0x00000000, 0x00024650, 0x00024650},
+ {0x0000a318, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x000316d2, 0x000316d2},
+ {0x0000a320, 0x00000000, 0x00000000, 0x00039758, 0x00039758},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759},
+ {0x0000a328, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e},
+ {0x0000a334, 0x000368de, 0x000368de, 0x0004979f, 0x0004979f},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0004d7df, 0x0004d7df},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029},
+ {0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff},
+ {0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4},
+ {0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04},
+ {0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21c652, 0x0a21c652},
+ {0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd},
+ {0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
+ {0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd},
+ {0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd},
+};
+
+static const u32 ar9271Modes_high_power_tx_gain_9271[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600},
+ {0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40},
+ {0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80},
+ {0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de},
+ {0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e},
+ {0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e},
+ {0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df},
+ {0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b},
+ {0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff},
+ {0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba},
+ {0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652},
+ {0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7},
+ {0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063},
+ {0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
+ {0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
+ {0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63},
+ {0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063},
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 000000000..f816909d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include <linux/export.h>
+
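+/*
+ * Mask for the buffer length field (bits [11:0]) of a descriptor's
+ * ds_ctl1 word (see ath9k_hw_setuprxdesc() below).
+ */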
+#define AR_BufLen 0x00000fff
+
+static void ar9002_hw_rx_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
+static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ ((struct ath_desc*) ds)->ds_link = ds_link;
+}
+
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
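+ /*
+ * With RAC (read-and-clear) support, reading AR_ISR_RAC also
+ * acknowledges the primary ISR bits, so the explicit write-back
+ * of 'isr' further below is skipped for such chips.
+ */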
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
+ AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ } else {
+ s0_s = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0_s);
+ s1_s = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+ isr &= ~(AR_ISR_TXOK |
+ AR_ISR_TXDESC |
+ AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ ath_dbg(common, INTERRUPT,
+ "receive FIFO overrun interrupt\n");
+ }
+
+ *masked |= mask2;
+ }
+
+ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
+ u32 s5_s;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ } else {
+ s5_s = REG_READ(ah, AR_ISR_S5);
+ }
+
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+ !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ *masked |= ATH9K_INT_TIM_TIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5_s);
+ isr &= ~AR_ISR_GENTMR;
+ }
+ }
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+ REG_READ(ah, AR_ISR);
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ if (sync_cause) {
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_dbg(common, ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_dbg(common, ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ ath_dbg(common, INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ ath_dbg(common, INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+
+ return true;
+}
+
+static void
+ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ u32 ctl1, ctl6;
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+
+ ACCESS_ONCE(ads->ds_link) = i->link;
+ ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+
+ ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+ ctl6 = SM(i->keytype, AR_EncrType);
+
+ if (AR_SREV_9285(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+
+ if ((i->is_first || i->is_last) &&
+ i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+ ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+ ACCESS_ONCE(ads->ds_ctl2) = 0;
+ ACCESS_ONCE(ads->ds_ctl3) = 0;
+ }
+
+ if (!i->is_first) {
+ ACCESS_ONCE(ads->ds_ctl0) = 0;
+ ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+ ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+ return;
+ }
+
+ ctl1 |= (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0)
+ | SM(i->type, AR_FrameType)
+ | (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ switch (i->aggr) {
+ case AGGR_BUF_FIRST:
+ ctl6 |= SM(i->aggr_len, AR_AggrLen);
+ /* fall through */
+ case AGGR_BUF_MIDDLE:
+ ctl1 |= AR_IsAggr | AR_MoreAggr;
+ ctl6 |= SM(i->ndelim, AR_PadDelim);
+ break;
+ case AGGR_BUF_LAST:
+ ctl1 |= AR_IsAggr;
+ break;
+ case AGGR_BUF_NONE:
+ break;
+ }
+
+ ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower[0], AR_XmitPower0)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+ ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+ ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+
+ if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+ return;
+
+ ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+ ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+ ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+ ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
+}
+
+static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ u32 status;
+
+ status = ACCESS_ONCE(ads->ds_txstatus9);
+ if ((status & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ts->ts_tstamp = ads->AR_SendTimestamp;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (status & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ ts->tid = MS(status, AR_TxTid);
+ ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+ ts->ts_seqnum = MS(status, AR_SeqNum);
+
+ status = ACCESS_ONCE(ads->ds_txstatus0);
+ ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+ if (status & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ status = ACCESS_ONCE(ads->ds_txstatus1);
+ if (status & AR_FrmXmitOK)
+ ts->ts_status |= ATH9K_TX_ACKED;
+ else {
+ if (status & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (status & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (status & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ }
+ if (status & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+ if (status & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (status & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (status & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+ ts->ts_longretry = MS(status, AR_DataFailCnt);
+ ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
+
+ status = ACCESS_ONCE(ads->ds_txstatus5);
+ ts->ts_rssi = MS(status, AR_TxRSSICombined);
+ ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
+
+ ts->evm0 = ads->AR_TxEVM0;
+ ts->evm1 = ads->AR_TxEVM1;
+ ts->evm2 = ads->AR_TxEVM2;
+
+ return 0;
+}
+
+static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ switch (index) {
+ case 0:
+ return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0);
+ case 1:
+ return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1);
+ case 2:
+ return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2);
+ case 3:
+ return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3);
+ default:
+ return -1;
+ }
+}
+
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ memset(&ads->u.rx, 0, sizeof(ads->u.rx));
+}
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ops->rx_enable = ar9002_hw_rx_enable;
+ ops->set_desc_link = ar9002_hw_set_desc_link;
+ ops->get_isr = ar9002_hw_get_isr;
+ ops->set_txdesc = ar9002_set_txdesc;
+ ops->proc_txdesc = ar9002_hw_proc_txdesc;
+ ops->get_duration = ar9002_hw_get_duration;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 000000000..fc08162b5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radios, MAC/BB and host PCIe/USB interface
+ * embedded into a single chip and require less programming.
+ *
+ * The following single-chip solutions exist, each with its respective
+ * embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9271 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+#include "ar9002_phy.h"
+
+/**
+ * ar9002_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function used to change the channel on single-chip devices,
+ * that is, the AR9280 and later.
+ *
+ * It takes the channel value in MHz, computes the corresponding hardware
+ * channel value and programs it. Writes to the analog bus are assumed to
+ * have been enabled already.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
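+ *
+ * Worked example (illustrative, not part of the original comment):
+ * tuning to 5180 MHz with EEP_FRAC_N_5G == 0 (integer-N mode).
+ * Since 5180 % 20 == 0, amodeRefSel = 3 and refDivA stays 24, so
+ *
+ *   ndiv = (5180 * (24 >> 3)) / 60 = 259  =>  chansel = 0x103, chanfrac = 0
+ *
+ * Checking against the 5GHz expression with freq_ref = 40MHz/(24>>3):
+ *   (3/2) * (40/3) * 259 = 20 * 259 = 5180 MHz, as expected.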
+ */
+static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = CHANSEL_2G(freq);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+ aModeRefSel = 0;
+ else if ((freq % 20) == 0)
+ aModeRefSel = 3;
+ else if ((freq % 10) == 0)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
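+ /* aModeRefSel == 0 here: fall through to the fractional-N path */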
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = CHANSEL_5G(freq);
+
+ /* RefDivA setting */
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA,
+ AR_AN_SYNTH9_REFDIVA_S, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+
+ return 0;
+}
+
+/**
+ * ar9002_hw_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel to operate on
+ *
+ * For single-chip solutions. Converts the known spur frequencies to a
+ * baseband offset for the given input channel frequency and computes the
+ * register settings below.
+ */
+static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ static const int pilot_mask_reg[4] = {
+ AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ static const int chan_mask_reg[4] = {
+ AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ static const int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(mask_m, 0, sizeof(mask_m));
+ memset(mask_p, 0, sizeof(mask_p));
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
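+/* Initialise open-loop power control (OLC) state for chips that support it */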
+static void ar9002_olc_init(struct ath_hw *ah)
+{
+ u32 i;
+
+ if (!OLC_FOR_AR9280_20_LATER)
+ return;
+
+ if (OLC_FOR_AR9287_10_LATER) {
+ REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_S,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ udelay(100);
+ } else {
+ for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
+ ah->originalGain[i] =
+ MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
+ AR_PHY_TX_GAIN);
+ ah->PDADCdelta = 0;
+ }
+}
+
+static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int ref_div = 5;
+ int pll_div = 0x2c;
+ u32 pll;
+
+ if (chan && IS_CHAN_5GHZ(chan) && !IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ if (AR_SREV_9280_20(ah)) {
+ ref_div = 10;
+ pll_div = 0x50;
+ } else {
+ pll_div = 0x28;
+ }
+ }
+
+ pll = SM(ref_div, AR_RTC_9160_PLL_REFDIV);
+ pll |= SM(pll_div, AR_RTC_9160_PLL_DIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ return pll;
+}
+
+static void ar9002_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ int16_t nf;
+
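+ /* The minimum CCA power fields are 9-bit two's-complement values */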
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+ nfarray[0] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (IS_CHAN_HT40(ah->curchan))
+ nfarray[3] = sign_extend32(nf, 8);
+
+ if (!(ah->rxchainmask & BIT(1)))
+ return;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
+ nfarray[1] = sign_extend32(nf, 8);
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
+ if (IS_CHAN_HT40(ah->curchan))
+ nfarray[4] = sign_extend32(nf, 8);
+}
+
+static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
+{
+ if (AR_SREV_9285(ah)) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9285_2GHZ;
+ } else if (AR_SREV_9287(ah)) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ;
+ } else if (AR_SREV_9271(ah)) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9271_2GHZ;
+ } else {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9280_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ;
+ ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ;
+ ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9280_5GHZ;
+ }
+}
+
+static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ u32 regval;
+
+ regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >>
+ AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S;
+ antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >>
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
+ antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
+ AR_PHY_9285_FAST_DIV_BIAS_S;
+ antconf->lna1_lna2_switch_delta = -1;
+ antconf->lna1_lna2_delta = -3;
+ antconf->div_group = 0;
+}
+
+static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ u32 regval;
+
+ regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF |
+ AR_PHY_9285_FAST_DIV_BIAS);
+ regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S)
+ & AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+ regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S)
+ & AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+ regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S)
+ & AR_PHY_9285_FAST_DIV_BIAS);
+
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
+}
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ u8 antdiv_ctrl1, antdiv_ctrl2;
+ u32 regval;
+
+ if (enable) {
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
+
+ /*
+ * Don't disable BT ant to allow BB to control SWCOM.
+ */
+ btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ } else {
+ /*
+ * Disable antenna diversity, use LNA1 only.
+ */
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
+
+ /*
+ * Disable BT Ant. to allow concurrent BT and WLAN receive.
+ */
+ btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ /*
+ * Program SWCOM table to make sure RF switch always parks
+ * at BT side.
+ */
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ }
+
+ regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
+ /*
+ * Clear ant_fast_div_bias [14:9] since for WB195,
+ * the main LNA is always LNA1.
+ */
+ regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
+ regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+ regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+ regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
+ regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
+
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+}
+
+#endif
+
+static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+ /* on AR92xx, the highest bit of count will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless) {
+ if (AR_SREV_9271(ah))
+ count = 0;
+ else
+ count = 0x80;
+ } else if (count & 0x80)
+ count = 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+
+ return;
+}
+
+static void ar9002_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ /* 0x9864 is AR_PHY_CCA, 0x9924 is AR_PHY_TIMING5 (see ar9002_phy.h) */
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->set_rf_regs = NULL;
+ priv_ops->rf_set_freq = ar9002_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
+ priv_ops->olc_init = ar9002_olc_init;
+ priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
+ priv_ops->do_getnf = ar9002_hw_do_getnf;
+
+ ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get;
+ ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set;
+ ops->spectral_scan_config = ar9002_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
+#endif
+ ops->tx99_start = ar9002_hw_tx99_start;
+ ops->tx99_stop = ar9002_hw_tx99_stop;
+
+ ar9002_hw_set_nf_limits(ah);
+}
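
The noise-floor readings taken in ar9002_hw_do_getnf() above are 9-bit two's-complement fields packed into the CCA registers: the driver masks and shifts them out with MS() and widens them to a signed value with sign_extend32(). A minimal standalone sketch of that conversion, with local stand-ins for the kernel helpers and a hypothetical register readback rather than a real hardware access:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as AR9280_PHY_MINCCA_PWR in ar9002_phy.h below. */
#define AR9280_PHY_MINCCA_PWR    0x1FF00000
#define AR9280_PHY_MINCCA_PWR_S  20

/* Local stand-in for the driver's MS() mask/shift helper. */
static uint32_t ms(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

/* Stand-in for sign_extend32(v, 8): bit 8 is the sign bit of the field. */
static int32_t sign_extend_9bit(uint32_t v)
{
	return (v & 0x100) ? (int32_t)v - 0x200 : (int32_t)v;
}

int main(void)
{
	/* Hypothetical AR_PHY_CCA readback; bits [28:20] hold the min CCA power. */
	uint32_t ar_phy_cca = 0x1a500000;
	uint32_t raw = ms(ar_phy_cca, AR9280_PHY_MINCCA_PWR,
			  AR9280_PHY_MINCCA_PWR_S);
	int32_t nf = sign_extend_9bit(raw);

	printf("raw=0x%03x nf=%d dBm\n", (unsigned)raw, (int)nf);
	return 0;
}

With the hypothetical readback above this prints a noise floor of -91 dBm, i.e. the same kind of value the driver feeds into its nf_2g/nf_5g sanity limits.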
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 000000000..6314ae2e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef AR9002_PHY_H
+#define AR9002_PHY_H
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR_PHY_TEST2 0x9808
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_TO_ADC_ON 0xFF000000
+#define AR_PHY_TX_END_TO_ADC_ON_S 24
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_TSTDAC_CONST 0x983c
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_FIND_SIG_LOW 0x9840
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW 0x00000FC0L
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Time period between successive spectral scan entry points, reg 68, bits 8-15*/
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+#define AR_PHY_9285_FAST_DIV_BIAS 0x00007E00
+#define AR_PHY_9285_FAST_DIV_BIAS_S 9
+#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR_PHY_9285_ANT_DIV_CTL_S 24
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE 0x0b
+#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE 0x09
+#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04
+#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09
+#define ATH_BT_COEX_ANT_DIV_SWITCH_COM 0x66666666
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
+
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_ASYNCFIFO 0x80
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_TX_PWRCTRL4 0xa264
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR_PHY_TX_PWRCTRL6_0 0xa270
+#define AR_PHY_TX_PWRCTRL6_1 0xb270
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR_PHY_TX_PWRCTRL7 0xa274
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR_PHY_TX_PWRCTRL8 0xa278
+
+#define AR_PHY_TX_PWRCTRL9 0xa27C
+
+#define AR_PHY_TX_PWRCTRL10 0xa394
+#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
+#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR_PHY_TX_GAIN_TBL1 0xa300
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
+#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
+#define AR_PHY_CH0_TX_PWRCTRL12 0xa3dc
+#define AR_PHY_CH0_TX_PWRCTRL13 0xa3e0
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR_PHY_CL_CAL_CTL 0xA358
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#define AR_PHY_CCA_NOM_VAL_5416_2GHZ -90
+#define AR_PHY_CCA_NOM_VAL_5416_5GHZ -100
+#define AR_PHY_CCA_MIN_GOOD_VAL_5416_2GHZ -100
+#define AR_PHY_CCA_MIN_GOOD_VAL_5416_5GHZ -110
+#define AR_PHY_CCA_MAX_GOOD_VAL_5416_2GHZ -80
+#define AR_PHY_CCA_MAX_GOOD_VAL_5416_5GHZ -90
+
+#define AR_PHY_CCA_NOM_VAL_9280_2GHZ -112
+#define AR_PHY_CCA_NOM_VAL_9280_5GHZ -112
+#define AR_PHY_CCA_MIN_GOOD_VAL_9280_2GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9280_5GHZ -122
+#define AR_PHY_CCA_MAX_GOOD_VAL_9280_2GHZ -97
+#define AR_PHY_CCA_MAX_GOOD_VAL_9280_5GHZ -102
+
+#define AR_PHY_CCA_NOM_VAL_9285_2GHZ -118
+#define AR_PHY_CCA_MIN_GOOD_VAL_9285_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9285_2GHZ -108
+
+#define AR_PHY_CCA_NOM_VAL_9271_2GHZ -118
+#define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ -116
+
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -120
+#define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -110
+
+#endif
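
Every multi-bit field in this header is declared as a mask plus a companion *_S shift constant. The ath9k MS()/SM() helpers rely on that naming by token-pasting the _S suffix, so a single field name is enough to both extract and compose values. A small illustration using AR_PHY_SPECTRAL_SCAN_COUNT from above; the MS/SM definitions here are a sketch of that idea, not quoted from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Field definition copied from ar9002_phy.h above. */
#define AR_PHY_SPECTRAL_SCAN_COUNT    0x00FF0000
#define AR_PHY_SPECTRAL_SCAN_COUNT_S  16

/* Sketch of the mask/shift helpers used throughout ath9k. */
#define MS(_v, _f)  (((_v) & _f) >> _f##_S)
#define SM(_v, _f)  (((_v) << _f##_S) & _f)

int main(void)
{
	uint32_t reg = 0;

	/* Compose: place a report count of 0x7f into bits [23:16]. */
	reg |= SM(0x7f, AR_PHY_SPECTRAL_SCAN_COUNT);

	/* Extract the same field again. */
	uint32_t count = MS(reg, AR_PHY_SPECTRAL_SCAN_COUNT);

	printf("reg=0x%08x count=0x%02x\n", (unsigned)reg, (unsigned)count);
	return 0;
}

This is the same pattern ar9002_hw_spectral_scan_config() uses when it clamps the count field to 0x7f before writing it with REG_RMW_FIELD().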
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
new file mode 100644
index 000000000..c38399bc9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -0,0 +1,1755 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_2P2_H
+#define INITVALS_9003_2P2_H
+
+/* AR9003 2.2 */
+
+static const u32 ar9300_2p2_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
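
The initvals arrays in this file are flat {address, value...} tables: column 0 is the register address and the remaining columns carry the value for each channel mode named in the table header (or a single allmodes value for the 2-column tables). A hedged sketch of how such a table can be walked for one mode, with a made-up reg_write() standing in for the driver's register accessor:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register writer standing in for the driver's REG_WRITE(). */
static void reg_write(uint32_t addr, uint32_t val)
{
	printf("write 0x%08x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
}

/* Column indices for the 5-column tables, mirroring the headers above
 * (column 0 is the address).
 */
enum ini_mode { MODE_5G_HT20 = 1, MODE_5G_HT40, MODE_2G_HT40, MODE_2G_HT20 };

/* Write column 'mode' of every row to that row's register address. */
static void write_ini_table(const uint32_t (*tbl)[5], size_t rows,
			    enum ini_mode mode)
{
	for (size_t i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][mode]);
}

int main(void)
{
	/* Two rows copied from ar9300_2p2_radio_postamble above. */
	static const uint32_t sample[][5] = {
		{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
		{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
	};

	write_ini_table(sample, sizeof(sample) / sizeof(sample[0]),
			MODE_2G_HT20);
	return 0;
}

The driver itself applies these tables through its own INI machinery; the loop above only shows the address-plus-per-mode-column layout the tables encode.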
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p2[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p2_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458a214f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300_2p2_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x18f04800, 0x18f04800, 0x18f04810, 0x18f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p2_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9300_2p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000036c0, 0x000036c4, 0x000036c4, 0x000036c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01884061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261800},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5e88442e, 0x5e88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x628a4431, 0x628a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d4, 0x000050d4, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_mixed_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_type5_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p2_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p2_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000008},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0821265e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0825365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0821365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9300_2p2_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
+#endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
new file mode 100644
index 000000000..1db119d77
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_mci.h"
+#include "ar9003_aic.h"
+#include "ar9003_phy.h"
+#include "reg_aic.h"
+
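+/* Common attenuation values, in dB, indexed by the SRAM com_att_6db field. */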
+static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
+ 0, 3, 9, 15, 21, 27
+};
+
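+/* Linear gain magnitudes; each entry is roughly 1 dB below the previous one. */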
+static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
+ 8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
+ 3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
+ 1298, 1157, 1031, 919, 819, 730, 651, 580,
+ 517, 461, 411, 366, 326, 291, 259, 231,
+ 206, 183, 163, 146, 130, 116, 103, 92,
+ 82, 73, 65, 58, 52, 46, 41, 37,
+ 33, 29, 26, 23, 21, 18, 16, 15,
+ 13, 12, 10, 9, 8, 7, 7, 6,
+ 5, 5, 4, 4, 3
+};
+
+static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ /*
+ * Disable AIC for now, until we have all the
+ * HW code and the driver-layer support ready.
+ */
+ return false;
+
+ if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
+ return false;
+
+ return true;
+}
+
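+/*
+ * Return the index of the nearest valid calibration entry above (dir != 0)
+ * or below (dir == 0) the given index, or -1 if none is found.
+ */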
+static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
+ bool dir, u8 index)
+{
+ int16_t i;
+
+ if (dir) {
+ for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ if (cal_sram[i].valid)
+ break;
+ }
+ } else {
+ for (i = index - 1; i >= 0; i--) {
+ if (cal_sram[i].valid)
+ break;
+ }
+ }
+
+ if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
+ i = -1;
+
+ return i;
+}
+
+/*
+ * type 0: aic_lin_table - return the index whose linear gain is still
+ *         >= value (i.e. map a linear gain back to a dB index).
+ * type 1: com_att_db_table - return the largest index whose dB value
+ *         does not exceed value.  Both return -1 when out of range.
+ */
+static int16_t ar9003_aic_find_index(u8 type, int16_t value)
+{
+ int16_t i = -1;
+
+ if (type == 0) {
+ for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
+ if (aic_lin_table[i] >= value)
+ break;
+ }
+ } else if (type == 1) {
+ for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
+ if (com_att_db_table[i] > value) {
+ i--;
+ break;
+ }
+ }
+
+ if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
+ i = -1;
+ }
+
+ return i;
+}
+
+static void ar9003_aic_gain_table(struct ath_hw *ah)
+{
+ u32 aic_atten_word[19], i;
+
+ /* Config LNA gain difference */
+ REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
+ REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
+
+ /* Program gain table */
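+ /*
+ * Each word packs two attenuation steps as 4-bit/5-bit pairs:
+ * bits [8:5] and [4:0] hold the even step, bits [17:14] and [13:9]
+ * the odd step, as noted in the per-word dB comments below.
+ */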
+ aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */
+ aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
+ aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
+ aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
+ aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
+ aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
+ aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
+ aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0xf & 0x1f); /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
+ aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0xf & 0x1f); /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
+ aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
+ aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
+ aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
+ aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
+ (0x7 & 0x1f); /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
+ aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x3 & 0x1f); /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
+ aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x3 & 0x1f); /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
+ aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
+ aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
+ aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
+ aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
+ (0x1 & 0x1f); /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
+
+ /* Write to Gain table with auto increment enabled. */
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+ (ATH_AIC_SRAM_AUTO_INCREMENT |
+ ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
+
+ for (i = 0; i < 19; i++) {
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
+ aic_atten_word[i]);
+ }
+}
+
+static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int i;
+
+ /* Clear the calibration SRAM, using auto-increment addressing. */
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
+ (ATH_AIC_SRAM_AUTO_INCREMENT |
+ ATH_AIC_SRAM_CAL_OFFSET));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
+ aic->aic_sram[i] = 0;
+ }
+
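+ /* Program the calibration parameters into the AIC control registers. */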
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
+ (SM(0, AR_PHY_AIC_MON_ENABLE) |
+ SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
+ SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
+ SM(37, AR_PHY_AIC_F_WLAN) |
+ SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+ SM(0, AR_PHY_AIC_CAL_ENABLE) |
+ SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+ SM(0, AR_PHY_AIC_ENABLE)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
+ (SM(0, AR_PHY_AIC_MON_ENABLE) |
+ SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
+ SM(0, AR_PHY_AIC_CAL_ENABLE) |
+ SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
+ SM(0, AR_PHY_AIC_ENABLE)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
+ (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
+ SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
+ SM(1, AR_PHY_AIC_STDBY_COND) |
+ SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
+ SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
+ SM(15, AR_PHY_AIC_RSSI_MAX) |
+ SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
+ (SM(15, AR_PHY_AIC_RSSI_MAX) |
+ SM(0, AR_PHY_AIC_RSSI_MIN)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
+ (SM(44, AR_PHY_AIC_RADIO_DELAY) |
+ SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
+ SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
+ SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
+ SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
+ SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
+ SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
+ SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
+ (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
+ SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
+ SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
+ SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
+ SM(10, AR_PHY_AIC_MON_PERF_THR) |
+ SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
+ SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
+ SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
+ (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+ SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+ SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+ SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+ SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+ REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
+ (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
+ SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
+ SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
+ SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
+ SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
+
+ ar9003_aic_gain_table(ah);
+
+ /* Need to enable AIC reference signal in BT modem. */
+ REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+ (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
+ ATH_AIC_BT_AIC_ENABLE));
+
+ aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
+
+ /* Start calibration */
+ REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+
+ aic->aic_caled_chan = 0;
+ aic->aic_cal_state = AIC_CAL_STATE_STARTED;
+
+ return aic->aic_cal_state;
+}
+
+static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
+ struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+ u32 dir_path_gain_idx, quad_path_gain_idx, value;
+ u32 fixed_com_att_db;
+ int8_t dir_path_sign, quad_path_sign;
+ int16_t i;
+ bool ret = true;
+
+ memset(&cal_sram, 0, sizeof(cal_sram));
+ memset(&aic_sram, 0, sizeof(aic_sram));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ value = aic->aic_sram[i];
+
+ cal_sram[i].valid =
+ MS(value, AR_PHY_AIC_SRAM_VALID);
+ cal_sram[i].rot_quad_att_db =
+ MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
+ cal_sram[i].vga_quad_sign =
+ MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
+ cal_sram[i].rot_dir_att_db =
+ MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
+ cal_sram[i].vga_dir_sign =
+ MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
+ cal_sram[i].com_att_6db =
+ MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
+
+ if (cal_sram[i].valid) {
+ dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
+ com_att_db_table[cal_sram[i].com_att_6db];
+ quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
+ com_att_db_table[cal_sram[i].com_att_6db];
+
+ dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
+ quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
+
+ aic_sram[i].dir_path_gain_lin = dir_path_sign *
+ aic_lin_table[dir_path_gain_idx];
+ aic_sram[i].quad_path_gain_lin = quad_path_sign *
+ aic_lin_table[quad_path_gain_idx];
+ }
+ }
+
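+ /*
+ * Fill channels without a valid measurement by linear interpolation
+ * between the nearest valid neighbours, or by extrapolation when
+ * valid entries exist on only one side (the >> 1 terms round the
+ * integer division to nearest).
+ */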
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ int16_t start_idx, end_idx;
+
+ if (cal_sram[i].valid)
+ continue;
+
+ start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
+ end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
+
+ if (start_idx < 0) {
+ /* extrapolation */
+ start_idx = end_idx;
+ end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
+
+ if (end_idx < 0) {
+ ret = false;
+ break;
+ }
+
+ aic_sram[i].dir_path_gain_lin =
+ ((aic_sram[start_idx].dir_path_gain_lin -
+ aic_sram[end_idx].dir_path_gain_lin) *
+ (start_idx - i) + ((end_idx - i) >> 1)) /
+ (end_idx - i) +
+ aic_sram[start_idx].dir_path_gain_lin;
+ aic_sram[i].quad_path_gain_lin =
+ ((aic_sram[start_idx].quad_path_gain_lin -
+ aic_sram[end_idx].quad_path_gain_lin) *
+ (start_idx - i) + ((end_idx - i) >> 1)) /
+ (end_idx - i) +
+ aic_sram[start_idx].quad_path_gain_lin;
+ }
+
+ if (end_idx < 0) {
+ /* extrapolation */
+ end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
+
+ if (end_idx < 0) {
+ ret = false;
+ break;
+ }
+
+ aic_sram[i].dir_path_gain_lin =
+ ((aic_sram[start_idx].dir_path_gain_lin -
+ aic_sram[end_idx].dir_path_gain_lin) *
+ (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+ (start_idx - end_idx) +
+ aic_sram[start_idx].dir_path_gain_lin;
+ aic_sram[i].quad_path_gain_lin =
+ ((aic_sram[start_idx].quad_path_gain_lin -
+ aic_sram[end_idx].quad_path_gain_lin) *
+ (i - start_idx) + ((start_idx - end_idx) >> 1)) /
+ (start_idx - end_idx) +
+ aic_sram[start_idx].quad_path_gain_lin;
+
+ } else if (start_idx >= 0) {
+ /* interpolation */
+ aic_sram[i].dir_path_gain_lin =
+ (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
+ ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
+ ((end_idx - start_idx) >> 1)) /
+ (end_idx - start_idx);
+ aic_sram[i].quad_path_gain_lin =
+ (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
+ ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
+ ((end_idx - start_idx) >> 1)) /
+ (end_idx - start_idx);
+ }
+ }
+
+ /* From dir/quad_path_gain_lin to sram. */
+ i = ar9003_aic_find_valid(cal_sram, 1, 0);
+ if (i < 0) {
+ i = 0;
+ ret = false;
+ }
+ fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ int16_t rot_dir_path_att_db, rot_quad_path_att_db;
+
+ aic_sram[i].sram.vga_dir_sign =
+ (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
+ aic_sram[i].sram.vga_quad_sign =
+ (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
+
+ rot_dir_path_att_db =
+ ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
+ fixed_com_att_db;
+ rot_quad_path_att_db =
+ ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
+ fixed_com_att_db;
+
+ aic_sram[i].sram.com_att_6db =
+ ar9003_aic_find_index(1, fixed_com_att_db);
+
+ aic_sram[i].sram.valid = 1;
+
+ aic_sram[i].sram.rot_dir_att_db =
+ min(max(rot_dir_path_att_db,
+ (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
+ ATH_AIC_MAX_ROT_DIR_ATT_DB);
+ aic_sram[i].sram.rot_quad_att_db =
+ min(max(rot_quad_path_att_db,
+ (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
+ ATH_AIC_MAX_ROT_QUAD_ATT_DB);
+ }
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
+ AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
+ SM(aic_sram[i].sram.vga_quad_sign,
+ AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
+ SM(aic_sram[i].sram.com_att_6db,
+ AR_PHY_AIC_SRAM_COM_ATT_6DB) |
+ SM(aic_sram[i].sram.valid,
+ AR_PHY_AIC_SRAM_VALID) |
+ SM(aic_sram[i].sram.rot_dir_att_db,
+ AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
+ SM(aic_sram[i].sram.rot_quad_att_db,
+ AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
+ }
+
+ return ret;
+}
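
Editor's note: the loop above fills each invalid BT channel by linear interpolation (or, at the edges, extrapolation) between the nearest valid channels, adding half of the divisor before the integer division so the result rounds to nearest rather than truncating. A minimal standalone sketch of that rounding idiom, with illustrative names only (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Linearly interpolate the gain for index i between two valid indices,
 * rounding to nearest by adding half of the divisor first. */
static int16_t interp_gain(int16_t i, int16_t start, int16_t end,
			   int16_t g_start, int16_t g_end)
{
	return ((end - i) * g_start + (i - start) * g_end +
		((end - start) >> 1)) / (end - start);
}

int main(void)
{
	/* Channels 2 and 5 are valid with gains 40 and 100;
	 * fill channels 3 and 4 the way the loop above does. */
	printf("ch3 = %d\n", interp_gain(3, 2, 5, 40, 100));	/* 60 */
	printf("ch4 = %d\n", interp_gain(4, 2, 5, 40, 100));	/* 80 */
	return 0;
}
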
+
+static void ar9003_aic_cal_done(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+ /* Disable AIC reference signal in BT modem. */
+ REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
+ (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
+ ~ATH_AIC_BT_AIC_ENABLE));
+
+ if (ar9003_aic_cal_post_process(ah))
+ aic->aic_cal_state = AIC_CAL_STATE_DONE;
+ else
+ aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+}
+
+static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int i, num_chan;
+
+ num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+ if (!num_chan) {
+ aic->aic_cal_state = AIC_CAL_STATE_ERROR;
+ return aic->aic_cal_state;
+ }
+
+ if (cal_once) {
+ for (i = 0; i < 10000; i++) {
+ if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+ AR_PHY_AIC_CAL_ENABLE) == 0)
+ break;
+
+ udelay(100);
+ }
+ }
+
+ /*
+ * Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE.
+ * Sometimes CAL_DONE bit is not asserted.
+ */
+ if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
+ AR_PHY_AIC_CAL_ENABLE) != 0) {
+ ath_dbg(common, MCI, "AIC cal is not done after 1000ms\n");
+ goto exit;
+ }
+
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
+ (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ u32 value;
+
+ value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
+
+ if (value & 0x01) {
+ if (aic->aic_sram[i] == 0)
+ aic->aic_caled_chan++;
+
+ aic->aic_sram[i] = value;
+
+ if (!cal_once)
+ break;
+ }
+ }
+
+ if ((aic->aic_caled_chan >= num_chan) || cal_once) {
+ ar9003_aic_cal_done(ah);
+ } else {
+ /* Start calibration */
+ REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
+ AR_PHY_AIC_CAL_CH_VALID_RESET);
+ REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
+ }
+exit:
+ return aic->aic_cal_state;
+
+}
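
Editor's note: as the comment above explains, the driver polls the self-clearing AR_PHY_AIC_CAL_ENABLE bit rather than CAL_DONE. The bounded busy-wait pattern it uses is sketched below in userspace form; the simulated register read only stands in for the real MMIO access and is purely illustrative (not part of the patch).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated register: the "cal enable" bit self-clears after three reads. */
static unsigned int busy_reads = 3;

static uint32_t read_cal_reg(void)
{
	if (busy_reads > 0) {
		busy_reads--;
		return 0x1;	/* calibration still running */
	}
	return 0x0;		/* bit has self-cleared */
}

/* Poll until the bit drops or the iteration budget is exhausted. */
static bool poll_cal_finished(unsigned int max_iter)
{
	unsigned int i;

	for (i = 0; i < max_iter; i++) {
		if ((read_cal_reg() & 0x1) == 0)
			return true;
		/* the driver udelay()s 100us between polls here */
	}
	return false;
}

int main(void)
{
	printf("cal %s\n", poll_cal_finished(10000) ? "finished" : "timed out");
	return 0;
}
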
+
+u8 ar9003_aic_calibration(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ u8 cal_ret = AIC_CAL_STATE_ERROR;
+
+ switch (aic->aic_cal_state) {
+ case AIC_CAL_STATE_IDLE:
+ cal_ret = ar9003_aic_cal_start(ah, 1);
+ break;
+ case AIC_CAL_STATE_STARTED:
+ cal_ret = ar9003_aic_cal_continue(ah, false);
+ break;
+ case AIC_CAL_STATE_DONE:
+ cal_ret = AIC_CAL_STATE_DONE;
+ break;
+ default:
+ break;
+ }
+
+ return cal_ret;
+}
+
+u8 ar9003_aic_start_normal(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+ int16_t i;
+
+ if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
+ return 1;
+
+ ar9003_aic_gain_table(ah);
+
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
+
+ for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
+ }
+
+ /* FIXME: Replace these with proper register names */
+ REG_WRITE(ah, 0xa6b0, 0x80);
+ REG_WRITE(ah, 0xa6b4, 0x5b2df0);
+ REG_WRITE(ah, 0xa6b8, 0x10762cc8);
+ REG_WRITE(ah, 0xa6bc, 0x1219a4b);
+ REG_WRITE(ah, 0xa6c0, 0x1e01);
+ REG_WRITE(ah, 0xb6b4, 0xf0);
+ REG_WRITE(ah, 0xb6c0, 0x1e01);
+ REG_WRITE(ah, 0xb6b0, 0x81);
+ REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
+
+ aic->aic_enabled = true;
+
+ return 0;
+}
+
+u8 ar9003_aic_cal_reset(struct ath_hw *ah)
+{
+ struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
+
+ aic->aic_cal_state = AIC_CAL_STATE_IDLE;
+ return aic->aic_cal_state;
+}
+
+u8 ar9003_aic_calibration_single(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u8 cal_ret;
+ int num_chan;
+
+ num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
+
+ (void) ar9003_aic_cal_start(ah, num_chan);
+ cal_ret = ar9003_aic_cal_continue(ah, true);
+
+ return cal_ret;
+}
+
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
new file mode 100644
index 000000000..86f40644b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_AIC_H
+#define AR9003_AIC_H
+
+#define ATH_AIC_MAX_COM_ATT_DB_TABLE 6
+#define ATH_AIC_MAX_AIC_LIN_TABLE 69
+#define ATH_AIC_MIN_ROT_DIR_ATT_DB 0
+#define ATH_AIC_MIN_ROT_QUAD_ATT_DB 0
+#define ATH_AIC_MAX_ROT_DIR_ATT_DB 37
+#define ATH_AIC_MAX_ROT_QUAD_ATT_DB 37
+#define ATH_AIC_SRAM_AUTO_INCREMENT 0x80000000
+#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET 0x280
+#define ATH_AIC_SRAM_CAL_OFFSET 0x140
+#define ATH_AIC_SRAM_OFFSET 0x00
+#define ATH_AIC_MEAS_MAG_THRESH 20
+#define ATH_AIC_BT_JUPITER_CTRL 0x66820
+#define ATH_AIC_BT_AIC_ENABLE 0x02
+
+enum aic_cal_state {
+ AIC_CAL_STATE_IDLE = 0,
+ AIC_CAL_STATE_STARTED,
+ AIC_CAL_STATE_DONE,
+ AIC_CAL_STATE_ERROR
+};
+
+struct ath_aic_sram_info {
+ bool valid:1;
+ bool vga_quad_sign:1;
+ bool vga_dir_sign:1;
+ u8 rot_quad_att_db;
+ u8 rot_dir_att_db;
+ u8 com_att_6db;
+};
+
+struct ath_aic_out_info {
+ int16_t dir_path_gain_lin;
+ int16_t quad_path_gain_lin;
+ struct ath_aic_sram_info sram;
+};
+
+u8 ar9003_aic_calibration(struct ath_hw *ah);
+u8 ar9003_aic_start_normal(struct ath_hw *ah);
+u8 ar9003_aic_cal_reset(struct ath_hw *ah);
+u8 ar9003_aic_calibration_single(struct ath_hw *ah);
+
+#endif /* AR9003_AIC_H */
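
Editor's note: the calibration code in ar9003_aic.c packs struct ath_aic_sram_info into a single 32-bit SRAM word with the driver's SM()/MS() shift-and-mask helpers. The sketch below shows the same pack/unpack idea with made-up field masks; the layout is illustrative only and is not the real AR_PHY_AIC_SRAM_* definition (not part of the patch).

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: valid bit, 3-bit attenuator, 7-bit attenuator. */
#define FIELD_VALID		0x00000001u
#define FIELD_VALID_S		0
#define FIELD_COM_ATT_6DB	0x0000000eu
#define FIELD_COM_ATT_6DB_S	1
#define FIELD_DIR_ATT_DB	0x000007f0u
#define FIELD_DIR_ATT_DB_S	4

/* Same idea as the driver's SM()/MS() helpers. */
#define PACK(v, f)	(((uint32_t)(v) << f##_S) & (f))
#define UNPACK(w, f)	(((w) & (f)) >> f##_S)

int main(void)
{
	uint32_t word = PACK(1, FIELD_VALID) |
			PACK(5, FIELD_COM_ATT_6DB) |
			PACK(37, FIELD_DIR_ATT_DB);

	printf("valid=%u com_att=%u dir_att=%u (word=0x%08x)\n",
	       (unsigned int)UNPACK(word, FIELD_VALID),
	       (unsigned int)UNPACK(word, FIELD_COM_ATT_6DB),
	       (unsigned int)UNPACK(word, FIELD_DIR_ATT_DB),
	       (unsigned int)word);
	return 0;
}
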
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
new file mode 100644
index 000000000..59cf738f7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_BUFFALO_H
+#define INITVALS_9003_BUFFALO_H
+
+static const u32 ar9300Modes_high_power_tx_gain_table_buffalo[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+#endif /* INITVALS_9003_BUFFALO_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 000000000..174442beb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,1719 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
+#include "ar9003_mci.h"
+
+#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
+#define MAX_MAG_DELTA 11
+#define MAX_PHS_DELTA 10
+#define MAXIQCAL 3
+
+struct coeff {
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int iqc_coeff[2];
+};
+
+enum ar9003_cal_types {
+ IQ_MISMATCH_CAL = BIT(0),
+};
+
+static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Select calibration to run */
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * Start calibration with
+ * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+
+ ath_dbg(common, CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+ default:
+ ath_err(common, "Invalid calibration type\n");
+ break;
+ }
+}
+
+/*
+ * Generic calibration routine.
+ * Recalibrate the lower PHY chips to account for temperature/environment
+ * changes.
+ */
+static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ /* Cal is assumed not done until explicitly set below */
+ bool iscaldone = false;
+
+ /* Calibration in progress. */
+ if (currCal->calState == CAL_RUNNING) {
+ /* Check to see if it has finished. */
+ if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ /*
+ * Accumulate cal measures for active chains
+ */
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ /*
+ * Process accumulated data
+ */
+ currCal->calData->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ caldata->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ /*
+ * Set-up collection of another sub-sample until we
+ * get desired number
+ */
+ ar9003_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(caldata->CalValid & currCal->calData->calType)) {
+ /* If current cal is marked invalid in channel, kick it off */
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+ int ret;
+
+ /*
+ * For given calibration:
+ * 1. Call generic cal routine
+ * 2. When this cal is done (isCalDone) if we have more cals waiting
+ * (eg after reset), mask this to upper layers by not propagating
+ * isCalDone if it is set to TRUE.
+ * Instead, change isCalDone to FALSE and setup the waiting cal(s)
+ * to be run.
+ */
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9003_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /*
+ * Do NF cal only at longer intervals. Get the value from
+ * the previous NF cal and update history buffer.
+ */
+ if (longcal && ath9k_hw_getnf(ah, chan)) {
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ret = ath9k_hw_loadnf(ah, ah->curchan);
+ if (ret < 0)
+ return ret;
+
+ /* start NF calibration, without updating BB NF register */
+ ath9k_hw_start_nfcal(ah, false);
+ }
+
+ return iscaldone;
+}
+
+static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ /* Accumulate IQ cal measures for active chains */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (ah->txchainmask & BIT(i)) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+ }
+}
+
+static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+ static const u_int32_t offset_array[3] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_dbg(common, CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n", i);
+
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
+
+ /* Force bounds on iCoff */
+ if (iCoff >= 63)
+ iCoff = 63;
+ else if (iCoff <= -63)
+ iCoff = -63;
+
+ /* Negate iCoff if iqCorrNeg == 0 */
+ if (iqCorrNeg == 0x0)
+ iCoff = -iCoff;
+
+ /* Force bounds on qCoff */
+ if (qCoff >= 63)
+ qCoff = 63;
+ else if (qCoff <= -63)
+ qCoff = -63;
+
+ iCoff = iCoff & 0x7f;
+ qCoff = qCoff & 0x7f;
+
+ ath_dbg(common, CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_dbg(common, CALIBRATE,
+ "Register offset (0x%04x) before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
+
+ if (AR_SREV_9565(ah) &&
+ (iCoff == 63 || qCoff == 63 ||
+ iCoff == -63 || qCoff == -63))
+ return;
+
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_dbg(common, CALIBRATE,
+ "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_dbg(common, CALIBRATE,
+ "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_dbg(common, CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n", i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
+ ath_dbg(common, CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+}
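
Editor's note: the coefficient arithmetic in ar9003_hw_iqcalibrate() reduces to a few integer divisions and a clamp to the 7-bit range the hardware accepts. The standalone sketch below walks through that arithmetic with made-up measurement values (not part of the patch); it skips the sign handling of iqCorrMeas above 0x80000000 for brevity.

#include <stdint.h>
#include <stdio.h>

static int32_t clamp63(int32_t v)
{
	if (v > 63)
		return 63;
	if (v < -63)
		return -63;
	return v;
}

int main(void)
{
	/* Made-up accumulated measurements. */
	uint32_t power_i = 0x00200000, power_q = 0x001f0000;
	uint32_t iq_corr = 0x00030000;
	int corr_neg = 0;

	uint32_t i_denom = (power_i / 2 + power_q / 2) / 256;
	uint32_t q_denom = power_q / 64;

	int32_t i_coff = clamp63((int32_t)(iq_corr / i_denom));
	int32_t q_coff = clamp63((int32_t)(power_i / q_denom) - 64);

	if (!corr_neg)
		i_coff = -i_coff;

	/* The hardware fields take 7-bit two's-complement values. */
	printf("iCoff=0x%02x qCoff=0x%02x\n",	/* prints iCoff=0x68 qCoff=0x02 */
	       (unsigned int)(i_coff & 0x7f), (unsigned int)(q_coff & 0x7f));
	return 0;
}
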
+
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9003_hw_iqcal_collect,
+ ar9003_hw_iqcalibrate
+};
+
+static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->enabled_cals |= TX_IQ_CAL;
+ if (AR_SREV_9485_OR_LATER(ah) && !AR_SREV_9340(ah))
+ ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
+ }
+
+ ah->supp_cals = IQ_MISMATCH_CAL;
+}
+
+#define OFF_UPPER_LT 24
+#define OFF_LOWER_LT 7
+
+static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
+ bool txiqcal_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ch0_done, osdac_ch0, dc_off_ch0_i1, dc_off_ch0_q1, dc_off_ch0_i2,
+ dc_off_ch0_q2, dc_off_ch0_i3, dc_off_ch0_q3;
+ int ch1_done, osdac_ch1, dc_off_ch1_i1, dc_off_ch1_q1, dc_off_ch1_i2,
+ dc_off_ch1_q2, dc_off_ch1_i3, dc_off_ch1_q3;
+ int ch2_done, osdac_ch2, dc_off_ch2_i1, dc_off_ch2_q1, dc_off_ch2_i2,
+ dc_off_ch2_q2, dc_off_ch2_i3, dc_off_ch2_q3;
+ bool status;
+ u32 temp, val;
+
+ /*
+ * Clear offset and IQ calibration, run AGC cal.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "AGC cal without offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ /*
+ * Allow only offset calibration and disable the others
+ * (Carrier Leak calibration, TX Filter calibration and
+ * Peak Detector offset calibration).
+ */
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+
+ ch0_done = 0;
+ ch1_done = 0;
+ ch2_done = 0;
+
+ while ((ch0_done == 0) || (ch1_done == 0) || (ch2_done == 0)) {
+ osdac_ch0 = (REG_READ(ah, AR_PHY_65NM_CH0_BB1) >> 30) & 0x3;
+ osdac_ch1 = (REG_READ(ah, AR_PHY_65NM_CH1_BB1) >> 30) & 0x3;
+ osdac_ch2 = (REG_READ(ah, AR_PHY_65NM_CH2_BB1) >> 30) & 0x3;
+
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "DC offset cal failed to complete in 1ms");
+ return false;
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * High gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (1 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (1 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q1 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i1 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q1 = (temp >> 21) & 0x1f;
+
+ /*
+ * Low gain.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (2 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (2 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q2 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i2 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q2 = (temp >> 21) & 0x1f;
+
+ /*
+ * Loopback.
+ */
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (3 << 8)));
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+ ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (3 << 8)));
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+ dc_off_ch0_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch0_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+ dc_off_ch1_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch1_q3 = (temp >> 21) & 0x1f;
+
+ temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+ dc_off_ch2_i3 = (temp >> 26) & 0x1f;
+ dc_off_ch2_q3 = (temp >> 21) & 0x1f;
+
+ if ((dc_off_ch0_i1 > OFF_UPPER_LT) || (dc_off_ch0_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i2 > OFF_UPPER_LT) || (dc_off_ch0_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_i3 > OFF_UPPER_LT) || (dc_off_ch0_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q1 > OFF_UPPER_LT) || (dc_off_ch0_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q2 > OFF_UPPER_LT) || (dc_off_ch0_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch0_q3 > OFF_UPPER_LT) || (dc_off_ch0_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch0 == 3) {
+ ch0_done = 1;
+ } else {
+ osdac_ch0++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH0_BB1) & 0x3fffffff;
+ val |= (osdac_ch0 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BB1, val);
+
+ ch0_done = 0;
+ }
+ } else {
+ ch0_done = 1;
+ }
+
+ if ((dc_off_ch1_i1 > OFF_UPPER_LT) || (dc_off_ch1_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i2 > OFF_UPPER_LT) || (dc_off_ch1_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_i3 > OFF_UPPER_LT) || (dc_off_ch1_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q1 > OFF_UPPER_LT) || (dc_off_ch1_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q2 > OFF_UPPER_LT) || (dc_off_ch1_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch1_q3 > OFF_UPPER_LT) || (dc_off_ch1_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch1 == 3) {
+ ch1_done = 1;
+ } else {
+ osdac_ch1++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH1_BB1) & 0x3fffffff;
+ val |= (osdac_ch1 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH1_BB1, val);
+
+ ch1_done = 0;
+ }
+ } else {
+ ch1_done = 1;
+ }
+
+ if ((dc_off_ch2_i1 > OFF_UPPER_LT) || (dc_off_ch2_i1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i2 > OFF_UPPER_LT) || (dc_off_ch2_i2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_i3 > OFF_UPPER_LT) || (dc_off_ch2_i3 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q1 > OFF_UPPER_LT) || (dc_off_ch2_q1 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q2 > OFF_UPPER_LT) || (dc_off_ch2_q2 < OFF_LOWER_LT) ||
+ (dc_off_ch2_q3 > OFF_UPPER_LT) || (dc_off_ch2_q3 < OFF_LOWER_LT)) {
+ if (osdac_ch2 == 3) {
+ ch2_done = 1;
+ } else {
+ osdac_ch2++;
+
+ val = REG_READ(ah, AR_PHY_65NM_CH2_BB1) & 0x3fffffff;
+ val |= (osdac_ch2 << 30);
+ REG_WRITE(ah, AR_PHY_65NM_CH2_BB1, val);
+
+ ch2_done = 0;
+ }
+ } else {
+ ch2_done = 1;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_OFFSET_CAL);
+ REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * We don't need to check txiqcal_done here since it is always
+ * set for AR9550.
+ */
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+
+ return true;
+}
+
+/*
+ * solve 4x4 linear equation used in loopback iq cal.
+ */
+static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
+ s32 sin_2phi_1,
+ s32 cos_2phi_1,
+ s32 sin_2phi_2,
+ s32 cos_2phi_2,
+ s32 mag_a0_d0,
+ s32 phs_a0_d0,
+ s32 mag_a1_d0,
+ s32 phs_a1_d0,
+ s32 solved_eq[])
+{
+ s32 f1 = cos_2phi_1 - cos_2phi_2,
+ f3 = sin_2phi_1 - sin_2phi_2,
+ f2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ const s32 result_shift = 1 << 15;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ f2 = ((f1 >> 3) * (f1 >> 3) + (f3 >> 3) * (f3 >> 3)) >> 9;
+
+ if (!f2) {
+ ath_dbg(common, CALIBRATE, "Divide by 0\n");
+ return false;
+ }
+
+ /* mag mismatch, tx */
+ mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
+ /* phs mismatch, tx */
+ phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
+
+ mag_tx = (mag_tx / f2);
+ phs_tx = (phs_tx / f2);
+
+ /* mag mismatch, rx */
+ mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
+ result_shift;
+ /* phs mismatch, rx */
+ phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
+ result_shift;
+
+ solved_eq[0] = mag_tx;
+ solved_eq[1] = phs_tx;
+ solved_eq[2] = mag_rx;
+ solved_eq[3] = phs_rx;
+
+ return true;
+}
+
+static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
+{
+ s32 abs_i = abs(in_re),
+ abs_q = abs(in_im),
+ max_abs, min_abs;
+
+ if (abs_i > abs_q) {
+ max_abs = abs_i;
+ min_abs = abs_q;
+ } else {
+ max_abs = abs_q;
+ min_abs = abs_i;
+ }
+
+ return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
+}
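
Editor's note: ar9003_hw_find_mag_approx() is an alpha-max-plus-beta-min estimate of sqrt(re^2 + im^2), roughly max*31/32 + min*3/8, which avoids a square root in the calibration path. A small standalone comparison against the exact value (not part of the patch):

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static int mag_approx(int re, int im)
{
	int a = abs(re), b = abs(im);
	int max = a > b ? a : b;
	int min = a > b ? b : a;

	/* ~ max*31/32 + min*3/8, a cheap estimate of sqrt(re^2 + im^2) */
	return max - max / 32 + min / 8 + min / 4;
}

int main(void)
{
	int re = 300, im = -400;

	printf("approx=%d exact=%.1f\n",	/* approx=500 exact=500.0 */
	       mag_approx(re, im), sqrt((double)re * re + (double)im * im));
	return 0;
}
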
+
+#define DELPT 32
+
+static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
+ s32 chain_idx,
+ const s32 iq_res[],
+ s32 iqc_coeff[])
+{
+ s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
+ i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
+ i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
+ i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
+ s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
+ phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
+ q_q_coff, q_i_coff;
+ const s32 res_scale = 1 << 15;
+ const s32 delpt_shift = 1 << 8;
+ s32 mag1, mag2;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
+ i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
+ iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
+
+ if (i2_m_q2_a0_d0 > 0x800)
+ i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
+
+ if (i2_p_q2_a0_d0 > 0x800)
+ i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
+
+ if (iq_corr_a0_d0 > 0x800)
+ iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
+
+ i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
+ i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
+ iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
+
+ if (i2_m_q2_a0_d1 > 0x800)
+ i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
+
+ if (iq_corr_a0_d1 > 0x800)
+ iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
+
+ i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
+ i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
+ iq_corr_a1_d0 = iq_res[4] & 0xfff;
+
+ if (i2_m_q2_a1_d0 > 0x800)
+ i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
+
+ if (i2_p_q2_a1_d0 > 0x800)
+ i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
+
+ if (iq_corr_a1_d0 > 0x800)
+ iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
+
+ i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
+ i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
+ iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
+
+ if (i2_m_q2_a1_d1 > 0x800)
+ i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
+
+ if (i2_p_q2_a1_d1 > 0x800)
+ i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
+
+ if (iq_corr_a1_d1 > 0x800)
+ iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
+
+ if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
+ (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
+ ath_dbg(common, CALIBRATE,
+ "Divide by 0:\n"
+ "a0_d0=%d\n"
+ "a0_d1=%d\n"
+ "a2_d0=%d\n"
+ "a1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ return false;
+ }
+
+ if ((i2_p_q2_a0_d0 < 1024) || (i2_p_q2_a0_d0 > 2047) ||
+ (i2_p_q2_a1_d0 < 0) || (i2_p_q2_a1_d1 < 0) ||
+ (i2_p_q2_a0_d0 <= i2_m_q2_a0_d0) ||
+ (i2_p_q2_a0_d0 <= iq_corr_a0_d0) ||
+ (i2_p_q2_a0_d1 <= i2_m_q2_a0_d1) ||
+ (i2_p_q2_a0_d1 <= iq_corr_a0_d1) ||
+ (i2_p_q2_a1_d0 <= i2_m_q2_a1_d0) ||
+ (i2_p_q2_a1_d0 <= iq_corr_a1_d0) ||
+ (i2_p_q2_a1_d1 <= i2_m_q2_a1_d1) ||
+ (i2_p_q2_a1_d1 <= iq_corr_a1_d1)) {
+ return false;
+ }
+
+ mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+ phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+
+ mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+ phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+
+ mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+ phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+
+ mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+ phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+
+ /* w/o analog phase shift */
+ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
+ /* w/o analog phase shift */
+ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
+
+ /*
+ * force sin^2 + cos^2 = 1;
+ * find magnitude by approximation
+ */
+ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
+ mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
+
+ if ((mag1 == 0) || (mag2 == 0)) {
+ ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
+ return false;
+ }
+
+ /* normalization sin and cos by mag */
+ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
+ cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
+ sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
+ cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
+
+ /* calculate IQ mismatch */
+ if (!ar9003_hw_solve_iq_cal(ah,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2,
+ mag_a0_d0, phs_a0_d0,
+ mag_a1_d0,
+ phs_a1_d0, solved_eq)) {
+ ath_dbg(common, CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed\n");
+ return false;
+ }
+
+ mag_tx = solved_eq[0];
+ phs_tx = solved_eq[1];
+ mag_rx = solved_eq[2];
+ phs_rx = solved_eq[3];
+
+ ath_dbg(common, CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+
+ if (res_scale == mag_tx) {
+ ath_dbg(common, CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Tx IQ correction factor */
+ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
+ phs_corr_tx = -phs_tx;
+
+ q_q_coff = (mag_corr_tx * 128 / res_scale);
+ q_i_coff = (phs_corr_tx * 256 / res_scale);
+
+ ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
+
+ ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
+
+ if (-mag_rx == res_scale) {
+ ath_dbg(common, CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Rx IQ correction factors */
+ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
+ phs_corr_rx = -phs_rx;
+
+ q_q_coff = (mag_corr_rx * 128 / res_scale);
+ q_i_coff = (phs_corr_rx * 256 / res_scale);
+
+ ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
+
+ ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
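
Editor's note: the repeated "if (x > 0x800) x = -((0xfff - x) + 1);" pattern in ar9003_hw_calc_iq_corr() sign-extends 12-bit two's-complement hardware fields and is equivalent to subtracting 0x1000 when the sign bit is set (the strict comparison leaves 0x800 itself unconverted). A minimal check (not part of the patch):

#include <stdint.h>
#include <stdio.h>

static int32_t sext12(int32_t x)
{
	if (x > 0x800)
		x = -((0xfff - x) + 1);	/* same as x -= 0x1000 */
	return x;
}

int main(void)
{
	printf("0xfff -> %d\n", sext12(0xfff));	/* -1    */
	printf("0x801 -> %d\n", sext12(0x801));	/* -2047 */
	printf("0x7ff -> %d\n", sext12(0x7ff));	/* 2047  */
	return 0;
}
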
+
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+ int nmeasurement,
+ int max_delta)
+{
+ int mp_max = -64, max_idx = 0;
+ int mp_min = 63, min_idx = 0;
+ int mp_avg = 0, i, outlier_idx = 0, mp_count = 0;
+
+ /* find min/max mismatch across all calibrated gains */
+ for (i = 0; i < nmeasurement; i++) {
+ if (mp_coeff[i][0] > mp_max) {
+ mp_max = mp_coeff[i][0];
+ max_idx = i;
+ } else if (mp_coeff[i][0] < mp_min) {
+ mp_min = mp_coeff[i][0];
+ min_idx = i;
+ }
+ }
+
+ /* find average (exclude max abs value) */
+ for (i = 0; i < nmeasurement; i++) {
+ if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+ (abs(mp_coeff[i][0]) < abs(mp_min))) {
+ mp_avg += mp_coeff[i][0];
+ mp_count++;
+ }
+ }
+
+ /*
+ * find the mean magnitude/phase if possible, otherwise
+ * just use the last value as the mean
+ */
+ if (mp_count)
+ mp_avg /= mp_count;
+ else
+ mp_avg = mp_coeff[nmeasurement - 1][0];
+
+ /* detect outlier */
+ if (abs(mp_max - mp_min) > max_delta) {
+ if (abs(mp_max - mp_avg) > abs(mp_min - mp_avg))
+ outlier_idx = max_idx;
+ else
+ outlier_idx = min_idx;
+
+ mp_coeff[outlier_idx][0] = mp_avg;
+ }
+}
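
Editor's note: ar9003_hw_detect_outlier() replaces at most one extreme value: if the min/max spread across the calibrated gains exceeds max_delta, the extreme lying farther from the average is overwritten with that average. A flattened toy version of the same policy (not part of the patch; the driver operates on the first pass of a 2-D coefficient array):

#include <stdio.h>
#include <stdlib.h>

static void fix_outlier(int *v, int n, int max_delta)
{
	int i, max = -64, min = 63, max_idx = 0, min_idx = 0;
	int sum = 0, count = 0, avg;

	for (i = 0; i < n; i++) {
		if (v[i] > max) {
			max = v[i];
			max_idx = i;
		} else if (v[i] < min) {
			min = v[i];
			min_idx = i;
		}
	}

	/* average, excluding the largest-magnitude sample */
	for (i = 0; i < n; i++) {
		if (abs(v[i]) < abs(max) || abs(v[i]) < abs(min)) {
			sum += v[i];
			count++;
		}
	}
	avg = count ? sum / count : v[n - 1];

	if (abs(max - min) > max_delta)
		v[abs(max - avg) > abs(min - avg) ? max_idx : min_idx] = avg;
}

int main(void)
{
	int gains[] = { 4, 5, 6, 40, 5 };	/* 40 is the outlier */
	int i;

	fix_outlier(gains, 5, 11);
	for (i = 0; i < 5; i++)
		printf("%d ", gains[i]);	/* 4 5 6 5 5 */
	printf("\n");
	return 0;
}
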
+
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+ struct coeff *coeff,
+ bool is_reusable)
+{
+ int i, im, nmeasurement;
+ int magnitude, phase;
+ u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+ memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
+ for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
+ tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ if (!AR_SREV_9485(ah)) {
+ tx_corr_coeff[i * 2][1] =
+ tx_corr_coeff[(i * 2) + 1][1] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
+
+ tx_corr_coeff[i * 2][2] =
+ tx_corr_coeff[(i * 2) + 1][2] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
+ }
+ }
+
+ /* Load the average of 2 passes */
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ nmeasurement = REG_READ_FIELD(ah,
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_CALIBRATED_GAINS_0);
+
+ if (nmeasurement > MAX_MEASUREMENT)
+ nmeasurement = MAX_MEASUREMENT;
+
+ /*
+ * Skip normal outlier detection for AR9550.
+ */
+ if (!AR_SREV_9550(ah)) {
+ /* detect outlier only if nmeasurement > 1 */
+ if (nmeasurement > 1) {
+ /* Detect magnitude outlier */
+ ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+ nmeasurement,
+ MAX_MAG_DELTA);
+
+ /* Detect phase outlier */
+ ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+ nmeasurement,
+ MAX_PHS_DELTA);
+ }
+ }
+
+ for (im = 0; im < nmeasurement; im++) {
+ magnitude = coeff->mag_coeff[i][im][0];
+ phase = coeff->phs_coeff[i][im][0];
+
+ coeff->iqc_coeff[0] =
+ (phase & 0x7f) | ((magnitude & 0x7f) << 7);
+
+ if ((im % 2) == 0)
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
+ coeff->iqc_coeff[0]);
+ else
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ coeff->iqc_coeff[0]);
+
+ if (caldata)
+ caldata->tx_corr_coeff[im][i] =
+ coeff->iqc_coeff[0];
+ }
+ if (caldata)
+ caldata->num_measures[i] = nmeasurement;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ if (caldata) {
+ if (is_reusable)
+ set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ else
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ }
+
+ return;
+}
+
+static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 tx_gain_forced;
+
+ tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TXGAIN_FORCE);
+ if (tx_gain_forced)
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TXGAIN_FORCE, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL, 1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL, 0,
+ AH_WAIT_TIMEOUT)) {
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
+ return false;
+ }
+ return true;
+}
+
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+ struct coeff *coeff,
+ int i, int nmeasurement)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int im, ix, iy, temp;
+
+ for (im = 0; im < nmeasurement; im++) {
+ for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+ for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+ if (coeff->mag_coeff[i][im][iy] <
+ coeff->mag_coeff[i][im][ix]) {
+ temp = coeff->mag_coeff[i][im][ix];
+ coeff->mag_coeff[i][im][ix] =
+ coeff->mag_coeff[i][im][iy];
+ coeff->mag_coeff[i][im][iy] = temp;
+ }
+ if (coeff->phs_coeff[i][im][iy] <
+ coeff->phs_coeff[i][im][ix]) {
+ temp = coeff->phs_coeff[i][im][ix];
+ coeff->phs_coeff[i][im][ix] =
+ coeff->phs_coeff[i][im][iy];
+ coeff->phs_coeff[i][im][iy] = temp;
+ }
+ }
+ }
+ coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+ coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+ ath_dbg(common, CALIBRATE,
+ "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+ i, im,
+ coeff->mag_coeff[i][im][0],
+ coeff->phs_coeff[i][im][0]);
+ }
+}
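
Editor's note: with MAXIQCAL == 3 passes, the sort above is simply a way to pick the median of three measurements per gain entry, so a single bad pass cannot skew the programmed coefficient. The idea in isolation (not part of the patch):

#include <stdio.h>

#define NPASS 3

static int median3(int v[NPASS])
{
	int ix, iy, tmp;

	for (ix = 0; ix < NPASS - 1; ix++) {
		for (iy = ix + 1; iy < NPASS; iy++) {
			if (v[iy] < v[ix]) {
				tmp = v[ix];
				v[ix] = v[iy];
				v[iy] = tmp;
			}
		}
	}
	return v[NPASS / 2];
}

int main(void)
{
	int mag[NPASS] = { 12, 58, 14 };	/* 58 is a bad pass */

	printf("median magnitude = %d\n", median3(mag));	/* 14 */
	return 0;
}
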
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+ struct coeff *coeff,
+ int iqcal_idx,
+ int nmeasurement)
+{
+ int i;
+
+ if ((iqcal_idx + 1) != MAXIQCAL)
+ return false;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+ }
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+ int iqcal_idx,
+ bool is_reusable)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ static struct coeff coeff;
+ s32 iq_res[6];
+ int i, im, j;
+ int nmeasurement = 0;
+ bool outlier_detect = true;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+
+ nmeasurement = REG_READ_FIELD(ah,
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_CALIBRATED_GAINS_0);
+ if (nmeasurement > MAX_MEASUREMENT)
+ nmeasurement = MAX_MEASUREMENT;
+
+ for (im = 0; im < nmeasurement; im++) {
+ ath_dbg(common, CALIBRATE,
+ "Doing Tx IQ Cal for chain %d\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_dbg(common, CALIBRATE,
+ "Tx IQ Cal failed for chain %d\n", i);
+ goto tx_iqcal_fail;
+ }
+
+ for (j = 0; j < 3; j++) {
+ u32 idx = 2 * j, offset = 4 * (3 * im + j);
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 1);
+
+ /* 16 bits */
+ iq_res[idx + 1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] + offset);
+
+ ath_dbg(common, CALIBRATE,
+ "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx + 1,
+ iq_res[idx + 1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
+ coeff.iqc_coeff)) {
+ ath_dbg(common, CALIBRATE,
+ "Failed in calculation of IQ correction\n");
+ goto tx_iqcal_fail;
+ }
+
+ coeff.phs_coeff[i][im][iqcal_idx] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.mag_coeff[i][im][iqcal_idx] =
+ (coeff.iqc_coeff[0] >> 7) & 0x7f;
+
+ if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+ coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+ if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+ coeff.phs_coeff[i][im][iqcal_idx] -= 128;
+ }
+ }
+
+ if (AR_SREV_9550(ah))
+ outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+ iqcal_idx, nmeasurement);
+ if (outlier_detect)
+ ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
+
+ return;
+
+tx_iqcal_fail:
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
+ return;
+}
+
+static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
+ int i, im;
+
+ memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
+ for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
+ tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ if (!AR_SREV_9485(ah)) {
+ tx_corr_coeff[i * 2][1] =
+ tx_corr_coeff[(i * 2) + 1][1] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
+
+ tx_corr_coeff[i * 2][2] =
+ tx_corr_coeff[(i * 2) + 1][2] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
+ }
+ }
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+
+ for (im = 0; im < caldata->num_measures[i]; im++) {
+ if ((im % 2) == 0)
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
+ caldata->tx_corr_coeff[im][i]);
+ else
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ caldata->tx_corr_coeff[im][i]);
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+}
+
+static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
+{
+ int offset[8] = {0}, total = 0, test;
+ int agc_out, i, peak_detect_threshold;
+
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
+ peak_detect_threshold = 8;
+ else
+ peak_detect_threshold = 0;
+
+ /*
+ * Turn off LNA/SW.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
+
+ if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+ }
+
+ /*
+ * Turn off RXON.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON, 0x0);
+
+ /*
+ * Turn on AGC for cal.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
+
+ if (AR_SREV_9330_11(ah))
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
+
+ if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+ peak_detect_threshold);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+ peak_detect_threshold);
+ }
+
+ for (i = 6; i > 0; i--) {
+ offset[i] = BIT(i - 1);
+ test = total + offset[i];
+
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ test);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ test);
+ udelay(100);
+ agc_out = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OUT);
+ offset[i] = (agc_out) ? 0 : 1;
+ total += (offset[i] << (i - 1));
+ }
+
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, total);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
+
+ /*
+ * Turn on LNA.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
+ /*
+ * Turn off RXON.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON_OVR, 0);
+ /*
+ * Turn off peak detect calibration.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
+}
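
Editor's note: the "for (i = 6; i > 0; i--)" loop in ar9003_hw_manual_peak_cal() is a 6-bit successive-approximation search for the CALDAC code: each bit is tried from MSB to LSB and kept only while AGC_OUT stays low. The sketch below reproduces that search against a simulated comparator with a hidden trip point (illustrative only, not part of the patch):

#include <stdio.h>

static int hidden_threshold = 41;	/* stands in for the analog trip point */

/* 1 when the trial DAC code drives the detector above the threshold */
static int comparator(int dac_code)
{
	return dac_code > hidden_threshold;
}

static int sar_search(void)
{
	int total = 0, i;

	for (i = 6; i > 0; i--) {
		int trial = total + (1 << (i - 1));

		if (!comparator(trial))		/* keep the bit if still low */
			total = trial;
	}
	return total;
}

int main(void)
{
	printf("caldac = %d\n", sar_search());	/* 41 */
	return 0;
}
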
+
+static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ bool run_rtt_cal)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ int i;
+
+ if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
+ return;
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+ return;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
+ }
+
+ if (caldata)
+ set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+ if (IS_CHAN_2GHZ(chan)) {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+ } else {
+ caldata->caldac[0] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ caldata->caldac[1] = REG_READ_FIELD(ah,
+ AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+ }
+ }
+}
+
+static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+{
+ u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txclcal_done = false;
+ int i, j;
+
+ if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
+ return;
+
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_CLC_SUCCESS);
+
+ if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
+ caldata->tx_clcal[i][j]);
+ }
+ } else if (is_reusable && txclcal_done) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ caldata->tx_clcal[i][j] =
+ REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
+ }
+ set_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ }
+}
+
+static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txiqcal_done = false;
+ bool is_reusable = true, status = true;
+ bool run_rtt_cal = false, run_agc_cal;
+ bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ u32 rx_delay = 0;
+ u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL;
+
+ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (rtt) {
+ if (!ar9003_hw_rtt_restore(ah, chan))
+ run_rtt_cal = true;
+
+ if (run_rtt_cal)
+ ath_dbg(common, CALIBRATE, "RTT calibration to be done\n");
+ }
+
+ run_agc_cal = run_rtt_cal;
+
+ if (run_rtt_cal) {
+ ar9003_hw_rtt_enable(ah);
+ ar9003_hw_rtt_set_mask(ah, 0x00);
+ ar9003_hw_rtt_clear_hist(ah);
+ }
+
+ if (rtt) {
+ if (!run_rtt_cal) {
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_supp_cals &= agc_ctrl;
+ agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+ AR_PHY_AGC_CONTROL_FLTR_CAL |
+ AR_PHY_AGC_CONTROL_PKDET_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ } else {
+ if (ah->ah_flags & AH_FASTCC)
+ run_agc_cal = true;
+ }
+ }
+
+ if (ah->enabled_cals & TX_CL_CAL) {
+ if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ else {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL,
+ AR_PHY_CL_CAL_ENABLE);
+ run_agc_cal = true;
+ }
+ }
+
+ if ((IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) ||
+ !(ah->enabled_cals & TX_IQ_CAL))
+ goto skip_tx_iqcal;
+
+ /* Do Tx IQ Calibration */
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+
+ /*
+ * For AR9485 or later chips, TxIQ cal runs as part of
+ * AGC calibration
+ */
+ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+ if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ else
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+ txiqcal_done = run_agc_cal = true;
+ }
+
+skip_tx_iqcal:
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ ar9003_mci_init_cal_req(ah, &is_reusable);
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+ /* Disable BB_active */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
+ if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+
+ ar9003_hw_do_pcoem_manual_peak_cal(ah, chan, run_rtt_cal);
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+ REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+ udelay(5);
+ }
+
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ ar9003_mci_init_cal_done(ah);
+
+ if (rtt && !run_rtt_cal) {
+ agc_ctrl |= agc_supp_cals;
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ }
+
+ if (!status) {
+ if (run_rtt_cal)
+ ar9003_hw_rtt_disable(ah);
+
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
+ else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
+ ar9003_hw_tx_iq_cal_reload(ah);
+
+ ar9003_hw_cl_cal_post_proc(ah, is_reusable);
+
+ if (run_rtt_cal && caldata) {
+ if (is_reusable) {
+ if (!ath9k_hw_rfbus_req(ah)) {
+ ath_err(ath9k_hw_common(ah),
+ "Could not stop baseband\n");
+ } else {
+ ar9003_hw_rtt_fill_hist(ah);
+
+ if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+ ar9003_hw_rtt_load_hist(ah);
+ }
+
+ ath9k_hw_rfbus_done(ah);
+ }
+
+ ar9003_hw_rtt_disable(ah);
+ }
+
+ /* Revert chainmask to runtime parameters */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ if (caldata)
+ caldata->CalValid = 0;
+
+ return true;
+}
+
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool status;
+
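+	/* Calibrate the AGC */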
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
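+	/* Poll for offset calibration complete */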
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+			"offset calibration failed to complete in %d ms; noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ return true;
+}
+
+static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txiqcal_done = false;
+ bool status = true;
+ bool run_agc_cal = false, sep_iq_cal = false;
+ int i = 0;
+
+ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (ah->enabled_cals & TX_CL_CAL) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ run_agc_cal = true;
+ }
+
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+ goto skip_tx_iqcal;
+
+ /* Do Tx IQ Calibration */
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+
+ /*
+	 * For AR9485 or later chips, TxIQ cal runs as part of the
+	 * AGC calibration; in the SoC family this applies to the AR9550.
+ */
+ if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+ txiqcal_done = true;
+ } else {
+ txiqcal_done = false;
+ }
+ run_agc_cal = true;
+ } else {
+ sep_iq_cal = true;
+ run_agc_cal = true;
+ }
+
+ /*
+ * In the SoC family, this will run for AR9300, AR9331 and AR9340.
+ */
+ if (sep_iq_cal) {
+ txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ }
+
+ if (AR_SREV_9550(ah) && IS_CHAN_2GHZ(chan)) {
+ if (!ar9003_hw_dynamic_osdac_selection(ah, txiqcal_done))
+ return false;
+ }
+
+skip_tx_iqcal:
+ if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+ if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
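+			/* Manual peak calibration for each active RX chain */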
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i,
+ IS_CHAN_2GHZ(chan));
+ }
+ }
+
+ /*
+ * For non-AR9550 chips, we just trigger AGC calibration
+ * in the HW, poll for completion and then process
+ * the results.
+ *
+ * For AR955x, we run it multiple times and use
+ * median IQ correction.
+ */
+ if (!AR_SREV_9550(ah)) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+ } else {
+ if (!txiqcal_done) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ } else {
+ for (i = 0; i < MAXIQCAL; i++) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+ }
+ }
+ }
+ }
+
+ /* Revert chainmask to runtime parameters */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ if (caldata)
+ caldata->CalValid = 0;
+
+ return true;
+}
+
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
+ else
+ priv_ops->init_cal = ar9003_hw_init_cal_soc;
+
+ priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
+ priv_ops->setup_calibration = ar9003_hw_setup_calibration;
+
+ ops->calibrate = ar9003_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 000000000..8b4561e8c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,5510 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
+#include "ar9003_mci.h"
+
+#define COMP_HDR_LEN 4
+#define COMP_CKSUM_LEN 2
+
+#define LE16(x) cpu_to_le16(x)
+#define LE32(x) cpu_to_le32(x)
+
+/* Local defines to distinguish between extension and control CTL's */
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+
+#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
+#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+
+#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
+
+#define EEPROM_DATA_LEN_9485 1088
+
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np);
+
+static const struct ar9300_eeprom ar9300_default = {
+ .eepromVersion = 2,
+ .templateVersion = 2,
+ .macAddr = {0, 2, 3, 4, 5, 6},
+ .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0c,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 3,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12
+	 * for ar9280 (0xa20c/b20c 16:12)
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 36,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {0, 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {36, 36, 36, 36} },
+ { {36, 36, 36, 36} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .calTargetPower2GHT40 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x000), LE16(0x000), LE16(0x000),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 0,
+ .tempSlopeHigh = 0,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+static const struct ar9300_eeprom ar9300_x113 = {
+ .eepromVersion = 2,
+ .templateVersion = 6,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"x113-023-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x21,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x11111),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0xf,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 72,
+ .tempSlopeHigh = 105,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5190, 0),
+ FREQ2FBIN(5230, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5410, 0),
+ FREQ2FBIN(5510, 0),
+ FREQ2FBIN(5670, 0),
+ FREQ2FBIN(5755, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} },
+ { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} },
+ { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+
+static const struct ar9300_eeprom ar9300_h112 = {
+ .eepromVersion = 2,
+ .templateVersion = 3,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"h112-241-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x10,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2462, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 45,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 40,
+ .tempSlopeHigh = 50,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+
+static const struct ar9300_eeprom ar9300_x112 = {
+ .eepromVersion = 2,
+ .templateVersion = 5,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"x112-041-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastclock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[ar9300_max_chains]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
+
+ /*
+		 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0x1b, 0x1b, 0x1b},
+
+ /*
+		 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+		 * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x15, 0x15, 0x15},
+ .tempSlope = 50,
+ .voltSlope = 0,
+
+ /*
+		 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+		 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+		/* 1L-5L,5S,11L,11S */
+ { {38, 38, 38, 38} },
+ { {38, 38, 38, 38} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {38, 38, 36, 34} },
+ { {38, 38, 36, 34} },
+ { {38, 38, 34, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
+ { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} },
+ { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
+ },
+ .calTargetPower2GHT40 = {
+ { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
+ { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} },
+ { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x0), LE16(0x0), LE16(0x0),
+ },
+ /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0x13, 0x19, 0x17},
+
+ /*
+		 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+		 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x19, 0x19, 0x19},
+ .tempSlope = 70,
+ .voltSlope = 15,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshch check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 72,
+ .tempSlopeHigh = 105,
+ .xatten1DBLow = {0x10, 0x14, 0x10},
+ .xatten1MarginLow = {0x19, 0x19 , 0x19},
+ .xatten1DBHigh = {0x1d, 0x20, 0x24},
+ .xatten1MarginHigh = {0x10, 0x10, 0x10}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 26} },
+ { {32, 32, 28, 26} },
+ { {32, 32, 28, 26} },
+ { {32, 32, 26, 24} },
+ { {32, 32, 26, 24} },
+ { {32, 32, 24, 22} },
+ { {30, 30, 24, 22} },
+ { {30, 30, 24, 22} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} },
+ { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} },
+ { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} },
+ { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} },
+ { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctledges[6].bchannel */ 0xFF,
+ /* Data[3].ctledges[7].bchannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctledges[4].bchannel */ 0xFF,
+ /* Data[4].ctledges[5].bchannel */ 0xFF,
+ /* Data[4].ctledges[6].bchannel */ 0xFF,
+ /* Data[4].ctledges[7].bchannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctledges[6].bchannel */ 0xFF,
+ /* Data[5].ctledges[7].bchannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+static const struct ar9300_eeprom ar9300_h116 = {
+ .eepromVersion = 2,
+ .templateVersion = 4,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"h116-041-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x10,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0x1f, 0x1f, 0x1f},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+		 * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x12, 0x12, 0x12},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80C080),
+ .papdRateMaskHt40 = LE32(0x0080C080),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2462, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0x19, 0x19, 0x19},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+		 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x14, 0x14, 0x14},
+ .tempSlope = 70,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .switchcomspdt = 0,
+ .xlna_bias_strength = 0,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 35,
+ .tempSlopeHigh = 50,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5160, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+
+static const struct ar9300_eeprom *ar9300_eep_templates[] = {
+ &ar9300_default,
+ &ar9300_x112,
+ &ar9300_h116,
+ &ar9300_h112,
+ &ar9300_x113,
+};
+
+static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
+{
+#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
+ int it;
+
+ for (it = 0; it < N_LOOP; it++)
+ if (ar9300_eep_templates[it]->templateVersion == id)
+ return ar9300_eep_templates[it];
+ return NULL;
+#undef N_LOOP
+}
+
+static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
+{
+ return 0;
+}
+
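+/*
+ * Linear interpolation between (xa, ya) and (xb, yb) evaluated at x.
+ * The delta is computed in half-steps so that an odd half-step rounds
+ * the result away from ya instead of being truncated.
+ */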
+static int interpolate(int x, int xa, int xb, int ya, int yb)
+{
+ int bf, factor, plus;
+
+ bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
+ factor = bf / 2;
+ plus = bf % 2;
+ return ya + factor + plus;
+}
+
+static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_MAC_LSW:
+ return get_unaligned_be16(eep->macAddr);
+ case EEP_MAC_MID:
+ return get_unaligned_be16(eep->macAddr + 2);
+ case EEP_MAC_MSW:
+ return get_unaligned_be16(eep->macAddr + 4);
+ case EEP_REG_0:
+ return le16_to_cpu(pBase->regDmn[0]);
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags.opFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_TX_MASK:
+ return (pBase->txrxMask >> 4) & 0xf;
+ case EEP_RX_MASK:
+ return pBase->txrxMask & 0xf;
+ case EEP_PAPRD:
+ return !!(pBase->featureEnable & BIT(5));
+ case EEP_CHAIN_MASK_REDUCE:
+ return (pBase->miscConfiguration >> 0x3) & 0x1;
+ case EEP_ANT_DIV_CTL1:
+ if (AR_SREV_9565(ah))
+ return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+ else
+ return eep->base_ext1.ant_div_control;
+ case EEP_ANTENNA_GAIN_5G:
+ return eep->modalHeader5G.antennaGain;
+ case EEP_ANTENNA_GAIN_2G:
+ return eep->modalHeader2G.antennaGain;
+ default:
+ return 0;
+ }
+}
+
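+/*
+ * The NVRAM is addressed in 16-bit words: byte <address> lives in word
+ * address / 2, and address % 2 selects the low or high byte of it.
+ */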
+static bool ar9300_eeprom_read_byte(struct ath_hw *ah, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
+ return false;
+
+ *buffer = (val >> (8 * (address % 2))) & 0xff;
+ return true;
+}
+
+static bool ar9300_eeprom_read_word(struct ath_hw *ah, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
+ return false;
+
+ buffer[0] = val >> 8;
+ buffer[1] = val & 0xff;
+
+ return true;
+}
+
+static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i;
+
+ if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
+ ath_dbg(common, EEPROM, "eeprom address not in range\n");
+ return false;
+ }
+
+ /*
+ * Since we're reading the bytes in reverse order from a little-endian
+ * word stream, an even address means we only use the lower half of
+ * the 16-bit word at that address
+ */
+ if (address % 2 == 0) {
+ if (!ar9300_eeprom_read_byte(ah, address--, buffer++))
+ goto error;
+
+ count--;
+ }
+
+ for (i = 0; i < count / 2; i++) {
+ if (!ar9300_eeprom_read_word(ah, address, buffer))
+ goto error;
+
+ address -= 2;
+ buffer += 2;
+ }
+
+ if (count % 2)
+ if (!ar9300_eeprom_read_byte(ah, address, buffer))
+ goto error;
+
+ return true;
+
+error:
+ ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n",
+ address);
+ return false;
+}
+
+static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
+{
+ REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
+
+ if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
+ AR9300_OTP_STATUS_VALID, 1000))
+ return false;
+
+ *data = REG_READ(ah, AR9300_OTP_READ_DATA);
+ return true;
+}
+
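+/*
+ * Read <count> bytes from OTP, walking downwards from <address>; each
+ * byte is picked out of the 32-bit OTP word that contains it.
+ */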
+static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int offset = 8 * ((address - i) % 4);
+ if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
+ return false;
+
+ buffer[i] = (data >> offset) & 0xff;
+ }
+
+ return true;
+}
+
+
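+/*
+ * Unpack the 4-byte header that precedes each block in the eeprom
+ * image:
+ *   byte 0: bits 7:5 compression code, bits 4:0 low bits of the
+ *           reference template id
+ *   byte 1: bit 7 high bit of the reference id, bits 6:0 high bits
+ *           of the block length
+ *   byte 2: bits 7:4 low bits of the block length, bits 3:0 major
+ *           version
+ *   byte 3: minor version
+ */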
+static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
+ int *length, int *major, int *minor)
+{
+ unsigned long value[4];
+
+ value[0] = best[0];
+ value[1] = best[1];
+ value[2] = best[2];
+ value[3] = best[3];
+ *code = ((value[0] >> 5) & 0x0007);
+ *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
+ *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
+ *major = (value[2] & 0x000f);
+ *minor = (value[3] & 0x00ff);
+}
+
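+/* Simple 16-bit additive checksum over the block payload. */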
+static u16 ar9300_comp_cksum(u8 *data, int dsize)
+{
+ int it, checksum = 0;
+
+ for (it = 0; it < dsize; it++) {
+ checksum += data[it];
+ checksum &= 0xffff;
+ }
+
+ return checksum;
+}
+
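+/*
+ * Apply a block-compressed image on top of the template in mptr. The
+ * block is a list of records, each made of an offset delta, a length
+ * byte and <length> bytes of payload copied at the accumulated offset.
+ */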
+static bool ar9300_uncompress_block(struct ath_hw *ah,
+ u8 *mptr,
+ int mdataSize,
+ u8 *block,
+ int size)
+{
+ int it;
+ int spot;
+ int offset;
+ int length;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ spot = 0;
+
+ for (it = 0; it < size; it += (length+2)) {
+ offset = block[it];
+ offset &= 0xff;
+ spot += offset;
+ length = block[it+1];
+ length &= 0xff;
+
+ if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
+ ath_dbg(common, EEPROM,
+ "Restore at %d: spot=%d offset=%d length=%d\n",
+ it, spot, offset, length);
+ memcpy(&mptr[spot], &block[it+2], length);
+ spot += length;
+ } else if (length > 0) {
+ ath_dbg(common, EEPROM,
+ "Bad restore at %d: spot=%d offset=%d length=%d\n",
+ it, spot, offset, length);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int ar9300_compress_decision(struct ath_hw *ah,
+ int it,
+ int code,
+ int reference,
+ u8 *mptr,
+ u8 *word, int length, int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ar9300_eeprom *eep = NULL;
+
+ switch (code) {
+ case _CompressNone:
+ if (length != mdata_size) {
+ ath_dbg(common, EEPROM,
+ "EEPROM structure size mismatch memory=%d eeprom=%d\n",
+ mdata_size, length);
+ return -1;
+ }
+ memcpy(mptr, word + COMP_HDR_LEN, length);
+ ath_dbg(common, EEPROM,
+ "restored eeprom %d: uncompressed, length %d\n",
+ it, length);
+ break;
+ case _CompressBlock:
+		if (reference != 0) {
+ eep = ar9003_eeprom_struct_find_by_id(reference);
+ if (eep == NULL) {
+ ath_dbg(common, EEPROM,
+ "can't find reference eeprom struct %d\n",
+ reference);
+ return -1;
+ }
+ memcpy(mptr, eep, mdata_size);
+ }
+ ath_dbg(common, EEPROM,
+ "restore eeprom %d: block, reference %d, length %d\n",
+ it, reference, length);
+ ar9300_uncompress_block(ah, mptr, mdata_size,
+ (word + COMP_HDR_LEN), length);
+ break;
+ default:
+ ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
+ return -1;
+ }
+ return 0;
+}
+
+typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer,
+ int count);
+
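+/* A first word of all zeros or all ones means the region is blank. */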
+static bool ar9300_check_header(void *data)
+{
+ u32 *word = data;
+ return !(*word == 0 || *word == ~0);
+}
+
+static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
+ int base_addr)
+{
+ u8 header[4];
+
+ if (!read(ah, base_addr, header, 4))
+ return false;
+
+ return ar9300_check_header(header);
+}
+
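+/*
+ * On flash based devices the calibration data is stored uncompressed,
+ * so it can simply be read out as 16-bit words.
+ */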
+static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
+ int mdata_size)
+{
+ u16 *data = (u16 *) mptr;
+ int i;
+
+ for (i = 0; i < mdata_size / 2; i++, data++)
+ ath9k_hw_nvram_read(ah, i, data);
+
+ return 0;
+}
+/*
+ * Read the configuration data from the eeprom.
+ * The data can be put in any specified memory buffer.
+ *
+ * Returns a negative value on error.
+ * Returns address of next memory location on success.
+ */
+static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
+ u8 *mptr, int mdata_size)
+{
+#define MDEFAULT 15
+#define MSTATE 100
+ int cptr;
+ u8 *word;
+ int code;
+ int reference, length, major, minor;
+ int osize;
+ int it;
+ u16 checksum, mchecksum;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *eep;
+ eeprom_read_op read;
+
+ if (ath9k_hw_use_flash(ah)) {
+ u8 txrx;
+
+ ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+
+ /* check if eeprom contains valid data */
+ eep = (struct ar9300_eeprom *) mptr;
+ txrx = eep->baseEepHeader.txrxMask;
+ if (txrx != 0 && txrx != 0xff)
+ return 0;
+ }
+
+ word = kzalloc(2048, GFP_KERNEL);
+ if (!word)
+ return -ENOMEM;
+
+ memcpy(mptr, &ar9300_default, mdata_size);
+
+ read = ar9300_read_eeprom;
+ if (AR_SREV_9485(ah))
+ cptr = AR9300_BASE_ADDR_4K;
+ else if (AR_SREV_9330(ah))
+ cptr = AR9300_BASE_ADDR_512;
+ else
+ cptr = AR9300_BASE_ADDR;
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ cptr = AR9300_BASE_ADDR_512;
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ read = ar9300_read_otp;
+ cptr = AR9300_BASE_ADDR;
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ cptr = AR9300_BASE_ADDR_512;
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ goto fail;
+
+found:
+ ath_dbg(common, EEPROM, "Found valid EEPROM data\n");
+
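+	/*
+	 * The image is a chain of compressed blocks growing downwards from
+	 * the base address; restore every block whose checksum matches.
+	 */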
+ for (it = 0; it < MSTATE; it++) {
+ if (!read(ah, cptr, word, COMP_HDR_LEN))
+ goto fail;
+
+ if (!ar9300_check_header(word))
+ break;
+
+ ar9300_comp_hdr_unpack(word, &code, &reference,
+ &length, &major, &minor);
+ ath_dbg(common, EEPROM,
+ "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
+ cptr, code, reference, length, major, minor);
+ if ((!AR_SREV_9485(ah) && length >= 1024) ||
+ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
+ ath_dbg(common, EEPROM, "Skipping bad header\n");
+ cptr -= COMP_HDR_LEN;
+ continue;
+ }
+
+ osize = length;
+ read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
+ mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
+ ath_dbg(common, EEPROM, "checksum %x %x\n",
+ checksum, mchecksum);
+ if (checksum == mchecksum) {
+ ar9300_compress_decision(ah, it, code, reference, mptr,
+ word, length, mdata_size);
+ } else {
+ ath_dbg(common, EEPROM,
+ "skipping block with bad checksum\n");
+ }
+ cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ }
+
+ kfree(word);
+ return cptr;
+
+fail:
+ kfree(word);
+ return -1;
+}
+
+/*
+ * Restore the configuration structure by reading the eeprom.
+ * This function destroys any existing in-memory structure
+ * content.
+ */
+static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
+{
+ u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;
+
+ if (ar9300_eeprom_restore_internal(ah, mptr,
+ sizeof(struct ar9300_eeprom)) < 0)
+ return false;
+
+ return true;
+}
+
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct ar9300_modal_eep_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+ PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
+ PR_EEP("Ant. Common Control2", le32_to_cpu(modal_hdr->antCtrlCommon2));
+ PR_EEP("Ant. Gain", modal_hdr->antennaGain);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 xatten1DB", modal_hdr->xatten1DB[0]);
+ PR_EEP("Chain1 xatten1DB", modal_hdr->xatten1DB[1]);
+ PR_EEP("Chain2 xatten1DB", modal_hdr->xatten1DB[2]);
+ PR_EEP("Chain0 xatten1Margin", modal_hdr->xatten1Margin[0]);
+ PR_EEP("Chain1 xatten1Margin", modal_hdr->xatten1Margin[1]);
+ PR_EEP("Chain2 xatten1Margin", modal_hdr->xatten1Margin[2]);
+ PR_EEP("Temp Slope", modal_hdr->tempSlope);
+ PR_EEP("Volt Slope", modal_hdr->voltSlope);
+ PR_EEP("spur Channels0", modal_hdr->spurChans[0]);
+ PR_EEP("spur Channels1", modal_hdr->spurChans[1]);
+ PR_EEP("spur Channels2", modal_hdr->spurChans[2]);
+ PR_EEP("spur Channels3", modal_hdr->spurChans[3]);
+ PR_EEP("spur Channels4", modal_hdr->spurChans[4]);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("Quick Drop", modal_hdr->quick_drop);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("txClip", modal_hdr->txClip);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+
+ return len;
+}
+
+static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase;
+
+ if (!dump_base_hdr) {
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len = ar9003_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader2G);
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
+ len = ar9003_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader5G);
+ goto out;
+ }
+
+ pBase = &eep->baseEepHeader;
+
+ PR_EEP("EEPROM Version", ah->eeprom.ar9300_eep.eepromVersion);
+ PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+ PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
+ PR_EEP("TX Mask", (pBase->txrxMask >> 4));
+ PR_EEP("RX Mask", (pBase->txrxMask & 0x0f));
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+	PR_EEP("Disable 5GHz HT20", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+	PR_EEP("Disable 5GHz HT40", !!(pBase->opCapFlags.opFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
+ PR_EEP("RF Silent", pBase->rfSilent);
+ PR_EEP("BT option", pBase->blueToothOptions);
+ PR_EEP("Device Cap", pBase->deviceCap);
+ PR_EEP("Device Type", pBase->deviceType);
+ PR_EEP("Power Table Offset", pBase->pwrTableOffset);
+ PR_EEP("Tuning Caps1", pBase->params_for_tuning_caps[0]);
+ PR_EEP("Tuning Caps2", pBase->params_for_tuning_caps[1]);
+ PR_EEP("Enable Tx Temp Comp", !!(pBase->featureEnable & BIT(0)));
+ PR_EEP("Enable Tx Volt Comp", !!(pBase->featureEnable & BIT(1)));
+ PR_EEP("Enable fast clock", !!(pBase->featureEnable & BIT(2)));
+ PR_EEP("Enable doubling", !!(pBase->featureEnable & BIT(3)));
+ PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
+ PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
+ PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+ PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
+ PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
+ PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
+ PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
+ PR_EEP("WLAN LED Gpio", pBase->wlanLedGpio);
+ PR_EEP("Rx Band Select Gpio", pBase->rxBandSelectGpio);
+ PR_EEP("Tx Gain", pBase->txrxgain >> 4);
+ PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
+ PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
+
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ ah->eeprom.ar9300_eep.macAddr);
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
+/* XXX: review hardware docs */
+static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ah->eeprom.ar9300_eep.eepromVersion;
+}
+
+/* XXX: could be read from the eepromVersion, not sure yet */
+static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static struct ar9300_modal_eep_header *ar9003_modal_header(struct ath_hw *ah,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return &eep->modalHeader2G;
+ else
+ return &eep->modalHeader5G;
+}
+
+static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
+{
+ int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
+
+ if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9531(ah) || AR_SREV_9561(ah))
+ REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
+ else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
+ else {
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_THERM,
+ AR_CH0_THERM_XPABIASLVL_MSB,
+ bias >> 2);
+ REG_RMW_FIELD(ah, AR_CH0_THERM,
+ AR_CH0_THERM_XPASHORT2GND, 1);
+ }
+}
+
+static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
+{
+ return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
+}
+
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
+}
+
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+{
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
+}
+
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
+ bool is2ghz)
+{
+ __le16 val = ar9003_modal_header(ah, is2ghz)->antCtrlChain[chain];
+ return le16_to_cpu(val);
+}
+
+static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ int chain;
+ u32 regval, value, gpio;
+ static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
+ AR_PHY_SWITCH_CHAIN_0,
+ AR_PHY_SWITCH_CHAIN_1,
+ AR_PHY_SWITCH_CHAIN_2,
+ };
+
+ if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) {
+ if (ah->config.xlna_gpio)
+ gpio = ah->config.xlna_gpio;
+ else
+ gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
+
+ ath9k_hw_cfg_output(ah, gpio,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+ }
+
+ value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_AR9462_ALL, value);
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_AR9550_ALL, value);
+ } else
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_ALL, value);
+
+
+ /*
+	 * AR9462 defines a new switch table for BT/WLAN; the new field
+	 * names in XXX.ref, for both 2G and 5G, are:
+ * Register: [GLB_CONTROL] GLB_CONTROL (@0x20044)
+ * 15:12 R/W SWITCH_TABLE_COM_SPDT_WLAN_RX
+ * SWITCH_TABLE_COM_SPDT_WLAN_RX
+ *
+ * 11:8 R/W SWITCH_TABLE_COM_SPDT_WLAN_TX
+ * SWITCH_TABLE_COM_SPDT_WLAN_TX
+ *
+ * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
+ * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
+ */
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
+ value = ar9003_switch_com_spdt_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_SWITCH_TABLE_COM_SPDT_ALL, value);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE);
+ }
+
+ value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
+ value &= ~AR_SWITCH_TABLE_COM2_ALL;
+ value |= ah->config.ant_ctrl_comm2g_switch_enable;
+
+ }
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+
+ if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, switch_chain_reg[0],
+ AR_SWITCH_TABLE_ALL, value);
+ }
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if ((ah->rxchainmask & BIT(chain)) ||
+ (ah->txchainmask & BIT(chain))) {
+ value = ar9003_hw_ant_ctrl_chain_get(ah, chain,
+ is2ghz);
+ REG_RMW_FIELD(ah, switch_chain_reg[chain],
+ AR_SWITCH_TABLE_ALL, value);
+ }
+ }
+
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
+ value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ /*
+		 * The value contains the main_lnaconf, alt_lnaconf,
+		 * main_tb and alt_tb fields.
+ */
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~AR_ANT_DIV_CTRL_ALL);
+ regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+ /* enable_lnadiv */
+ regval &= (~AR_PHY_ANT_DIV_LNADIV);
+ regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ regval |= AR_ANT_DIV_ENABLE;
+
+ if (AR_SREV_9565(ah)) {
+ if (common->bt_ant_diversity) {
+ regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+ /* Force WLAN LNA diversity ON */
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ } else {
+ regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
+ regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+ /* Force WLAN LNA diversity OFF */
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ /* enable fast_div */
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= (~AR_FAST_DIV_ENABLE);
+ regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+ if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ && common->bt_ant_diversity)
+ regval |= AR_FAST_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ /*
+ * clear bits 25-30 main_lnaconf, alt_lnaconf,
+ * main_tb, alt_tb
+ */
+ regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_ALT_GAINTB |
+ AR_PHY_ANT_DIV_MAIN_GAINTB));
+ /* by default use LNA1 for the main antenna */
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+ }
+}
+
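+/*
+ * Apply the alternate bias settings when the drive strength option
+ * (miscConfiguration bit 0) is set in the eeprom.
+ */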
+static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int drive_strength;
+ unsigned long reg;
+
+ drive_strength = pBase->miscConfiguration & BIT(0);
+ if (!drive_strength)
+ return;
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
+ reg &= ~0x00ffffc0;
+ reg |= 0x5 << 21;
+ reg |= 0x5 << 18;
+ reg |= 0x5 << 15;
+ reg |= 0x5 << 12;
+ reg |= 0x5 << 9;
+ reg |= 0x5 << 6;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
+ reg &= ~0xffffffe0;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ reg |= 0x5 << 20;
+ reg |= 0x5 << 17;
+ reg |= 0x5 << 14;
+ reg |= 0x5 << 11;
+ reg |= 0x5 << 8;
+ reg |= 0x5 << 5;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
+ reg &= ~0xff800000;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
+}
+
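+/*
+ * Return the xatten1 dB setting for a chain. For 5 GHz channels the
+ * value is interpolated between the 5180, 5500 and 5785 MHz points
+ * when the low/high extension values are present in the eeprom.
+ */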
+static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
+ struct ath9k_channel *chan)
+{
+ int f[3], t[3];
+ u16 value;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (chain >= 0 && chain < 3) {
+ if (IS_CHAN_2GHZ(chan))
+ return eep->modalHeader2G.xatten1DB[chain];
+ else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
+ t[0] = eep->base_ext2.xatten1DBLow[chain];
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.xatten1DB[chain];
+ f[1] = 5500;
+ t[2] = eep->base_ext2.xatten1DBHigh[chain];
+ f[2] = 5785;
+ value = ar9003_hw_power_interpolate((s32) chan->channel,
+ f, t, 3);
+ return value;
+ } else
+ return eep->modalHeader5G.xatten1DB[chain];
+ }
+
+ return 0;
+}
+
+
+static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
+ struct ath9k_channel *chan)
+{
+ int f[3], t[3];
+ u16 value;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (chain >= 0 && chain < 3) {
+ if (IS_CHAN_2GHZ(chan))
+ return eep->modalHeader2G.xatten1Margin[chain];
+ else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
+ t[0] = eep->base_ext2.xatten1MarginLow[chain];
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.xatten1Margin[chain];
+ f[1] = 5500;
+ t[2] = eep->base_ext2.xatten1MarginHigh[chain];
+ f[2] = 5785;
+ value = ar9003_hw_power_interpolate((s32) chan->channel,
+ f, t, 3);
+ return value;
+ } else
+ return eep->modalHeader5G.xatten1Margin[chain];
+ }
+
+ return 0;
+}
+
+static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u16 value;
+ unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
+ AR_PHY_EXT_ATTEN_CTL_1,
+ AR_PHY_EXT_ATTEN_CTL_2,
+ };
+
+ if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
+ value = ar9003_hw_atten_chain_get(ah, 1, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[0],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
+
+ value = ar9003_hw_atten_chain_get_margin(ah, 1, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[0],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+ }
+
+	/* Test the value; if 0, attenuation is unused and nothing is loaded. */
+ for (i = 0; i < 3; i++) {
+ if (ah->txchainmask & BIT(i)) {
+ value = ar9003_hw_atten_chain_get(ah, i, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[i],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
+
+ if (AR_SREV_9485(ah) &&
+ (ar9003_hw_get_rx_gain_idx(ah) == 0) &&
+ ah->config.xatten_margin_cfg)
+ value = 5;
+ else
+ value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+
+ if (ah->config.alt_mingainidx)
+ REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0,
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+
+ REG_RMW_FIELD(ah, ext_atten_reg[i],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+ }
+ }
+}
+
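+/*
+ * Write a PMU register and poll until it reads back the expected value,
+ * retrying the write up to 100 times with a 10us delay before giving up.
+ */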
+static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
+{
+ int timeout = 100;
+
+ while (pmu_set != REG_READ(ah, pmu_reg)) {
+ if (timeout-- == 0)
+ return false;
+ REG_WRITE(ah, pmu_reg, pmu_set);
+ udelay(10);
+ }
+
+ return true;
+}
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ u32 reg_val;
+
+ if (pBase->featureEnable & BIT(4)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+ int reg_pmu_set;
+
+ reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+
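+			/*
+			 * PMU1 programming: AR9330 uses different constants
+			 * for 25 MHz and 40 MHz reference clocks, AR9485
+			 * uses a single fixed value.
+			 */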
+ if (AR_SREV_9330(ah)) {
+ if (ah->is_clk_25mhz) {
+ reg_pmu_set = (3 << 1) | (8 << 4) |
+ (3 << 8) | (1 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24);
+ } else {
+ reg_pmu_set = (4 << 1) | (7 << 4) |
+ (3 << 8) | (1 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24);
+ }
+ } else {
+ reg_pmu_set = (5 << 1) | (7 << 4) |
+ (2 << 8) | (2 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24) | (1 << 28);
+ }
+
+ REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
+ return;
+
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
+ | (4 << 26);
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
+ | (1 << 21);
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+ } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) ||
+ AR_SREV_9561(ah)) {
+ reg_val = le32_to_cpu(pBase->swreg);
+ REG_WRITE(ah, AR_PHY_PMU1, reg_val);
+
+ if (AR_SREV_9561(ah))
+ REG_WRITE(ah, AR_PHY_PMU2, 0x10200000);
+ } else {
+ /* Internal regulator is ON. Write swreg register. */
+ reg_val = le32_to_cpu(pBase->swreg);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, reg_val);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ }
+ } else {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
+ while (REG_READ_FIELD(ah, AR_PHY_PMU2,
+ AR_PHY_PMU2_PGM))
+ udelay(10);
+
+ REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
+ AR_PHY_PMU1_PWD))
+ udelay(10);
+ REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
+ AR_PHY_PMU2_PGM))
+ udelay(10);
+ } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ else {
+ reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD;
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val);
+ }
+ }
+}
+
+static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
+
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+ return;
+
+ if (eep->baseEepHeader.featureEnable & 0x40) {
+ tuning_caps_param &= 0x7f;
+ REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
+ tuning_caps_param);
+ REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
+ tuning_caps_param);
+ }
+}
+
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int quick_drop;
+ s32 t[3], f[3] = {5180, 5500, 5785};
+
+ if (!(pBase->miscConfiguration & BIT(4)))
+ return;
+
+ if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
+ if (freq < 4000) {
+ quick_drop = eep->modalHeader2G.quick_drop;
+ } else {
+ t[0] = eep->base_ext1.quick_drop_low;
+ t[1] = eep->modalHeader5G.quick_drop;
+ t[2] = eep->base_ext1.quick_drop_high;
+ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+ }
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
+{
+ u32 value;
+
+ value = ar9003_modal_header(ah, is2ghz)->txEndToXpaOff;
+
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
+
+static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 xpa_ctl;
+
+ if (!(eep->baseEepHeader.featureEnable & 0x80))
+ return;
+
+ if (!AR_SREV_9300(ah) &&
+ !AR_SREV_9340(ah) &&
+ !AR_SREV_9580(ah) &&
+ !AR_SREV_9531(ah) &&
+ !AR_SREV_9561(ah))
+ return;
+
+ xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
+ if (is2ghz)
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON, xpa_ctl);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON, xpa_ctl);
+}
+
+static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 bias;
+
+ if (!(eep->baseEepHeader.miscConfiguration & 0x40))
+ return;
+
+ if (!AR_SREV_9300(ah))
+ return;
+
+ bias = ar9003_modal_header(ah, is2ghz)->xlna_bias_strength;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+}
+
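+/*
+ * The 2-bit thermometer field in miscConfiguration encodes "disabled" as 0,
+ * so the decremented value is either -1 (no thermometer) or a chain index.
+ */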
+static int ar9003_hw_get_thermometer(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int thermometer = (pBase->miscConfiguration >> 1) & 0x3;
+
+ return --thermometer;
+}
+
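+/*
+ * Enable the thermometer override on every populated chain and turn the
+ * thermometer itself on only for the chain selected by the EEPROM (if any).
+ */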
+static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ int thermometer = ar9003_hw_get_thermometer(ah);
+ u8 therm_on = (thermometer < 0) ? 0 : 1;
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+ if (pCap->chip_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+ if (pCap->chip_chainmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ if (pCap->chip_chainmask & BIT(1)) {
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ }
+ if (pCap->chip_chainmask & BIT(2)) {
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ }
+}
+
+static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
+{
+ u32 data, ko, kg;
+
+ if (!AR_SREV_9462_20_OR_LATER(ah))
+ return;
+
+ ar9300_otp_read_word(ah, 1, &data);
+ ko = data & 0xff;
+ kg = (data >> 8) & 0xff;
+ if (ko || kg) {
+ REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+ AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
+ REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+ AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
+ kg + 256);
+ }
+}
+
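+/*
+ * Program the per-chain minimum CCA power (thresh62) from the modal header,
+ * but only when the corresponding 2G/5G enable bit is set in
+ * base_ext1.misc_enable.
+ */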
+static void ar9003_hw_apply_minccapwr_thresh(struct ath_hw *ah,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ const u_int32_t cca_ctrl[AR9300_MAX_CHAINS] = {
+ AR_PHY_CCA_CTRL_0,
+ AR_PHY_CCA_CTRL_1,
+ AR_PHY_CCA_CTRL_2,
+ };
+ int chain;
+ u32 val;
+
+ if (is2ghz) {
+ if (!(eep->base_ext1.misc_enable & BIT(2)))
+ return;
+ } else {
+ if (!(eep->base_ext1.misc_enable & BIT(3)))
+ return;
+ }
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ val = ar9003_modal_header(ah, is2ghz)->noiseFloorThreshCh[chain];
+ REG_RMW_FIELD(ah, cca_ctrl[chain],
+ AR_PHY_EXT_CCA0_THRESH62_1, val);
+ }
+}
+
+static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ bool is2ghz = IS_CHAN_2GHZ(chan);
+ ar9003_hw_xpa_timing_control_apply(ah, is2ghz);
+ ar9003_hw_xpa_bias_level_apply(ah, is2ghz);
+ ar9003_hw_ant_ctrl_apply(ah, is2ghz);
+ ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
+ ar9003_hw_atten_apply(ah, chan);
+ ar9003_hw_quick_drop_apply(ah, chan->channel);
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah))
+ ar9003_hw_internal_regulator_apply(ah);
+ ar9003_hw_apply_tuning_caps(ah);
+	ar9003_hw_apply_minccapwr_thresh(ah, is2ghz);
+ ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
+ ar9003_hw_thermometer_apply(ah);
+ ar9003_hw_thermo_cal_apply(ah);
+}
+
+static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+}
+
+/*
+ * Returns the interpolated y value corresponding to the specified x value
+ * from the np ordered pairs of data (px,py).
+ * The pairs do not have to be in any order.
+ * If the specified x value is less than any of the px,
+ * the returned y value is equal to the py for the lowest px.
+ * If the specified x value is greater than any of the px,
+ * the returned y value is equal to the py for the highest px.
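+ * For example (illustrative values), with pairs (5180, 10), (5500, 14)
+ * and (5785, 16), a request for x = 5340 interpolates between the first
+ * two pairs and returns 12.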
+ */
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np)
+{
+ int ip = 0;
+ int lx = 0, ly = 0, lhave = 0;
+ int hx = 0, hy = 0, hhave = 0;
+ int dx = 0;
+ int y = 0;
+
+ lhave = 0;
+ hhave = 0;
+
+ /* identify best lower and higher x calibration measurement */
+ for (ip = 0; ip < np; ip++) {
+ dx = x - px[ip];
+
+ /* this measurement is higher than our desired x */
+ if (dx <= 0) {
+ if (!hhave || dx > (x - hx)) {
+ /* new best higher x measurement */
+ hx = px[ip];
+ hy = py[ip];
+ hhave = 1;
+ }
+ }
+ /* this measurement is lower than our desired x */
+ if (dx >= 0) {
+ if (!lhave || dx < (x - lx)) {
+ /* new best lower x measurement */
+ lx = px[ip];
+ ly = py[ip];
+ lhave = 1;
+ }
+ }
+ }
+
+ /* the low x is good */
+ if (lhave) {
+ /* so is the high x */
+ if (hhave) {
+ /* they're the same, so just pick one */
+ if (hx == lx)
+ y = ly;
+ else /* interpolate */
+ y = interpolate(x, lx, hx, ly, hy);
+ } else /* only low is good, use it */
+ y = ly;
+ } else if (hhave) /* only high is good, use it */
+ y = hy;
+	else /* nothing is good; this should never happen unless np == 0 */
+ y = -(1 << 30);
+ return y;
+}
+
+static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2G;
+ pFreqBin = eep->calTarget_freqbin_2G;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5G;
+ pFreqBin = eep->calTarget_freqbin_5G;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT20;
+ pFreqBin = eep->calTarget_freqbin_2GHT20;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT20;
+ pFreqBin = eep->calTarget_freqbin_5GHT20;
+ }
+
+ /*
+ * create array of channels and targetpower
+ * from targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT40;
+ pFreqBin = eep->calTarget_freqbin_2GHT40;
+ } else {
+ numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT40;
+ pFreqBin = eep->calTarget_freqbin_5GHT40;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq)
+{
+ u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
+ s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
+ u8 *pFreqBin = eep->calTarget_freqbin_Cck;
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], 1);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *pwr_array)
+{
+ u32 val;
+
+ /* target power values for self generated frames (ACK,RTS/CTS) */
+ if (IS_CHAN_2GHZ(chan)) {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_1L_5L], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ } else {
+ val = SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_ACK) |
+ SM(pwr_array[ALL_TARGET_LEGACY_6_24], AR_TPC_CTS) |
+ SM(0x3f, AR_TPC_CHIRP) | SM(0x3f, AR_TPC_RPT);
+ }
+ REG_WRITE(ah, AR_TPC, val);
+}
+
+/* Set tx power registers to array of values passed in */
+static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 *pPwrArray)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
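+	/*
+	 * Each AR_PHY_POWER_TX_RATE register packs four 6-bit target power
+	 * values (stored in half-dB units) at bit offsets 0, 8, 16 and 24.
+	 */
+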
+ /* make sure forced gain is not set */
+ REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
+
+ /* Write the OFDM power per rate set */
+
+ /* 6 (LSB), 9, 12, 18 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* 24 (LSB), 36, 48, 54 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* Write the CCK power per rate set */
+
+ /* 1L (LSB), reserved, 2L, 2S (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
+
+ /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the power for duplicated frames - HT40 */
+
+ /* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(8),
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
+ POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
+ POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
+ );
+
+	/* 14 (LSB), 15, 20, 21 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
+ POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
+ );
+
+ /* Mixed HT20 and HT40 rates */
+
+ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
+ POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
+ );
+
+ /*
+ * Write the HT40 power per rate set
+ * correct PAR difference between HT40 and HT20/LEGACY
+ * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
+ */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
+ POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
+ POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
+ );
+
+	/* 14 (LSB), 15, 20, 21 (MSB) */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
+ POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
+ );
+
+ return 0;
+#undef POW_SM
+}
+
+static void ar9003_hw_get_legacy_target_powers(struct ath_hw *ah, u16 freq,
+ u8 *targetPowerValT2,
+ bool is2GHz)
+{
+ targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_36] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_48] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_54] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
+ is2GHz);
+}
+
+static void ar9003_hw_get_cck_target_powers(struct ath_hw *ah, u16 freq,
+ u8 *targetPowerValT2)
+{
+ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
+ freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_5S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+}
+
+static void ar9003_hw_get_ht20_target_powers(struct ath_hw *ah, u16 freq,
+ u8 *targetPowerValT2, bool is2GHz)
+{
+ targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq, is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_4] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_5] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_6] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_7] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_12] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_13] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_14] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_15] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_20] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_21] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_22] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_23] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz);
+}
+
+static void ar9003_hw_get_ht40_target_powers(struct ath_hw *ah,
+ u16 freq,
+ u8 *targetPowerValT2,
+ bool is2GHz)
+{
+ /* XXX: hard code for now, need to get from eeprom struct */
+ u8 ht40PowerIncForPdadc = 0;
+
+ targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_4] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_5] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_6] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_7] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_12] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_13] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_14] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_15] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_20] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_21] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_22] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_23] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+}
+
+static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *targetPowerValT2)
+{
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+ unsigned int i = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 freq = chan->channel;
+
+ if (is2GHz)
+ ar9003_hw_get_cck_target_powers(ah, freq, targetPowerValT2);
+
+ ar9003_hw_get_legacy_target_powers(ah, freq, targetPowerValT2, is2GHz);
+ ar9003_hw_get_ht20_target_powers(ah, freq, targetPowerValT2, is2GHz);
+
+ if (IS_CHAN_HT40(chan))
+ ar9003_hw_get_ht40_target_powers(ah, freq, targetPowerValT2,
+ is2GHz);
+
+ for (i = 0; i < ar9300RateSize; i++) {
+ ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
+ }
+}
+
+static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
+ int mode,
+ int ipier,
+ int ichain,
+ int *pfrequency,
+ int *pcorrection,
+ int *ptemperature, int *pvoltage)
+{
+ u8 *pCalPier;
+ struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
+ int is2GHz;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ichain >= AR9300_MAX_CHAINS) {
+ ath_dbg(common, EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
+ return -1;
+ }
+
+ if (mode) { /* 5GHz */
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_dbg(common, EEPROM,
+ "Invalid 5GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
+ is2GHz = 0;
+ } else {
+ if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
+ ath_dbg(common, EEPROM,
+ "Invalid 2GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_2G_CAL_PIERS);
+ return -1;
+ }
+
+ pCalPier = &(eep->calFreqPier2G[ipier]);
+ pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
+ is2GHz = 1;
+ }
+
+ *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
+ *pcorrection = pCalPierStruct->refPower;
+ *ptemperature = pCalPierStruct->tempMeas;
+ *pvoltage = pCalPierStruct->voltMeas;
+
+ return 0;
+}
+
+static void ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
+{
+ int temp_slope = 0, temp_slope1 = 0, temp_slope2 = 0;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ int f[8], t[8], t1[3], t2[3], i;
+
+ REG_RMW(ah, AR_PHY_TPC_11_B0,
+ (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+
+ /* enable open loop power control on chip */
+ REG_RMW(ah, AR_PHY_TPC_6_B0,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+
+ /*
+ * enable temperature compensation
+ * Need to use register names
+ */
+ if (frequency < 4000) {
+ temp_slope = eep->modalHeader2G.tempSlope;
+ } else {
+ if (AR_SREV_9550(ah)) {
+ t[0] = eep->base_ext1.tempslopextension[2];
+ t1[0] = eep->base_ext1.tempslopextension[3];
+ t2[0] = eep->base_ext1.tempslopextension[4];
+ f[0] = 5180;
+
+ t[1] = eep->modalHeader5G.tempSlope;
+ t1[1] = eep->base_ext1.tempslopextension[0];
+ t2[1] = eep->base_ext1.tempslopextension[1];
+ f[1] = 5500;
+
+ t[2] = eep->base_ext1.tempslopextension[5];
+ t1[2] = eep->base_ext1.tempslopextension[6];
+ t2[2] = eep->base_ext1.tempslopextension[7];
+ f[2] = 5785;
+
+ temp_slope = ar9003_hw_power_interpolate(frequency,
+ f, t, 3);
+ temp_slope1 = ar9003_hw_power_interpolate(frequency,
+ f, t1, 3);
+ temp_slope2 = ar9003_hw_power_interpolate(frequency,
+ f, t2, 3);
+
+ goto tempslope;
+ }
+
+ if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+ for (i = 0; i < 8; i++) {
+ t[i] = eep->base_ext1.tempslopextension[i];
+ f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ }
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 8);
+ } else if (eep->base_ext2.tempSlopeLow != 0) {
+ t[0] = eep->base_ext2.tempSlopeLow;
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.tempSlope;
+ f[1] = 5500;
+ t[2] = eep->base_ext2.tempSlopeHigh;
+ f[2] = 5785;
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 3);
+ } else {
+ temp_slope = eep->modalHeader5G.tempSlope;
+ }
+ }
+
+tempslope:
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
+ u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
+
+ /*
+ * AR955x has tempSlope register for each chain.
+ * Check whether temp_compensation feature is enabled or not.
+ */
+ if (eep->baseEepHeader.featureEnable & 0x1) {
+ if (frequency < 4000) {
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeLow);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeHigh);
+ } else {
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope1);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope2);
+ }
+ } else {
+ /*
+ * If temp compensation is not enabled,
+ * set all registers to 0.
+ */
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ }
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
+ }
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
+
+ REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
+ temperature[0]);
+}
+
+/* Apply the recorded correction values. */
+static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
+{
+ int ichain, ipier, npier;
+ int mode;
+ int lfrequency[AR9300_MAX_CHAINS],
+ lcorrection[AR9300_MAX_CHAINS],
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ int hfrequency[AR9300_MAX_CHAINS],
+ hcorrection[AR9300_MAX_CHAINS],
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ int fdiff;
+ int correction[AR9300_MAX_CHAINS],
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mode = (frequency >= 4000);
+ if (mode)
+ npier = AR9300_NUM_5G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_2G_CAL_PIERS;
+
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ lfrequency[ichain] = 0;
+ hfrequency[ichain] = 100000;
+ }
+ /* identify best lower and higher frequency calibration measurement */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ for (ipier = 0; ipier < npier; ipier++) {
+ if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ &pfrequency, &pcorrection,
+ &ptemperature, &pvoltage)) {
+ fdiff = frequency - pfrequency;
+
+ /*
+ * this measurement is higher than
+ * our desired frequency
+ */
+ if (fdiff <= 0) {
+ if (hfrequency[ichain] <= 0 ||
+ hfrequency[ichain] >= 100000 ||
+ fdiff >
+ (frequency - hfrequency[ichain])) {
+ /*
+ * new best higher
+ * frequency measurement
+ */
+ hfrequency[ichain] = pfrequency;
+ hcorrection[ichain] =
+ pcorrection;
+ htemperature[ichain] =
+ ptemperature;
+ hvoltage[ichain] = pvoltage;
+ }
+ }
+ if (fdiff >= 0) {
+ if (lfrequency[ichain] <= 0
+ || fdiff <
+ (frequency - lfrequency[ichain])) {
+ /*
+ * new best lower
+ * frequency measurement
+ */
+ lfrequency[ichain] = pfrequency;
+ lcorrection[ichain] =
+ pcorrection;
+ ltemperature[ichain] =
+ ptemperature;
+ lvoltage[ichain] = pvoltage;
+ }
+ }
+ }
+ }
+ }
+
+ /* interpolate */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
+ /* they're the same, so just pick one */
+ if (hfrequency[ichain] == lfrequency[ichain]) {
+ correction[ichain] = lcorrection[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ }
+ /* the low frequency is good */
+ else if (frequency - lfrequency[ichain] < 1000) {
+ /* so is the high frequency, interpolate */
+ if (hfrequency[ichain] - frequency < 1000) {
+
+ correction[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lcorrection[ichain],
+ hcorrection[ichain]);
+
+ temperature[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ ltemperature[ichain],
+ htemperature[ichain]);
+
+ voltage[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lvoltage[ichain],
+ hvoltage[ichain]);
+ }
+ /* only low is good, use it */
+ else {
+ correction[ichain] = lcorrection[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ }
+ }
+ /* only high is good, use it */
+ else if (hfrequency[ichain] - frequency < 1000) {
+ correction[ichain] = hcorrection[ichain];
+ temperature[ichain] = htemperature[ichain];
+ voltage[ichain] = hvoltage[ichain];
+		} else { /* nothing is good, presume 0 */
+ correction[ichain] = 0;
+ temperature[ichain] = 0;
+ voltage[ichain] = 0;
+ }
+ }
+
+ ar9003_hw_power_control_override(ah, frequency, correction, voltage,
+ temperature);
+
+ ath_dbg(common, EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
+
+ return 0;
+}
+
+static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
+ int idx,
+ int edge,
+ bool is2GHz)
+{
+ struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+ struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+ if (is2GHz)
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
+ else
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
+}
+
+static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
+ int idx,
+ unsigned int edge,
+ u16 freq,
+ bool is2GHz)
+{
+ struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+ struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+ u8 *ctl_freqbin = is2GHz ?
+ &eep->ctl_freqbin_2G[idx][0] :
+ &eep->ctl_freqbin_5G[idx][0];
+
+ if (is2GHz) {
+ if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
+ CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
+ } else {
+ if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
+ CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
+ }
+
+ return MAX_RATE_POWER;
+}
+
+/*
+ * Find the maximum conformance test limit for the given channel and CTL info
+ */
+static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
+ u16 freq, int idx, bool is2GHz)
+{
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u8 *ctl_freqbin = is2GHz ?
+ &eep->ctl_freqbin_2G[idx][0] :
+ &eep->ctl_freqbin_5G[idx][0];
+ u16 num_edges = is2GHz ?
+ AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G;
+ unsigned int edge;
+
+ /* Get the edge power */
+ for (edge = 0;
+ (edge < num_edges) && (ctl_freqbin[edge] != AR5416_BCHAN_UNUSED);
+ edge++) {
+ /*
+ * If there's an exact channel match or an inband flag set
+		 * on the lower channel, use the given rdEdgePower
+ */
+ if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) {
+ twiceMaxEdgePower =
+ ar9003_hw_get_direct_edge_power(eep, idx,
+ edge, is2GHz);
+ break;
+ } else if ((edge > 0) &&
+ (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge],
+ is2GHz))) {
+ twiceMaxEdgePower =
+ ar9003_hw_get_indirect_edge_power(eep, idx,
+ edge, freq,
+ is2GHz);
+ /*
+ * Leave loop - no more affecting edges possible in
+			 * this monotonically increasing list
+ */
+ break;
+ }
+ }
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
+ return twiceMaxEdgePower;
+}
+
+static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *pPwrArray, u16 cfgCtl,
+ u8 antenna_reduction,
+ u16 powerLimit)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
+ u16 twiceMaxEdgePower;
+ int i;
+ u16 scaledPower = 0, minCtlPower;
+ static const u16 ctlModesFor11a[] = {
+ CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
+ };
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
+ CTL_11G_EXT, CTL_2GHT40
+ };
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
+ struct chan_centers centers;
+ u8 *ctlIndex;
+ u8 ctlNum;
+ u16 twiceMinEdgePower;
+ bool is2ghz = IS_CHAN_2GHZ(chan);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+ antenna_reduction);
+
+ if (is2ghz) {
+ /* Setup for CTL modes */
+ /* CTL_11B, CTL_11G, CTL_2GHT20 */
+ numCtlModes =
+ ARRAY_SIZE(ctlModesFor11g) -
+ SUB_NUM_CTL_MODES_AT_2G_40;
+ pCtlMode = ctlModesFor11g;
+ if (IS_CHAN_HT40(chan))
+ /* All 2G CTL's */
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+ } else {
+ /* Setup for CTL modes */
+ /* CTL_11A, CTL_5GHT20 */
+ numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
+ SUB_NUM_CTL_MODES_AT_5G_40;
+ pCtlMode = ctlModesFor11a;
+ if (IS_CHAN_HT40(chan))
+ /* All 5G CTL's */
+ numCtlModes = ARRAY_SIZE(ctlModesFor11a);
+ }
+
+ /*
+ * For MIMO, need to apply regulatory caps individually across
+ * dynamically running modes: CCK, OFDM, HT20, HT40
+ *
+ * The outer loop walks through each possible applicable runtime mode.
+ * The inner loop walks through each ctlIndex entry in EEPROM.
+ * The ctl value is encoded as [7:4] == test group, [3:0] == test mode.
+ */
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+ (pCtlMode[ctlMode] == CTL_2GHT40);
+ if (isHt40CtlMode)
+ freq = centers.synth_center;
+ else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+ freq = centers.ext_center;
+ else
+ freq = centers.ctl_center;
+
+ ath_dbg(common, REGULATORY,
+ "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
+ ctlMode, numCtlModes, isHt40CtlMode,
+ (pCtlMode[ctlMode] & EXT_ADDITIVE));
+
+ /* walk through each CTL index stored in EEPROM */
+ if (is2ghz) {
+ ctlIndex = pEepData->ctlIndex_2G;
+ ctlNum = AR9300_NUM_CTLS_2G;
+ } else {
+ ctlIndex = pEepData->ctlIndex_5G;
+ ctlNum = AR9300_NUM_CTLS_5G;
+ }
+
+ twiceMaxEdgePower = MAX_RATE_POWER;
+ for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
+ ath_dbg(common, REGULATORY,
+ "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
+ i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
+ chan->channel);
+
+ /*
+ * compare test group from regulatory
+ * channel list with test mode from pCtlMode
+ * list
+ */
+ if ((((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ctlIndex[i]) ||
+ (((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ((ctlIndex[i] & CTL_MODE_M) |
+ SD_NO_CTL))) {
+ twiceMinEdgePower =
+ ar9003_hw_get_max_edge_power(pEepData,
+ freq, i,
+ is2ghz);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+ /*
+ * Find the minimum of all CTL
+ * edge powers that apply to
+ * this channel
+ */
+ twiceMaxEdgePower =
+ min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ else {
+ /* specific */
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+
+ minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+
+ ath_dbg(common, REGULATORY,
+ "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+ ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+ scaledPower, minCtlPower);
+
+ /* Apply ctl mode to correct target power set */
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = ALL_TARGET_LEGACY_1L_5L;
+ i <= ALL_TARGET_LEGACY_11S; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = ALL_TARGET_LEGACY_6_24;
+ i <= ALL_TARGET_LEGACY_54; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = ALL_TARGET_HT20_0_8_16;
+ i <= ALL_TARGET_HT20_23; i++) {
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = ALL_TARGET_HT40_0_8_16;
+ i <= ALL_TARGET_HT40_23; i++) {
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
+ break;
+ default:
+ break;
+ }
+ } /* end ctl mode checking */
+}
+
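+/*
+ * Map an HT MCS index (0-23) onto the collapsed target power table, where
+ * MCS 0/8/16 share one entry and MCS 1-3/9-11/17-19 share another.
+ */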
+static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
+{
+ u8 mod_idx = mcs_idx % 8;
+
+ if (mod_idx <= 3)
+ return mod_idx ? (base_pwridx + 1) : base_pwridx;
+ else
+ return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
+}
+
+static void ar9003_paprd_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *targetPowerValT2)
+{
+ int i;
+
+ if (!ar9003_is_paprd_enabled(ah))
+ return;
+
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_7;
+ else
+ i = ALL_TARGET_HT20_7;
+
+ if (IS_CHAN_2GHZ(chan)) {
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) &&
+ !AR_SREV_9462(ah) && !AR_SREV_9565(ah)) {
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_0_8_16;
+ else
+ i = ALL_TARGET_HT20_0_8_16;
+ }
+ }
+
+ ah->paprd_target_power = targetPowerValT2[i];
+}
+
+static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 powerLimit, bool test)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_modal_eep_header *modal_hdr;
+ u8 targetPowerValT2[ar9300RateSize];
+ u8 target_power_val_t2_eep[ar9300RateSize];
+ u8 targetPowerValT2_tpc[ar9300RateSize];
+ unsigned int i = 0, paprd_scale_factor = 0;
+ u8 pwr_idx, min_pwridx = 0;
+
+	memset(targetPowerValT2, 0, sizeof(targetPowerValT2));
+
+ /*
+ * Get target powers from EEPROM - our baseline for TX Power
+ */
+ ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
+
+ if (ar9003_is_paprd_enabled(ah)) {
+ if (IS_CHAN_2GHZ(chan))
+ modal_hdr = &eep->modalHeader2G;
+ else
+ modal_hdr = &eep->modalHeader5G;
+
+ ah->paprd_ratemask =
+ le32_to_cpu(modal_hdr->papdRateMaskHt20) &
+ AR9300_PAPRD_RATE_MASK;
+
+ ah->paprd_ratemask_ht40 =
+ le32_to_cpu(modal_hdr->papdRateMaskHt40) &
+ AR9300_PAPRD_RATE_MASK;
+
+ paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
+ min_pwridx = IS_CHAN_HT40(chan) ? ALL_TARGET_HT40_0_8_16 :
+ ALL_TARGET_HT20_0_8_16;
+
+ if (!ah->paprd_table_write_done) {
+ memcpy(target_power_val_t2_eep, targetPowerValT2,
+ sizeof(targetPowerValT2));
+ for (i = 0; i < 24; i++) {
+ pwr_idx = mcsidx_to_tgtpwridx(i, min_pwridx);
+ if (ah->paprd_ratemask & (1 << i)) {
+ if (targetPowerValT2[pwr_idx] &&
+ targetPowerValT2[pwr_idx] ==
+ target_power_val_t2_eep[pwr_idx])
+ targetPowerValT2[pwr_idx] -=
+ paprd_scale_factor;
+ }
+ }
+ }
+ memcpy(target_power_val_t2_eep, targetPowerValT2,
+ sizeof(targetPowerValT2));
+ }
+
+ ar9003_hw_set_power_per_rate_table(ah, chan,
+ targetPowerValT2, cfgCtl,
+ twiceAntennaReduction,
+ powerLimit);
+
+ memcpy(targetPowerValT2_tpc, targetPowerValT2,
+ sizeof(targetPowerValT2));
+
+ if (ar9003_is_paprd_enabled(ah)) {
+ for (i = 0; i < ar9300RateSize; i++) {
+ if ((ah->paprd_ratemask & (1 << i)) &&
+ (abs(targetPowerValT2[i] -
+ target_power_val_t2_eep[i]) >
+ paprd_scale_factor)) {
+ ah->paprd_ratemask &= ~(1 << i);
+ ath_dbg(common, EEPROM,
+ "paprd disabled for mcs %d\n", i);
+ }
+ }
+ }
+
+ regulatory->max_power_level = 0;
+ for (i = 0; i < ar9300RateSize; i++) {
+ if (targetPowerValT2[i] > regulatory->max_power_level)
+ regulatory->max_power_level = targetPowerValT2[i];
+ }
+
+ ath9k_hw_update_regulatory_maxpower(ah);
+
+ if (test)
+ return;
+
+ for (i = 0; i < ar9300RateSize; i++) {
+ ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
+ }
+
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+ ar9003_hw_calibration_apply(ah, chan->channel);
+ ar9003_paprd_set_txpower(ah, chan, targetPowerValT2);
+
+ ar9003_hw_selfgen_tpc_txpower(ah, chan, targetPowerValT2);
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ u32 val;
+
+ ar9003_hw_init_rate_txpower(ah, targetPowerValT2_tpc, chan);
+
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX,
+ AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ /* Disable per chain power reduction */
+ val = REG_READ(ah, AR_PHY_POWER_TX_SUB);
+ if (AR_SREV_9340(ah))
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFFFC0);
+ else
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ val & 0xFFFFF000);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_PWRTX_MAX, 0);
+ }
+}
+
+static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return AR_NO_SPUR;
+}
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
+}
+
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
+}
+
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
+{
+ return ar9003_modal_header(ah, is2ghz)->spurChans;
+}
+
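+/*
+ * The PAPRD scale factor is stored in the upper bits of the papdRateMask
+ * words; 5 GHz channels select different fields depending on the sub-band.
+ */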
+unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_2GHZ(chan))
+ return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
+ else {
+ if (chan->channel >= 5700)
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
+ else if (chan->channel >= 5400)
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_2);
+ else
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_1);
+ }
+}
+
+const struct eeprom_ops eep_ar9300_ops = {
+ .check_eeprom = ath9k_hw_ar9300_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9300_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .dump_eeprom = ath9k_hw_ar9003_dump_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
+ .set_board_values = ath9k_hw_ar9300_set_board_values,
+ .set_addac = ath9k_hw_ar9300_set_addac,
+ .set_txpower = ath9k_hw_ar9300_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 000000000..694ca2e68
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_EEPROM_H
+#define AR9003_EEPROM_H
+
+#include <linux/types.h>
+
+#define AR9300_EEP_VER 0xD000
+#define AR9300_EEP_VER_MINOR_MASK 0xFFF
+#define AR9300_EEP_MINOR_VER_1 0x1
+#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
+
+/* 16-bit offset location of the start of the calibration struct */
+#define AR9300_EEP_START_LOC 256
+#define AR9300_NUM_5G_CAL_PIERS 8
+#define AR9300_NUM_2G_CAL_PIERS 3
+#define AR9300_NUM_5G_20_TARGET_POWERS 8
+#define AR9300_NUM_5G_40_TARGET_POWERS 8
+#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
+#define AR9300_NUM_2G_20_TARGET_POWERS 3
+#define AR9300_NUM_2G_40_TARGET_POWERS 3
+/* #define AR9300_NUM_CTLS 21 */
+#define AR9300_NUM_CTLS_5G 9
+#define AR9300_NUM_CTLS_2G 12
+#define AR9300_NUM_BAND_EDGES_5G 8
+#define AR9300_NUM_BAND_EDGES_2G 4
+#define AR9300_EEPMISC_BIG_ENDIAN 0x01
+#define AR9300_EEPMISC_WOW 0x02
+#define AR9300_CUSTOMER_DATA_SIZE 20
+
+#define AR9300_MAX_CHAINS 3
+#define AR9300_ANT_16S 25
+#define AR9300_FUTURE_MODAL_SZ 6
+
+#define AR9300_PAPRD_RATE_MASK 0x01ffffff
+#define AR9300_PAPRD_SCALE_1 0x0e000000
+#define AR9300_PAPRD_SCALE_1_S 25
+#define AR9300_PAPRD_SCALE_2 0x70000000
+#define AR9300_PAPRD_SCALE_2_S 28
+
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
+/* Delta from which to start power to pdadc table */
+/* This offset is used in both open loop and closed loop power control
+ * schemes. In open loop power control, it is not really needed, but for
+ * the "sake of consistency" it was kept. For certain AP designs, this
+ * value is overwritten by the value in the flag "pwrTableOffset" just
+ * before writing the pdadc vs pwr into the chip registers.
+ */
+#define AR9300_PWR_TABLE_OFFSET 0
+
+/* byte addressable */
+#define AR9300_EEPROM_SIZE (16*1024)
+
+#define AR9300_BASE_ADDR_4K 0xfff
+#define AR9300_BASE_ADDR 0x3ff
+#define AR9300_BASE_ADDR_512 0x1ff
+
+#define AR9300_OTP_BASE \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
+#define AR9300_OTP_STATUS_TYPE 0x7
+#define AR9300_OTP_STATUS_VALID 0x4
+#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
+#define AR9300_OTP_STATUS_SM_BUSY 0x1
+#define AR9300_OTP_READ_DATA \
+ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
+
+enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+ HT_TARGET_RATE_1_3_9_11_17_19,
+ HT_TARGET_RATE_4,
+ HT_TARGET_RATE_5,
+ HT_TARGET_RATE_6,
+ HT_TARGET_RATE_7,
+ HT_TARGET_RATE_12,
+ HT_TARGET_RATE_13,
+ HT_TARGET_RATE_14,
+ HT_TARGET_RATE_15,
+ HT_TARGET_RATE_20,
+ HT_TARGET_RATE_21,
+ HT_TARGET_RATE_22,
+ HT_TARGET_RATE_23
+};
+
+enum targetPowerLegacyRates {
+ LEGACY_TARGET_RATE_6_24,
+ LEGACY_TARGET_RATE_36,
+ LEGACY_TARGET_RATE_48,
+ LEGACY_TARGET_RATE_54
+};
+
+enum targetPowerCckRates {
+ LEGACY_TARGET_RATE_1L_5L,
+ LEGACY_TARGET_RATE_5S,
+ LEGACY_TARGET_RATE_11L,
+ LEGACY_TARGET_RATE_11S
+};
+
+enum ar9300_Rates {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54,
+ ALL_TARGET_LEGACY_1L_5L,
+ ALL_TARGET_LEGACY_5S,
+ ALL_TARGET_LEGACY_11L,
+ ALL_TARGET_LEGACY_11S,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+ ar9300RateSize,
+};
+
+
+struct eepFlags {
+ u8 opFlags;
+ u8 eepMisc;
+} __packed;
+
+enum CompressAlgorithm {
+ _CompressNone = 0,
+ _CompressLzma,
+ _CompressPairs,
+ _CompressBlock,
+ _Compress4,
+ _Compress5,
+ _Compress6,
+ _Compress7,
+};
+
+struct ar9300_base_eep_hdr {
+ __le16 regDmn[2];
+ /* 4 bits tx and 4 bits rx */
+ u8 txrxMask;
+ struct eepFlags opCapFlags;
+ u8 rfSilent;
+ u8 blueToothOptions;
+ u8 deviceCap;
+ /* takes lower byte in eeprom location */
+ u8 deviceType;
+ /* offset in dB to be added to beginning
+ * of pdadc table in calibration
+ */
+ int8_t pwrTableOffset;
+ u8 params_for_tuning_caps[2];
+ /*
+ * bit0 - enable tx temp comp
+ * bit1 - enable tx volt comp
+ * bit2 - enable fastClock - default to 1
+ * bit3 - enable doubling - default to 1
+ * bit4 - enable internal regulator - default to 1
+ */
+ u8 featureEnable;
+	/* misc flags: bit0 - turn down drive strength */
+ u8 miscConfiguration;
+ u8 eepromWriteEnableGpio;
+ u8 wlanDisableGpio;
+ u8 wlanLedGpio;
+ u8 rxBandSelectGpio;
+ u8 txrxgain;
+ /* SW controlled internal regulator fields */
+ __le32 swreg;
+} __packed;
+
+struct ar9300_modal_eep_header {
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ __le32 antCtrlCommon;
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ __le32 antCtrlCommon2;
+ /* 6 idle, t, r, rx1, rx12, b (2 bits each) */
+ __le16 antCtrlChain[AR9300_MAX_CHAINS];
+ /* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ u8 xatten1DB[AR9300_MAX_CHAINS];
+	/* 3 xatten1_margin for Merlin (0xa20c/b20c 16:12) */
+ u8 xatten1Margin[AR9300_MAX_CHAINS];
+ int8_t tempSlope;
+ int8_t voltSlope;
+ /* spur channels in usual fbin coding format */
+ u8 spurChans[AR_EEPROM_MODAL_SPURS];
+ /* 3 Check if the register is per chain */
+ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
+ u8 reserved[11];
+ int8_t quick_drop;
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 txClip;
+ int8_t antennaGain;
+ u8 switchSettling;
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ __le32 papdRateMaskHt20;
+ __le32 papdRateMaskHt40;
+ __le16 switchcomspdt;
+ u8 xlna_bias_strength;
+ u8 futureModal[7];
+} __packed;
+
+struct ar9300_cal_data_per_freq_op_loop {
+ int8_t refPower;
+ /* pdadc voltage at power measurement */
+ u8 voltMeas;
+ /* pcdac used for power measurement */
+ u8 tempMeas;
+	/* range is -60 to -127; create a mapping equation with 1 dB resolution */
+ int8_t rxNoisefloorCal;
+	/* range is the same as the noisefloor */
+ int8_t rxNoisefloorPower;
+ /* temp measured when noisefloor cal was performed */
+ u8 rxTempMeas;
+} __packed;
+
+struct cal_tgt_pow_legacy {
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_tgt_pow_ht {
+ u8 tPow2x[14];
+} __packed;
+
+struct cal_ctl_data_2g {
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+} __packed;
+
+struct cal_ctl_data_5g {
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
+#define MAX_BASE_EXTENSION_FUTURE 2
+
+struct ar9300_BaseExtension_1 {
+ u8 ant_div_control;
+ u8 future[MAX_BASE_EXTENSION_FUTURE];
+ /*
+ * misc_enable:
+ *
+ * BIT 0 - TX Gain Cap enable.
+ * BIT 1 - Uncompressed Checksum enable.
+ * BIT 2/3 - MinCCApwr enable 2g/5g.
+ */
+ u8 misc_enable;
+ int8_t tempslopextension[8];
+ int8_t quick_drop_low;
+ int8_t quick_drop_high;
+} __packed;
+
+struct ar9300_BaseExtension_2 {
+ int8_t tempSlopeLow;
+ int8_t tempSlopeHigh;
+ u8 xatten1DBLow[AR9300_MAX_CHAINS];
+ u8 xatten1MarginLow[AR9300_MAX_CHAINS];
+ u8 xatten1DBHigh[AR9300_MAX_CHAINS];
+ u8 xatten1MarginHigh[AR9300_MAX_CHAINS];
+} __packed;
+
+struct ar9300_eeprom {
+ u8 eepromVersion;
+ u8 templateVersion;
+ u8 macAddr[6];
+ u8 custData[AR9300_CUSTOMER_DATA_SIZE];
+
+ struct ar9300_base_eep_hdr baseEepHeader;
+
+ struct ar9300_modal_eep_header modalHeader2G;
+ struct ar9300_BaseExtension_1 base_ext1;
+ u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
+ u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
+ u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
+ struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
+ struct ar9300_modal_eep_header modalHeader5G;
+ struct ar9300_BaseExtension_2 base_ext2;
+ u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
+ u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
+ u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
+ struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
+} __packed;
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
+
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
+
+unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 000000000..79fd3b2dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,1184 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_2p2_initvals.h"
+#include "ar9003_buffalo_initvals.h"
+#include "ar9485_initvals.h"
+#include "ar9340_initvals.h"
+#include "ar9330_1p1_initvals.h"
+#include "ar9330_1p2_initvals.h"
+#include "ar955x_1p0_initvals.h"
+#include "ar9580_1p0_initvals.h"
+#include "ar9462_2p0_initvals.h"
+#include "ar9462_2p1_initvals.h"
+#include "ar9565_1p0_initvals.h"
+#include "ar9565_1p1_initvals.h"
+#include "ar953x_initvals.h"
+#include "ar956x_initvals.h"
+
+/* General hardware code for the AR9003 hardware family */
+
+/*
+ * The AR9003 family uses a new INI format (pre, core, post
+ * arrays per subsystem). This provides support for the
+ * AR9003 2.2 chipsets.
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_11(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9331_1p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9331_1p1_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9331_1p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9331_1p1_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9331_1p1_radio_core);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9331_1p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9331_1p1_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p1);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
+
+ /* Japan 2484 MHz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9331_1p1_baseband_core_txfir_coeff_japan_2484);
+
+ /* additional clock settings */
+ if (ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9331_1p1_xtal_25M);
+ else
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9331_1p1_xtal_40M);
+ } else if (AR_SREV_9330_12(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9331_1p2_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9331_1p2_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9331_1p2_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9331_1p2_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9331_1p2_radio_core);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9331_1p2_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9331_1p2_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
+
+ /* Japan 2484 MHz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9331_1p2_baseband_core_txfir_coeff_japan_2484);
+
+ /* additional clock settings */
+ if (ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9331_1p2_xtal_25M);
+ else
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9331_1p2_xtal_40M);
+ } else if (AR_SREV_9340(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9340_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9340_1p0_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9340_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9340_1p0_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9340_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9340_1p0_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9340_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9340_1p0_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9340Modes_fast_clock_1p0);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9340_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9340_1p0_baseband_postamble_dfs_channel);
+
+ if (!ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniAdditional,
+ ar9340_1p0_radio_core_40M);
+ } else if (AR_SREV_9485_11_OR_LATER(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9485_1_1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9485_1_1_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9485_1_1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9485_1_1_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9485_1_1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9485_1_1_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9485_1_1_soc_preamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
+
+ /* Japan 2484 MHz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
+
+ if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
+ }
+ } else if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9462_2p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9462_2p1_mac_postamble);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9462_2p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9462_2p1_baseband_postamble);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9462_2p1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9462_2p1_radio_postamble);
+ INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
+ ar9462_2p1_radio_postamble_sys2ant);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9462_2p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9462_2p1_soc_postamble);
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_rx_gain);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9462_2p1_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
+
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p1_pciephy_clkreq_disable_L1);
+ }
+ } else if (AR_SREV_9462_20(ah)) {
+
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9462_2p0_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9462_2p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9462_2p0_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9462_2p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9462_2p0_radio_postamble);
+ INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
+ ar9462_2p0_radio_postamble_sys2ant);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9462_2p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9462_2p0_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_common_rx_gain);
+
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9462_2p0_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9462_2p0_pciephy_clkreq_disable_L1);
+ }
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9462_2p0_modes_fast_clock);
+
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
+ } else if (AR_SREV_9550(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar955x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar955x_1p0_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar955x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar955x_1p0_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar955x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar955x_1p0_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar955x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar955x_1p0_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar955x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ qca953x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ qca953x_1p0_mac_postamble);
+ if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca953x_2p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca953x_2p0_baseband_postamble);
+ } else {
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca953x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca953x_1p0_baseband_postamble);
+ }
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ qca953x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ qca953x_1p0_radio_postamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ qca953x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ qca953x_1p0_soc_postamble);
+
+ if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ }
+
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ qca953x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ qca956x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ qca956x_1p0_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ qca956x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ qca956x_1p0_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ qca956x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ qca956x_1p0_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ qca956x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ qca956x_1p0_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ qca956x_1p0_baseband_postamble_dfs_channel);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ qca956x_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ qca956x_1p0_modes_fast_clock);
+ } else if (AR_SREV_9580(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9580_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9580_1p0_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9580_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9580_1p0_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9580_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9580_1p0_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9580_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9580_1p0_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_low_ob_db_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9580_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9580_1p0_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9580_1p0_baseband_postamble_dfs_channel);
+ } else if (AR_SREV_9565_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9565_1p1_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9565_1p1_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9565_1p1_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9565_1p1_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9565_1p1_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9565_1p1_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9565_1p1_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9565_1p1_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
+
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p1_pciephy_clkreq_disable_L1);
+ }
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9565_1p1_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p1_baseband_core_txfir_coeff_japan_2484);
+ } else if (AR_SREV_9565(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9565_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9565_1p0_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9565_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9565_1p0_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9565_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9565_1p0_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9565_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9565_1p0_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
+
+ /* Awake -> Sleep Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p0_pciephy_clkreq_disable_L1);
+ }
+
+ /* Sleep -> Awake Setting */
+ if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
+ (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p0_pciephy_clkreq_disable_L1);
+ }
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9565_1p0_modes_fast_clock);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
+ } else {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p2_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p2_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p2_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p2_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p2_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p2_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p2_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p2_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9300Modes_fast_clock_2p2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9300_2p2_baseband_core_txfir_coeff_japan_2484);
+ INIT_INI_ARRAY(&ah->ini_dfs,
+ ar9300_2p2_baseband_postamble_dfs_channel);
+ }
+}
+
+static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_xpa_tx_gain_table);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_lowest_ob_db_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_low_ob_db_tx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p0_modes_low_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_low_ob_db_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_high_ob_db_tx_gain_table);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531(ah)) {
+ if (AR_SREV_9531_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_2p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p0_modes_no_xpa_tx_gain_table);
+ } else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_high_ob_db_tx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p0_modes_high_ob_db_tx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_ob_db_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_high_ob_db_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_low_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_low_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_low_ob_db_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_high_power_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_power_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_high_power_tx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p1_modes_high_power_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_high_power_tx_gain_table);
+ else {
+ if (ah->config.tx_gain_buffalo)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_buffalo);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_2p2);
+ }
+}
+
+static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_mixed_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_mixed_ob_db_tx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p1_modes_mix_ob_db_tx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9462_2p0_modes_mix_ob_db_tx_gain);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
+{
+ if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type5_tx_gain_table);
+ else if (AR_SREV_9561(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca956x_1p0_modes_no_xpa_green_tx_gain_table);
+ else if (AR_SREV_9300_22(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_type5_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_spur_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type6_tx_gain_table);
+}
+
+static void ar9003_tx_gain_table_mode7(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340_cus227_tx_gain_table_1p0);
+}
+
+typedef void (*ath_txgain_tab)(struct ath_hw *ah);
+
+static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
+{
+ static const ath_txgain_tab modes[] = {
+ ar9003_tx_gain_table_mode0,
+ ar9003_tx_gain_table_mode1,
+ ar9003_tx_gain_table_mode2,
+ ar9003_tx_gain_table_mode3,
+ ar9003_tx_gain_table_mode4,
+ ar9003_tx_gain_table_mode5,
+ ar9003_tx_gain_table_mode6,
+ ar9003_tx_gain_table_mode7,
+ };
+ int idx = ar9003_hw_get_tx_gain_idx(ah);
+
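+ /* Fall back to the default (lowest ob_db) tables if the index is out of range */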
+ if (idx >= ARRAY_SIZE(modes))
+ idx = 0;
+
+ modes[idx](ah);
+}
+
+static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9340Common_rx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485_common_rx_gain_1_1);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9531(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ qca956x_1p0_xlna_only);
+ } else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_rx_gain_table);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_rx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_common_rx_gain);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_Common_rx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2);
+}
+
+static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
+{
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
+ else if (AR_SREV_9485_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1);
+ else if (AR_SREV_9462_21(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_wo_xlna_rx_gain);
+ else if (AR_SREV_9462_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_common_wo_xlna_rx_gain);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531_10(ah) || AR_SREV_9531_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9531_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca953x_2p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca953x_2p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9561(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ qca956x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ qca956x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9580_1p0_wo_xlna_rx_gain_table);
+ else if (AR_SREV_9565_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p1_common_wo_xlna_rx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_common_wo_xlna_rx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p2);
+}
+
+static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
+{
+ if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_mixed_rx_gain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ ar9462_2p1_baseband_core_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ ar9462_2p1_baseband_postamble_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p1_baseband_postamble_5g_xlna);
+ } else if (AR_SREV_9462_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_common_mixed_rx_gain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ ar9462_2p0_baseband_core_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ ar9462_2p0_baseband_postamble_mix_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p0_baseband_postamble_5g_xlna);
+ }
+}
+
+static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
+{
+ if (AR_SREV_9462_21(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p1_common_5g_xlna_only_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p1_baseband_postamble_5g_xlna);
+ } else if (AR_SREV_9462_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9462_2p0_common_5g_xlna_only_rxgain);
+ INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ ar9462_2p0_baseband_postamble_5g_xlna);
+ }
+}
+
+static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_rx_gain_idx(ah)) {
+ case 0:
+ default:
+ ar9003_rx_gain_table_mode0(ah);
+ break;
+ case 1:
+ ar9003_rx_gain_table_mode1(ah);
+ break;
+ case 2:
+ ar9003_rx_gain_table_mode2(ah);
+ break;
+ case 3:
+ ar9003_rx_gain_table_mode3(ah);
+ break;
+ }
+}
+
+/* set gain table pointers according to values read from the eeprom */
+static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ ar9003_tx_gain_table_apply(ah);
+ ar9003_rx_gain_table_apply(ah);
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288 bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
+ bool power_off)
+{
+ unsigned int i;
+ struct ar5416IniArray *array;
+
+ /*
+ * Increase L1 Entry Latency. Some WB222 boards don't have
+ * this change in eeprom/OTP.
+ */
+ if (AR_SREV_9462(ah)) {
+ u32 val = ah->config.aspm_l1_fix;
+ if ((val & 0xff000000) == 0x17000000) {
+ val &= 0x00ffffff;
+ val |= 0x27000000;
+ REG_WRITE(ah, 0x570c, val);
+ }
+ }
+
+ /* Nothing to do on restore for 11N */
+ if (!power_off /* !restore */) {
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ }
+
+ /*
+ * Configure PCIe after INI init. SERDES values now come from the INI file.
+ * This enables PCIe low power mode.
+ */
+ array = power_off ? &ah->iniPcieSerdes :
+ &ah->iniPcieSerdesLowPower;
+
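+ /* Each INI row holds a (register, value) pair; write them out in order */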
+ for (i = 0; i < array->ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(array, i, 0),
+ INI_RA(array, i, 1));
+ }
+}
+
+static void ar9003_hw_init_hang_checks(struct ath_hw *ah)
+{
+ /*
+ * All chips support detection of BB/MAC hangs.
+ */
+ ah->config.hw_hang_checks |= HW_BB_WATCHDOG;
+ ah->config.hw_hang_checks |= HW_MAC_HANG;
+
+ /*
+ * This is not required for AR9580 1.0
+ */
+ if (AR_SREV_9300_22(ah))
+ ah->config.hw_hang_checks |= HW_PHYRESTART_CLC_WAR;
+
+ if (AR_SREV_9330(ah))
+ ah->bb_watchdog_timeout_ms = 85;
+ else
+ ah->bb_watchdog_timeout_ms = 25;
+}
+
+/*
+ * MAC HW hang check
+ * =================
+ *
+ * Signature: dcu_chain_state is 0x6 and dcu_complete_state is 0x1.
+ *
+ * The state of each DCU chain (mapped to TX queues) is available from these
+ * DMA debug registers:
+ *
+ * Chain 0 state : Bits 4:0 of AR_DMADBG_4
+ * Chain 1 state : Bits 9:5 of AR_DMADBG_4
+ * Chain 2 state : Bits 14:10 of AR_DMADBG_4
+ * Chain 3 state : Bits 19:15 of AR_DMADBG_4
+ * Chain 4 state : Bits 24:20 of AR_DMADBG_4
+ * Chain 5 state : Bits 29:25 of AR_DMADBG_4
+ * Chain 6 state : Bits 4:0 of AR_DMADBG_5
+ * Chain 7 state : Bits 9:5 of AR_DMADBG_5
+ * Chain 8 state : Bits 14:10 of AR_DMADBG_5
+ * Chain 9 state : Bits 19:15 of AR_DMADBG_5
+ *
+ * The DCU chain state "0x6" means "WAIT_FRDONE" - wait for TX frame to be done.
+ */
+
+#define NUM_STATUS_READS 50
+
+static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+{
+ u32 dma_dbg_chain, dma_dbg_complete;
+ u8 dcu_chain_state, dcu_complete_state;
+ int i;
+
+ for (i = 0; i < NUM_STATUS_READS; i++) {
+ if (queue < 6)
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+ else
+ dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
+
+ dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+
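+ /* Each DCU chain state is a 5-bit field in the DMA debug word */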
+ dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
+ dcu_complete_state = dma_dbg_complete & 0x3;
+
+ if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+ return false;
+ }
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "MAC Hang signature found for queue: %d\n", queue);
+
+ return true;
+}
+
+static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ u32 dma_dbg_4, dma_dbg_5, dma_dbg_6, chk_dbg;
+ u8 dcu_chain_state, dcu_complete_state;
+ bool dcu_wait_frdone = false;
+ unsigned long chk_dcu = 0;
+ unsigned int i = 0;
+
+ dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+ dma_dbg_5 = REG_READ(ah, AR_DMADBG_5);
+ dma_dbg_6 = REG_READ(ah, AR_DMADBG_6);
+
+ dcu_complete_state = dma_dbg_6 & 0x3;
+ if (dcu_complete_state != 0x1)
+ goto exit;
+
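+ /* Look for chains stuck in WAIT_FRDONE (0x6) across all TX queues */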
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (i < 6)
+ chk_dbg = dma_dbg_4;
+ else
+ chk_dbg = dma_dbg_5;
+
+ dcu_chain_state = (chk_dbg >> (5 * i)) & 0x1f;
+ if (dcu_chain_state == 0x6) {
+ dcu_wait_frdone = true;
+ chk_dcu |= BIT(i);
+ }
+ }
+
+ if ((dcu_complete_state == 0x1) && dcu_wait_frdone) {
+ for_each_set_bit(i, &chk_dcu, ATH9K_NUM_TX_QUEUES) {
+ if (ath9k_hw_verify_hang(ah, i))
+ return true;
+ }
+ }
+exit:
+ return false;
+}
+
+/* Sets up the AR9003 hardware family callbacks */
+void ar9003_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ar9003_hw_init_mode_regs(ah);
+
+ if (AR_SREV_9003_PCOEM(ah)) {
+ WARN_ON(!ah->iniPcieSerdes.ia_array);
+ WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
+ }
+
+ priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
+ priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
+
+ ops->config_pci_powersave = ar9003_hw_configpcipowersave;
+
+ ar9003_hw_attach_phy_ops(ah);
+ ar9003_hw_attach_calib_ops(ah);
+ ar9003_hw_attach_mac_ops(ah);
+ ar9003_hw_attach_aic_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 000000000..da84b705c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_mci.h"
+
+static void ar9003_hw_rx_enable(struct ath_hw *hw)
+{
+ REG_WRITE(hw, AR_CR, 0);
+}
+
+static void
+ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+{
+ struct ar9003_txc *ads = ds;
+ int checksum = 0;
+ u32 val, ctl12, ctl17;
+ u8 desc_len;
+
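+ /* Descriptor length field: AR9462/AR9565 use 0x18, all other AR9003 chips 0x17 */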
+ desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
+
+ val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (i->qcu << AR_TxQcuNum_S) | desc_len;
+
+ checksum += val;
+ ACCESS_ONCE(ads->info) = val;
+
+ checksum += i->link;
+ ACCESS_ONCE(ads->link) = i->link;
+
+ checksum += i->buf_addr[0];
+ ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+ checksum += i->buf_addr[1];
+ ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+ checksum += i->buf_addr[2];
+ ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+ checksum += i->buf_addr[3];
+ ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+
+ checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl3) = val;
+ checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl5) = val;
+ checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl7) = val;
+ checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+ ACCESS_ONCE(ads->ctl9) = val;
+
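+ /* Fold the 32-bit running sum into the 16-bit pointer checksum stored in ctl10 */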
+ checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+ ACCESS_ONCE(ads->ctl10) = checksum;
+
+ if (i->is_first || i->is_last) {
+ ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+ ACCESS_ONCE(ads->ctl13) = 0;
+ ACCESS_ONCE(ads->ctl14) = 0;
+ }
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+ ads->ctl23 = 0;
+
+ ctl17 = SM(i->keytype, AR_EncrType);
+ if (!i->is_first) {
+ ACCESS_ONCE(ads->ctl11) = 0;
+ ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+ ACCESS_ONCE(ads->ctl15) = 0;
+ ACCESS_ONCE(ads->ctl16) = 0;
+ ACCESS_ONCE(ads->ctl17) = ctl17;
+ ACCESS_ONCE(ads->ctl18) = 0;
+ ACCESS_ONCE(ads->ctl19) = 0;
+ return;
+ }
+
+ ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower[0], AR_XmitPower0)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
+ | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+ ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ?
+ SM(i->keyix, AR_DestIdx) : 0)
+ | SM(i->type, AR_FrameType)
+ | (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ctl17 |= (i->flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ switch (i->aggr) {
+ case AGGR_BUF_FIRST:
+ ctl17 |= SM(i->aggr_len, AR_AggrLen);
+ /* fall through */
+ case AGGR_BUF_MIDDLE:
+ ctl12 |= AR_IsAggr | AR_MoreAggr;
+ ctl17 |= SM(i->ndelim, AR_PadDelim);
+ break;
+ case AGGR_BUF_LAST:
+ ctl12 |= AR_IsAggr;
+ break;
+ case AGGR_BUF_NONE:
+ break;
+ }
+
+ val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+ ctl12 |= SM(val, AR_PAPRDChainMask);
+
+ ACCESS_ONCE(ads->ctl12) = ctl12;
+ ACCESS_ONCE(ads->ctl17) = ctl17;
+
+ ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+ ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+ ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+ ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+
+ ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
+ ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
+ ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
+}
+
+static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+{
+ int checksum;
+
+ checksum = ads->info + ads->link
+ + ads->data0 + ads->ctl3
+ + ads->data1 + ads->ctl5
+ + ads->data2 + ads->ctl7
+ + ads->data3 + ads->ctl9;
+
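+ /* Sum the descriptor control/pointer words and fold the result into 16 bits */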
+ return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
+}
+
+static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->link = ds_link;
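+ /* The link pointer is covered by the pointer checksum, so recompute ctl10 */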
+ ads->ctl10 &= ~AR_TxPtrChkSum;
+ ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
+}
+
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+ bool fatal_int;
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (async_cause & async_mask) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON)
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause && !async_cause)
+ return false;
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+
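+ /* Map the secondary status bits (AR_ISR_S2) into the caller-visible mask */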
+ mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
+ MAP_ISR_S2_TIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
+ MAP_ISR_S2_DTIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
+ MAP_ISR_S2_DTIMSYNC);
+ mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
+ MAP_ISR_S2_CABEND);
+ mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
+ MAP_ISR_S2_GTT);
+ mask2 |= ((isr2 & AR_ISR_S2_CST) <<
+ MAP_ISR_S2_CST);
+ mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
+ MAP_ISR_S2_TSFOOR);
+ mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
+ MAP_ISR_S2_BB_WATCHDOG);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
+ if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation)
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (ah->config.tx_intr_mitigation)
+ if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
+ *masked |= ATH9K_INT_TX;
+
+ if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (isr & AR_ISR_HP_RXOK)
+ *masked |= ATH9K_INT_RXHP;
+
+ if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
+ *masked |= ATH9K_INT_TX;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ u32 s0, s1;
+ s0 = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0);
+ s1 = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1);
+
+ isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+ }
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ s5 = REG_READ(ah, AR_ISR_S5_S);
+ else
+ s5 = REG_READ(ah, AR_ISR_S5);
+
+ ah->intr_gen_timer_trigger =
+ MS(s5, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5);
+ isr &= ~AR_ISR_GENTMR;
+ }
+
+ }
+
+ *masked |= mask2;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+
+ (void) REG_READ(ah, AR_ISR);
+ }
+
+ if (*masked & ATH9K_INT_BB_WATCHDOG)
+ ar9003_hw_bb_watchdog_read(ah);
+ }
+
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI)
+ ar9003_mci_get_isr(ah, masked);
+
+ if (sync_cause) {
+ if (sync_cause_p)
+ *sync_cause_p = sync_cause;
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_dbg(common, ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_dbg(common, ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ ath_dbg(common, INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+
+ }
+ return true;
+}
+
+static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar9003_txs *ads;
+ u32 status;
+
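+ /* TX status is taken from the status ring, not from the passed-in descriptor */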
+ ads = &ah->ts_ring[ah->ts_tail];
+
+ status = ACCESS_ONCE(ads->status8);
+ if ((status & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+
+ if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
+ (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
+ ath_dbg(ath9k_hw_common(ah), XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
+ memset(ads, 0, sizeof(*ads));
+ return -EIO;
+ }
+
+ ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+ ts->ts_seqnum = MS(status, AR_SeqNum);
+ ts->tid = MS(status, AR_TxTid);
+
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ ts->desc_id = MS(ads->status1, AR_TxDescId);
+ ts->ts_tstamp = ads->status4;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (status & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ status = ACCESS_ONCE(ads->status2);
+ ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+ if (status & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ status = ACCESS_ONCE(ads->status3);
+ if (status & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (status & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (status & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (status & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+ if (status & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (status & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (status & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+ ts->ts_longretry = MS(status, AR_DataFailCnt);
+ ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
+
+ status = ACCESS_ONCE(ads->status7);
+ ts->ts_rssi = MS(status, AR_TxRSSICombined);
+ ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
+
+ memset(ads, 0, sizeof(*ads));
+
+ return 0;
+}
+
+static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
+{
+ const struct ar9003_txc *adc = ds;
+
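+ /* Packet durations for rate series 0-3 are packed two per word in ctl15/ctl16 */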
+ switch (index) {
+ case 0:
+ return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0);
+ case 1:
+ return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1);
+ case 2:
+ return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2);
+ case 3:
+ return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3);
+ default:
+ return 0;
+ }
+}
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(hw);
+
+ ops->rx_enable = ar9003_hw_rx_enable;
+ ops->set_desc_link = ar9003_hw_set_desc_link;
+ ops->get_isr = ar9003_hw_get_isr;
+ ops->set_txdesc = ar9003_set_txdesc;
+ ops->proc_txdesc = ar9003_hw_proc_txdesc;
+ ops->get_duration = ar9003_hw_get_duration;
+}
+
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
+{
+ REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
+}
+EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
+
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype)
+{
+ if (qtype == ATH9K_RX_QUEUE_HP)
+ REG_WRITE(ah, AR_HP_RXDP, rxdp);
+ else
+ REG_WRITE(ah, AR_LP_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ void *buf_addr)
+{
+ struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ unsigned int phyerr;
+
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
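+ /* RX status descriptors are tagged with the Atheros vendor ID (0x168c) */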
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
+
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
+
+ rxs->rs_status = 0;
+ rxs->rs_flags = 0;
+ rxs->flag = 0;
+
+ rxs->rs_datalen = rxsp->status2 & AR_DataLen;
+ rxs->rs_tstamp = rxsp->status3;
+
+ /* XXX: Keycache */
+ rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
+ rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);
+
+ if (rxsp->status11 & AR_RxKeyIdxValid)
+ rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
+ else
+ rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
+ rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+
+ rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
+ rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
+ rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
+ rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
+ rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+ rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
+
+ rxs->evm0 = rxsp->status6;
+ rxs->evm1 = rxsp->status7;
+ rxs->evm2 = rxsp->status8;
+ rxs->evm3 = rxsp->status9;
+ rxs->evm4 = (rxsp->status10 & 0xffff);
+
+ if (rxsp->status11 & AR_PreDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+
+ if (rxsp->status11 & AR_PostDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+
+ if (rxsp->status11 & AR_DecryptBusyErr)
+ rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((rxsp->status11 & AR_RxFrameOK) == 0) {
+ /*
+ * AR_CRCErr will be set to true if we're on the last
+ * subframe and the AR_PostDelimCRCErr is caught.
+ * In a way this also gives us a guarantee that when
+ * (!(AR_CRCErr) && (AR_PostDelimCRCErr)) we cannot
+ * possibly be reviewing the last subframe. AR_CRCErr
+ * is the CRC of the actual data.
+ */
+ if (rxsp->status11 & AR_CRCErr)
+ rxs->rs_status |= ATH9K_RXERR_CRC;
+ else if (rxsp->status11 & AR_DecryptCRCErr)
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (rxsp->status11 & AR_MichaelErr)
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ if (rxsp->status11 & AR_PHYErr) {
+ phyerr = MS(rxsp->status11, AR_PHYErrCode);
+ /*
+ * If we reach a point here where AR_PostDelimCRCErr is
+ * true it implies we're *not* on the last subframe. In
+ * that case we already know that the CRC of the frame
+ * was OK, and the MAC would send an ACK for that
+ * subframe, even if we did get a phy error of type
+ * ATH9K_PHYERR_OFDM_RESTART. This is only applicable to
+ * frames prior to the last subframe.
+ * The AR_PostDelimCRCErr is the CRC for the MPDU
+ * delimiter, which contains the 4 reserved bits,
+ * the MPDU length (12 bits), and follows the MPDU
+ * delimiter for an A-MPDU subframe (0x4E = 'N' ASCII).
+ */
+ if ((phyerr == ATH9K_PHYERR_OFDM_RESTART) &&
+ (rxsp->status11 & AR_PostDelimCRCErr)) {
+ rxs->rs_phyerr = 0;
+ } else {
+ rxs->rs_status |= ATH9K_RXERR_PHY;
+ rxs->rs_phyerr = phyerr;
+ }
+ }
+ }
+
+ if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
+{
+ ah->ts_tail = 0;
+
+ memset((void *) ah->ts_ring, 0,
+ ah->ts_size * sizeof(struct ar9003_txs));
+
+ ath_dbg(ath9k_hw_common(ah), XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
+
+ REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
+ REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
+}
+
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u16 size)
+{
+
+ ah->ts_paddr_start = ts_paddr_start;
+ ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
+ ah->ts_size = size;
+ ah->ts_ring = (struct ar9003_txs *) ts_start;
+
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setup_statusring);
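ath9k_hw_setup_statusring() only records the ring geometry; the memory itself has to be a DMA-coherent buffer of size * sizeof(struct ar9003_txs) bytes whose bus address fits the 32-bit ts_paddr_start field. A hedged sketch of such an allocation (the helper name and the use of dmam_alloc_coherent() are illustrative, not part of this patch):

/* Illustrative only: allocate a coherent buffer for 'nents' TX status
 * entries and register it with the hardware via the helper above. */
static int ath_alloc_txstatus_ring(struct ath_hw *ah, struct device *dev,
				   u16 nents)
{
	dma_addr_t paddr;
	void *ring;

	ring = dmam_alloc_coherent(dev, nents * sizeof(struct ar9003_txs),
				   &paddr, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	ath9k_hw_setup_statusring(ah, ring, paddr, nents);
	return 0;
}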
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 000000000..cbf60b090
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MAC_H
+#define AR9003_MAC_H
+
+#define AR_DescId 0xffff0000
+#define AR_DescId_S 16
+#define AR_CtrlStat 0x00004000
+#define AR_CtrlStat_S 14
+#define AR_TxRxDesc 0x00008000
+#define AR_TxRxDesc_S 15
+#define AR_TxQcuNum 0x00000f00
+#define AR_TxQcuNum_S 8
+
+#define AR_BufLen 0x0fff0000
+#define AR_BufLen_S 16
+
+#define AR_TxDescId 0xffff0000
+#define AR_TxDescId_S 16
+#define AR_TxPtrChkSum 0x0000ffff
+
+#define AR_LowRxChain 0x00004000
+
+#define AR_Not_Sounding 0x20000000
+
+/* ctl 12 */
+#define AR_PAPRDChainMask 0x00000e00
+#define AR_PAPRDChainMask_S 9
+
+#define MAP_ISR_S2_CST 6
+#define MAP_ISR_S2_GTT 6
+#define MAP_ISR_S2_TIM 3
+#define MAP_ISR_S2_CABEND 0
+#define MAP_ISR_S2_DTIMSYNC 7
+#define MAP_ISR_S2_DTIM 7
+#define MAP_ISR_S2_TSFOOR 4
+#define MAP_ISR_S2_BB_WATCHDOG 6
+
+#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
+
+struct ar9003_rxs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ u32 status10;
+ u32 status11;
+} __packed __aligned(4);
+
+/* Transmit Control Descriptor */
+struct ar9003_txc {
+ u32 info; /* descriptor information */
+ u32 link; /* link pointer */
+ u32 data0; /* data pointer to 1st buffer */
+ u32 ctl3; /* DMA control 3 */
+ u32 data1; /* data pointer to 2nd buffer */
+ u32 ctl5; /* DMA control 5 */
+ u32 data2; /* data pointer to 3rd buffer */
+ u32 ctl7; /* DMA control 7 */
+ u32 data3; /* data pointer to 4th buffer */
+ u32 ctl9; /* DMA control 9 */
+ u32 ctl10; /* DMA control 10 */
+ u32 ctl11; /* DMA control 11 */
+ u32 ctl12; /* DMA control 12 */
+ u32 ctl13; /* DMA control 13 */
+ u32 ctl14; /* DMA control 14 */
+ u32 ctl15; /* DMA control 15 */
+ u32 ctl16; /* DMA control 16 */
+ u32 ctl17; /* DMA control 17 */
+ u32 ctl18; /* DMA control 18 */
+ u32 ctl19; /* DMA control 19 */
+ u32 ctl20; /* DMA control 20 */
+ u32 ctl21; /* DMA control 21 */
+ u32 ctl22; /* DMA control 22 */
+ u32 ctl23; /* DMA control 23 */
+ u32 pad[8]; /* pad to cache line (128 bytes/32 dwords) */
+} __packed __aligned(4);
+
+struct ar9003_txs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+} __packed __aligned(4);
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
+ struct ath_rx_status *rxs,
+ void *buf_addr);
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u16 size);
+#endif
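Each mask here has an _S companion giving its shift, which is how the driver's MS()/SM() helpers extract and build fields; the 0x168c descriptor-ID check in ar9003_mac.c is one user of AR_DescId. A standalone sketch of that pattern, with local stand-ins for MS()/SM() since their real definitions live elsewhere in ath9k:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the driver's MS()/SM() mask/shift helpers. */
#define MS(val, field)	(((val) & field) >> field##_S)
#define SM(val, field)	(((val) << field##_S) & field)

#define AR_DescId	0xffff0000
#define AR_DescId_S	16

int main(void)
{
	uint32_t ds_info = SM(0x168cu, AR_DescId);

	/* prints "descid=0x168c", mirroring the check in ar9003_mac.c */
	printf("descid=0x%x\n", (unsigned int)MS(ds_info, AR_DescId));
	return 0;
}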
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 000000000..af5ee416a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1573 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+#include "ar9003_aic.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+ udelay(1);
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+ u32 bit_position, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ while (time_out) {
+ if (!(REG_READ(ah, address) & bit_position)) {
+ udelay(10);
+ time_out -= 10;
+
+ if (time_out < 0)
+ break;
+ else
+ continue;
+ }
+ REG_WRITE(ah, address, bit_position);
+
+ if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
+ break;
+
+ if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
+ break;
+ }
+
+ if (time_out <= 0) {
+ ath_dbg(common, MCI,
+ "MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
+ address, bit_position);
+ ath_dbg(common, MCI,
+ "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ time_out = 0;
+ }
+
+ return time_out;
+}
+
+static void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+ wait_done, false);
+ udelay(5);
+}
+
+static void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x00000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+ udelay(5);
+}
+
+static void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x70000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+ MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (mci->bt_version_known ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_RESPONSE);
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+ mci->wlan_ver_major;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+ mci->wlan_ver_minor;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
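A GPM message is a 16-byte payload passed around as four u32 words; MCI_GPM_SET_TYPE_OPCODE() stamps the type and opcode bytes at offsets 4 and 5, and the version response above then writes the major/minor version bytes at offsets 6 and 7. A standalone sketch of that byte layout (offsets copied from the MCI_GPM_COEX_B_* enum in ar9003_mci.h; the stored values are only examples):

#include <stdint.h>
#include <stdio.h>

/* Byte offsets as defined in ar9003_mci.h. */
#define MCI_GPM_COEX_B_GPM_TYPE		4
#define MCI_GPM_COEX_B_GPM_OPCODE	5
#define MCI_GPM_COEX_B_MAJOR_VERSION	6
#define MCI_GPM_COEX_B_MINOR_VERSION	7

int main(void)
{
	uint32_t payload[4] = {0, 0, 0, 0};
	uint8_t *p = (uint8_t *)payload;

	p[MCI_GPM_COEX_B_GPM_TYPE] = 0x0c;	/* MCI_GPM_COEX_AGENT */
	p[MCI_GPM_COEX_B_GPM_OPCODE] = 1;	/* COEX_VERSION_RESPONSE */
	p[MCI_GPM_COEX_B_MAJOR_VERSION] = 3;	/* wlan_ver_major */
	p[MCI_GPM_COEX_B_MINOR_VERSION] = 0;	/* wlan_ver_minor */

	/* bytes 4..7 of the 16-byte payload now carry type/opcode/version */
	printf("%02x %02x %02x %02x\n", p[4], p[5], p[6], p[7]);
	return 0;
}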
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload = &mci->wlan_channels[0];
+
+ if (!mci->wlan_channels_update ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+ bool wait_done, u8 query_type)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+ bool query_btinfo;
+
+ if (mci->bt_state == MCI_BT_SLEEP)
+ return;
+
+ query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_STATUS_QUERY);
+
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+
+ /*
+	 * If the bt_status_query message is not sent successfully,
+	 * need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
+ if (query_btinfo)
+ mci->need_flush_btinfo = true;
+ }
+
+ if (query_btinfo)
+ mci->query_bt = false;
+}
+
+static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_HALT_BT_GPM);
+
+ if (halt) {
+ mci->query_bt = true;
+		/* Send the next unhalt regardless of whether the halt was sent */
+ mci->unhalt_bt_gpm = true;
+ mci->need_flush_btinfo = true;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_HALT;
+ } else
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_UNHALT;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 saved_mci_int_en;
+ u32 mci_timeout = 150;
+
+ mci->bt_state = MCI_BT_SLEEP;
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_req_wake(ah, true);
+
+ if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
+ goto clear_redunt;
+
+ mci->bt_state = MCI_BT_AWAKE;
+
+ /*
+	 * We don't need to send another remote_reset at this point.
+	 * If BT received the first remote_reset, then BT HW will
+	 * have been cleaned up and will be able to receive req_wake,
+	 * and BT HW will respond with sys_waking.
+	 * In this case, WLAN will receive BT's HW sys_waking.
+	 * Otherwise, if BT SW missed the initial remote_reset,
+	 * that remote_reset will still have cleaned up BT's MCI RX,
+	 * the req_wake will wake BT up,
+	 * and BT SW will respond to this req_wake with a remote_reset
+	 * and sys_waking. In this case, WLAN will receive BT's SW
+	 * sys_waking. In either case, BT's RX is cleaned up, so we
+	 * don't need to reply to BT's remote_reset now, if any.
+	 * Similarly, if WLAN can receive BT's sys_waking in either
+	 * case, that means WLAN's RX is also fine.
+ */
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
+
+ /*
+	 * Set the BT priority interrupt value to 0xff to
+	 * avoid having too many BT PRIORITY interrupts.
+ */
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+ /*
+	 * A contention reset will be received after sending out
+	 * sys_waking, and BT priority interrupt bits will be set.
+	 * Clear those bits before the next step.
+ */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
+
+ if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
+
+ if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
+ }
+
+clear_redunt:
+ /* Clear the extra redundant SYS_WAKING from BT */
+ if ((mci->bt_state == MCI_BT_AWAKE) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ }
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_set_full_sleep(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
+ }
+
+ mci->ready = false;
+}
+
+static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+static void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+static bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+ u32 intr;
+
+ intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ *raw_intr = mci->raw_intr;
+ *rx_msg_intr = mci->rx_msg_intr;
+
+	/* Clear the interrupt bits after the values are read. */
+ mci->raw_intr = 0;
+ mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 raw_intr, rx_msg_intr;
+
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) {
+ ath_dbg(common, MCI,
+ "MCI gets 0xdeadbeef during int processing\n");
+ } else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status = REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ }
+}
+
+static void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!mci->update_2g5g &&
+ (mci->is_2g != is_2g))
+ mci->update_2g5g = true;
+
+ mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload;
+ u32 recv_type, offset;
+
+ if (msg_index == MCI_GPM_INVALID)
+ return false;
+
+ offset = msg_index << 4;
+
+ payload = (u32 *)(mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(payload);
+
+ if (recv_type == MCI_GPM_RSVD_PATTERN)
+ return false;
+
+ return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ } else
+ return;
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, ATH_MCI_CONFIG_MCI_OBS_GPIO);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+ REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+ u8 opcode, u32 bt_flags)
+{
+ u32 pld[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(pld, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+ *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+ return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+ wait_done, true);
+}
+
+static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
+
+ if (mci->bt_state != cur_bt_state)
+ mci->bt_state = cur_bt_state;
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true)
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+}
+
+void ar9003_mci_check_bt(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ if (!mci_hw->ready)
+ return;
+
+ /*
+	 * Check the BT state again to make
+	 * sure it has not changed.
+ */
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+}
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 *p_data = (u8 *) p_gpm;
+
+ if (gpm_type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+ *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
+static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+ if (recv_type == gpm_type) {
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ continue;
+ }
+ break;
+ }
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
+ break;
+
+ /*
+		 * Check if it's cal_grant.
+		 *
+		 * When we're waiting for cal_grant in the reset routine,
+		 * it's possible that BT sends out cal_request at the
+		 * same time. Since BT's calibration doesn't happen
+		 * that often, we let BT complete its calibration and
+		 * then continue to wait for cal_grant from BT.
+		 * Original: wait for BT_CAL_GRANT.
+		 * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait
+		 * for BT_CAL_DONE -> wait for BT_CAL_GRANT.
+ */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+ continue;
+ } else {
+ ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+ *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0)
+ time_out = 0;
+
+ while (more_data == MCI_GPM_MORE) {
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
+
+bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state != MCI_BT_CAL_START)
+ return false;
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+	 * MCI FIX: disable the MCI interrupt here. This avoids the
+	 * SW_MSG_DONE or RX_MSG bits triggering MCI_INT and
+	 * leading to mci_intr reentry.
+ */
+ ar9003_mci_disable_interrupt(ah);
+
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+	/* Wait up to 25ms for BT calibration to complete */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, MCI, "MCI BT_CAL_DONE received\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT_CAL_DONE not received\n");
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+ /* MCI FIX: enable mci interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+ return true;
+}
+
+int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ if (!mci_hw->ready)
+ return 0;
+
+ if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
+ goto exit;
+
+ if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
+ !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
+ goto exit;
+
+ /*
+	 * BT is sleeping. Check if BT wakes up during
+	 * WLAN calibration. If BT wakes up during
+	 * WLAN calibration, we need to go through all
+	 * the message exchanges again and recalibrate.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));
+
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ REG_CLR_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
+ if (caldata) {
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ clear_bit(RTT_DONE, &caldata->cal_flags);
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ REG_SET_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
+exit:
+ ar9003_mci_enable_interrupt(ah);
+ return 0;
+}
+
+static void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+	/* wait for pending HW messages to flush out */
+ udelay(10);
+
+ /*
+	 * Send LNA_TAKE and SYS_SLEEPING when:
+	 * 1. the reset is not after resuming from full sleep
+	 * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment
+ */
+ if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+ }
+
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 thresh;
+
+ if (!enable) {
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ return;
+ }
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ if (AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+ thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
+ } else
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
+}
+
+static void ar9003_mci_stat_setup(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!AR_SREV_9565(ah))
+ return;
+
+ if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) {
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_BT_LINKID,
+ MCI_STAT_ALL_BT_LINKID);
+ } else {
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
+ }
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(0, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(0, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah)
+{
+ u32 regval;
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+}
+
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval, i;
+
+ ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
+ is_full_sleep, is_2g);
+
+ if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+ ath_err(common, "BTCOEX control register is dead\n");
+ return -EINVAL;
+ }
+
+ /* Program MCI DMA related registers */
+ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+ REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+ REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+ /*
+	 * To avoid the MCI state machine being affected by incoming remote
+	 * MCI messages, MCI mode will be enabled later, right before
+	 * resetting the MCI TX and RX.
+ */
+ if (AR_SREV_9565(ah)) {
+ u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH);
+
+ if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED)
+ ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah);
+ else
+ ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah);
+ } else {
+ ar9003_mci_set_btcoex_ctrl_9462(ah);
+ }
+
+ if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
+ ar9003_mci_osla_setup(ah, true);
+ else
+ ar9003_mci_osla_setup(ah, false);
+
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ /* Set the time out to 3.125ms (5 BT slots) */
+ REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);
+
+ /* concurrent tx priority */
+ if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
+ }
+
+ regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+ /* Resetting the Rx and Tx paths of MCI */
+ regval = REG_READ(ah, AR_MCI_COMMAND2);
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ udelay(1);
+
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ if (is_full_sleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(100);
+ }
+
+ /* Check pending GPM msg before MCI Reset Rx */
+ ar9003_mci_check_gpm_offset(ah);
+
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+ udelay(1);
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ /* Init GPM offset after MCI Reset Rx */
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
+
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+ (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+ SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+ if (MCI_ANT_ARCH_PA_LNA_SHARED(mci))
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ else
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ ar9003_mci_observation_set_up(ah);
+
+ mci->ready = true;
+ ar9003_mci_prep_interface(ah);
+ ar9003_mci_stat_setup(ah);
+
+ if (en_int)
+ ar9003_mci_enable_interrupt(ah);
+
+ if (ath9k_hw_is_aic_enabled(ah))
+ ar9003_aic_start_normal(ah);
+
+ return 0;
+}
+
+void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+
+ ar9003_mci_disable_interrupt(ah);
+
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+ }
+
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 new_flags, to_set, to_clear;
+
+ if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
+
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR,
+ to_clear);
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ to_set);
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+ u32 *payload, bool queue)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 type, opcode;
+
+ /* check if the message is to be queued */
+ if (header != MCI_GPM)
+ return;
+
+ type = MCI_GPM_TYPE(payload);
+ opcode = MCI_GPM_OPCODE(payload);
+
+ if (type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (opcode) {
+ case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+ MCI_GPM_COEX_BT_FLAGS_READ)
+ break;
+
+ mci->update_2g5g = queue;
+
+ break;
+ case MCI_GPM_COEX_WLAN_CHANNELS:
+ mci->wlan_channels_update = queue;
+ break;
+ case MCI_GPM_COEX_HALT_BT_GPM:
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
+ mci->unhalt_bt_gpm = queue;
+
+ if (!queue)
+ mci->halted_bt_gpm = false;
+ }
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_HALT) {
+
+ mci->halted_bt_gpm = !queue;
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!mci->update_2g5g && !force)
+ return;
+
+ if (mci->is_2g) {
+ ar9003_mci_send_2g5g_status(ah, true);
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
+ ar9003_mci_osla_setup(ah, true);
+
+ if (AR_SREV_9462(ah))
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
+ } else {
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+
+ ar9003_mci_osla_setup(ah, false);
+ ar9003_mci_send_2g5g_status(ah, true);
+ }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ bool msg_sent = false;
+ u32 regval;
+ u32 saved_mci_int_en;
+ int i;
+
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+ regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+ ath_dbg(common, MCI,
+ "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
+ header, (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+ ath_dbg(common, MCI,
+ "MCI Don't send message 0x%x. BT is in sleep state\n",
+ header);
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+ /* Need to clear SW_MSG_DONE raw bit before wait */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ (AR_MCI_INTERRUPT_SW_MSG_DONE |
+ AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+ if (payload) {
+ for (i = 0; (i * 4) < len; i++)
+ REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+ *(payload + i));
+ }
+
+ REG_WRITE(ah, AR_MCI_COMMAND0,
+ (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+ AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+ SM(len, AR_MCI_COMMAND0_LEN) |
+ SM(header, AR_MCI_COMMAND0_HEADER)));
+
+ if (wait_done &&
+ !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ else {
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+ msg_sent = true;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+ return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
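ar9003_mci_send_message() writes the payload into the AR_MCI_TX_PAYLOAD registers one 32-bit word at a time, so callers pass a u32[4] with len = 16 for a GPM. A hedged wrapper sketch (the helper name is illustrative; the call itself matches the function above):

/* Illustrative wrapper: send a pre-built 16-byte GPM, letting
 * ar9003_mci_send_message() handle queueing when BT is asleep. */
static bool ath_mci_send_gpm(struct ath_hw *ah, u32 *gpm, bool wait_done)
{
	/* MCI_GPM carries a 16-byte payload == four 32-bit words */
	return ar9003_mci_send_message(ah, MCI_GPM, 0, gpm, 16,
				       wait_done, true);
}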
+void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 pld[4] = {0, 0, 0, 0};
+
+ if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
+ (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
+ return;
+
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
+ ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
+ } else {
+ *is_reusable = false;
+ ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
+ }
+}
+
+void ar9003_mci_init_cal_done(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 pld[4] = {0, 0, 0, 0};
+
+ if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
+ (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
+ return;
+
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+}
+
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
+
+ return ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 value = 0, tsf;
+ u8 query_type;
+
+ switch (state_type) {
+ case MCI_STATE_ENABLE:
+ if (mci->ready) {
+ value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((value == 0xdeadbeef) || (value == 0xffffffff))
+ value = 0;
+ }
+ value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+ break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+ if (value < mci->gpm_len)
+ mci->gpm_idx = value;
+ else
+ mci->gpm_idx = 0;
+ break;
+ case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+		/* Convert the message index to a byte offset */
+ value <<= 4;
+ break;
+ case MCI_STATE_REMOTE_SLEEP:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_REMOTE_SLEEP) ?
+ MCI_BT_SLEEP : MCI_BT_AWAKE;
+ break;
+ case MCI_STATE_SET_BT_AWAKE:
+ mci->bt_state = MCI_BT_AWAKE;
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm)
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+
+ ar9003_mci_2g5g_switch(ah, false);
+ break;
+ case MCI_STATE_RESET_REQ_WAKE:
+ ar9003_mci_reset_req_wakeup(ah);
+ mci->update_2g5g = true;
+
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK) {
+ /* Check if we still have control of the GPIOs */
+ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+ ar9003_mci_observation_set_up(ah);
+ }
+ }
+ break;
+ case MCI_STATE_SEND_WLAN_COEX_VERSION:
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_STATE_SEND_VERSION_QUERY:
+ ar9003_mci_send_coex_version_query(ah, true);
+ break;
+ case MCI_STATE_SEND_STATUS_QUERY:
+ query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+ ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+ break;
+ case MCI_STATE_RECOVER_RX:
+ tsf = ath9k_hw_gettsf32(ah);
+ if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "(MCI) ignore Rx recovery\n");
+ break;
+ }
+ ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");
+ mci->last_recovery = tsf;
+ ar9003_mci_prep_interface(ah);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ ar9003_mci_2g5g_switch(ah, false);
+ break;
+ case MCI_STATE_NEED_FTP_STOMP:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+ break;
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
+ mci->need_flush_btinfo = false;
+ break;
+ case MCI_STATE_AIC_CAL:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_calibration(ah);
+ break;
+ case MCI_STATE_AIC_START:
+ if (ath9k_hw_is_aic_enabled(ah))
+ ar9003_aic_start_normal(ah);
+ break;
+ case MCI_STATE_AIC_CAL_RESET:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_cal_reset(ah);
+ break;
+ case MCI_STATE_AIC_CAL_SINGLE:
+ if (ath9k_hw_is_aic_enabled(ah))
+ value = ar9003_aic_calibration_single(ah);
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
+
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");
+
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(50);
+
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ mci->is_2g = false;
+ mci->update_2g5g = true;
+ ar9003_mci_send_2g5g_status(ah, true);
+
+	/* Force another 2g5g update at the next scan */
+ mci->update_2g5g = true;
+}
+
+void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+ u32 btcoex_ctrl2, diag_sw;
+ int i;
+ u8 lna_ctrl, bt_sleep;
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
+ if (btcoex_ctrl2 != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ diag_sw = REG_READ(ah, AR_DIAG_SW);
+ if (diag_sw != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
+ lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
+ bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
+ REG_WRITE(ah, AR_DIAG_SW, diag_sw);
+
+ if (bt_sleep && (lna_ctrl == 2)) {
+ REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
+ REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
+ udelay(50);
+ }
+}
+
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset;
+
+ /*
+ * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
+ */
+ offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ if (mci->gpm_idx == offset)
+ return;
+ ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
+ mci->gpm_idx, offset);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ mci->gpm_idx = 0;
+}
+
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset, more_gpm = 0, gpm_ptr;
+
+ /*
+	 * This can be useful to avoid a new GPM message interrupt, which
+	 * may lead to a spurious interrupt after power sleep or to multiple
+	 * entries into ath_mci_intr().
+	 * Adding an empty-GPM check by returning HAL_MCI_GPM_INVALID can
+	 * alleviate this effect, but clearing the GPM RX interrupt bit is
+	 * safe, because whether this is called from hw or driver code
+	 * an interrupt bit must have been set/triggered initially.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ offset = gpm_ptr;
+
+ if (!offset)
+ offset = mci->gpm_len - 1;
+ else if (offset >= mci->gpm_len) {
+ if (offset != 0xFFFF)
+ offset = 0;
+ } else {
+ offset--;
+ }
+
+ if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
+ offset = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ goto out;
+ }
+ for (;;) {
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (offset != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+
+ if (temp_index >= mci->gpm_len)
+ temp_index = 0;
+
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >= mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
+ offset = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ offset = MCI_GPM_INVALID;
+ break;
+ }
+ }
+
+ if (offset != MCI_GPM_INVALID)
+ offset <<= 4;
+out:
+ if (more)
+ *more = more_gpm;
+
+ return offset;
+}
+EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
+
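The write pointer read from AR_MCI_GPM_1 counts 16-byte GPM slots, which is why the valid offset is shifted left by four before being returned; gpm_idx is the software read index chasing that pointer. A standalone sketch of the slot-to-byte conversion under that assumption:

#include <stdint.h>
#include <stdio.h>

#define GPM_SLOT_SIZE	16	/* one GPM message, matching MCI_GPM len */

/* Convert a hardware slot index into a byte offset into the GPM buffer,
 * wrapping on the ring length (a sketch of the '<< 4' step above). */
static uint32_t gpm_slot_to_offset(uint32_t slot, uint32_t ring_len)
{
	return (slot % ring_len) * GPM_SLOT_SIZE;
}

int main(void)
{
	printf("%u\n", gpm_slot_to_offset(5, 16));	/* prints 80 */
	return 0;
}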
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->bt_ver_major = major;
+ mci->bt_ver_minor = minor;
+ mci->bt_version_known = true;
+ ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+}
+EXPORT_SYMBOL(ar9003_mci_set_bt_version);
+
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+}
+EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
+
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ if (!ah->btcoex_hw.mci.concur_tx)
+ goto out;
+
+ if (ctlmode == CTL_2GHT20)
+ return ATH_BTCOEX_HT20_MAX_TXPOWER;
+ else if (ctlmode == CTL_2GHT40)
+ return ATH_BTCOEX_HT40_MAX_TXPOWER;
+
+out:
+ return -1;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 000000000..e288611c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+#define MCI_RECOVERY_DUR_TSF (100 * 1000) /* 100 ms */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
+
+enum mci_gpm_coex_query_type {
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
+ MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+ MCI_GPM_COEX_BT_GPM_UNHALT,
+ MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+ MCI_GPM_COEX_BT_FLAGS_READ,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS 79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+ MCI_BT_MCI_FLAGS_UPDATE_HDR | \
+ MCI_BT_MCI_FLAGS_UPDATE_PLD | \
+ MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
+#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK 0x00000000
+#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
+ ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S 12
+#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_DISABLE_AIC 0x00008000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN 0x007f0000
+#define ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN_S 16
+#define ATH_MCI_CONFIG_NO_QUIET_ACK 0x00800000
+#define ATH_MCI_CONFIG_NO_QUIET_ACK_S 23
+#define ATH_MCI_CONFIG_ANT_ARCH 0x07000000
+#define ATH_MCI_CONFIG_ANT_ARCH_S 24
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK 0x08000000
+#define ATH_MCI_CONFIG_FORCE_QUIET_ACK_S 27
+#define ATH_MCI_CONFIG_FORCE_2CHAIN_ACK 0x10000000
+#define ATH_MCI_CONFIG_MCI_STAT_DBG 0x20000000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
+ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+ ATH_MCI_CONFIG_MCI_OBS_BT)
+
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_NON_SHARED 0x00
+#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED 0x01
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_NON_SHARED 0x02
+#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED 0x03
+#define ATH_MCI_ANT_ARCH_3_ANT 0x04
+
+#define MCI_ANT_ARCH_PA_LNA_SHARED(mci) \
+ ((MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) || \
+ (MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH) == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED))
+
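MCI_ANT_ARCH_PA_LNA_SHARED() tests whether the antenna-architecture field of mci->config (bits 26:24, per ATH_MCI_CONFIG_ANT_ARCH) names one of the shared PA/LNA layouts. A standalone sketch of that field test, with a local MS() stand-in and the mask values copied from above (the config value used is only an example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MS(val, field)	(((val) & field) >> field##_S)

#define ATH_MCI_CONFIG_ANT_ARCH			0x07000000
#define ATH_MCI_CONFIG_ANT_ARCH_S		24
#define ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED	0x01
#define ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED	0x03

static bool pa_lna_shared(uint32_t config)
{
	uint32_t arch = MS(config, ATH_MCI_CONFIG_ANT_ARCH);

	return arch == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED ||
	       arch == ATH_MCI_ANT_ARCH_2_ANT_PA_LNA_SHARED;
}

int main(void)
{
	/* 0x01002201: ANT_ARCH field == 1, i.e. 1-antenna shared PA/LNA */
	printf("%d\n", pa_lna_shared(0x01002201));
	return 0;
}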
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+ MCI_GPM_COEX_PROFILE_UNKNOWN,
+ MCI_GPM_COEX_PROFILE_RFCOMM,
+ MCI_GPM_COEX_PROFILE_A2DP,
+ MCI_GPM_COEX_PROFILE_HID,
+ MCI_GPM_COEX_PROFILE_BNEP,
+ MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_A2DPVO,
+ MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+enum mci_ps_state {
+ MCI_PS_DISABLE,
+ MCI_PS_ENABLE,
+ MCI_PS_ENABLE_OFF,
+ MCI_PS_ENABLE_ON
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_CHECK_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_STATUS,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_NEED_STAT_DEBUG,
+ MCI_STATE_SHARED_CHAIN_CONCUR_TX,
+ MCI_STATE_AIC_CAL,
+ MCI_STATE_AIC_START,
+ MCI_STATE_AIC_CAL_RESET,
+ MCI_STATE_AIC_CAL_SINGLE,
+ MCI_STATE_IS_AR9462,
+ MCI_STATE_IS_AR9565_1ANT,
+ MCI_STATE_IS_AR9565_2ANT,
+ MCI_STATE_WLAN_WEAK_SIGNAL,
+ MCI_STATE_SET_WLAN_PS_STATE,
+ MCI_STATE_GET_WLAN_PS_STATE,
+ MCI_STATE_DEBUG,
+ MCI_STATE_STAT_DEBUG,
+ MCI_STATE_ALLOW_FCS,
+ MCI_STATE_SET_2G_CONTENTION,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS,
+ MCI_GPM_COEX_NOOP,
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
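+
+/*
+ * Illustrative sketch of how the byte offsets and macros above combine
+ * (the helper name is hypothetical, not part of the driver): a received
+ * 16-byte GPM is first checked for the calibration subtypes and is
+ * otherwise dispatched on its type/opcode bytes.
+ */
+static inline bool ath_mci_gpm_is_coex_version_response(void *p_gpm)
+{
+	u8 type = MCI_GPM_TYPE(p_gpm);
+
+	if (MCI_GPM_IS_CAL_TYPE(type))	/* BT/WLAN calibration handshake */
+		return false;
+
+	return type == MCI_GPM_COEX_AGENT &&
+	       MCI_GPM_OPCODE(p_gpm) == MCI_GPM_COEX_VERSION_RESPONSE;
+}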
+
+/*
+ * Functions that are available to the MCI driver core.
+ */
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more);
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
+/*
+ * These functions are used by ath9k_hw.
+ */
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
+void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
+void ar9003_mci_init_cal_done(struct ath_hw *ah);
+void ar9003_mci_set_full_sleep(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
+void ar9003_mci_check_bt(struct ath_hw *ah);
+bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
+int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata);
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
+void ar9003_mci_set_power_awake(struct ath_hw *ah);
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode);
+
+#else
+
+static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
+{
+}
+static inline void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
+{
+}
+static inline void ar9003_mci_init_cal_done(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_set_full_sleep(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
+{
+}
+static inline void ar9003_mci_check_bt(struct ath_hw *ah)
+{
+}
+static inline bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ return false;
+}
+static inline int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata)
+{
+ return 0;
+}
+static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+}
+static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+}
+static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+}
+static inline u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ return -1;
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
new file mode 100644
index 000000000..6343cc919
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+
+void ar9003_paprd_enable(struct ath_hw *ah, bool val)
+{
+ struct ath9k_channel *chan = ah->curchan;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ /*
+	 * Bits 28-30 of modalHeader5G.papdRateMaskHt20 are
+	 * used for sub-band disabling of PAPRD.
+	 * The 5 GHz band is divided into 3 sub-bands -- upper,
+	 * middle and lower.
+	 * If bit 30 of modalHeader5G.papdRateMaskHt20 is set,
+	 * PAPRD is disabled for the upper 5 GHz sub-band.
+	 * If bit 29 is set, PAPRD is disabled for the middle
+	 * 5 GHz sub-band.
+	 * If bit 28 is set, PAPRD is disabled for the lower
+	 * 5 GHz sub-band.
+ */
+
+ if (IS_CHAN_5GHZ(chan)) {
+ if (chan->channel >= UPPER_5G_SUB_BAND_START) {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(30))
+ val = false;
+ } else if (chan->channel >= MID_5G_SUB_BAND_START) {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(29))
+ val = false;
+ } else {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(28))
+ val = false;
+ }
+ }
+
+ if (val) {
+ ah->paprd_table_write_done = true;
+ ath9k_hw_apply_txpower(ah, chan, false);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+}
+EXPORT_SYMBOL(ar9003_paprd_enable);
+
+static int ar9003_get_training_power_2g(struct ath_hw *ah)
+{
+ struct ath9k_channel *chan = ah->curchan;
+ unsigned int power, scale, delta;
+
+ scale = ar9003_get_paprd_scale_factor(ah, chan);
+
+ if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ power = ah->paprd_target_power + 2;
+ } else if (AR_SREV_9485(ah)) {
+ power = 25;
+ } else {
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+ delta = abs((int) ah->paprd_target_power - (int) power);
+ if (delta > scale)
+ return -1;
+
+ if (delta < 4)
+ power -= 4 - delta;
+ }
+
+ return power;
+}
+
+static int ar9003_get_training_power_5g(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ unsigned int power, scale, delta;
+
+ scale = ar9003_get_paprd_scale_factor(ah, chan);
+
+ if (IS_CHAN_HT40(chan))
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE8,
+ AR_PHY_POWERTX_RATE8_POWERTXHT40_5);
+ else
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE6,
+ AR_PHY_POWERTX_RATE6_POWERTXHT20_5);
+
+ power += scale;
+ delta = abs((int) ah->paprd_target_power - (int) power);
+ if (delta > scale)
+ return -1;
+
+ switch (get_streams(ah->txchainmask)) {
+ case 1:
+ delta = 6;
+ break;
+ case 2:
+ delta = 4;
+ break;
+ case 3:
+ delta = 2;
+ break;
+ default:
+ delta = 0;
+ ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n",
+ ah->txchainmask);
+ }
+
+ power += delta;
+ return power;
+}
+
+static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ static const u32 ctrl0[3] = {
+ AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_B2
+ };
+ static const u32 ctrl1[3] = {
+ AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_B2
+ };
+ int training_power;
+ int i, val;
+ u32 am2pm_mask = ah->paprd_ratemask;
+
+ if (IS_CHAN_2GHZ(ah->curchan))
+ training_power = ar9003_get_training_power_2g(ah);
+ else
+ training_power = ar9003_get_training_power_5g(ah);
+
+ ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n",
+ training_power, ah->paprd_target_power);
+
+ if (training_power < 0) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD target power delta out of range\n");
+ return -ERANGE;
+ }
+ ah->paprd_training_power = training_power;
+
+ if (AR_SREV_9330(ah))
+ am2pm_mask = 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
+ ah->paprd_ratemask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK,
+ am2pm_mask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
+ ah->paprd_ratemask_ht40);
+
+ ath_dbg(common, CALIBRATE, "PAPRD HT20 mask: 0x%x, HT40 mask: 0x%x\n",
+ ah->paprd_ratemask, ah->paprd_ratemask_ht40);
+
+ for (i = 0; i < ah->caps.max_txchains; i++) {
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3);
+ }
+
+ ar9003_paprd_enable(ah, false);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
+
+ if (AR_SREV_9485(ah)) {
+ val = 148;
+ } else {
+ if (IS_CHAN_2GHZ(ah->curchan)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ val = 145;
+ else
+ val = 147;
+ } else {
+ val = 137;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+ AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
+
+ if (AR_SREV_9485(ah) ||
+ AR_SREV_9462(ah) ||
+ AR_SREV_9565(ah) ||
+ AR_SREV_9550(ah) ||
+ AR_SREV_9330(ah) ||
+ AR_SREV_9340(ah))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+
+ val = -10;
+
+ if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ val = -15;
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
+ val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
+ 100);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 261376);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 248079);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 233759);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 220464);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 208194);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 196949);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
+ return 0;
+}
+
+static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
+{
+ u32 *entry = ah->paprd_gain_table_entries;
+ u8 *index = ah->paprd_gain_table_index;
+ u32 reg = AR_PHY_TXGAIN_TABLE;
+ int i;
+
+ for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
+ entry[i] = REG_READ(ah, reg);
+ index[i] = (entry[i] >> 24) & 0xff;
+ reg += 4;
+ }
+}
+
+static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
+ int target_power)
+{
+ int olpc_gain_delta = 0, cl_gain_mod;
+ int alpha_therm, alpha_volt;
+ int therm_cal_value, volt_cal_value;
+ int therm_value, volt_value;
+ int thermal_gain_corr, voltage_gain_corr;
+ int desired_scale, desired_gain = 0;
+ u32 reg_olpc = 0, reg_cl_gain = 0;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+ desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
+ AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
+ alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM);
+ alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_VOLT);
+ therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_THERM_CAL_VALUE);
+ volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_VOLT_CAL_VALUE);
+ therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE);
+ volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
+
+ switch (chain) {
+ case 0:
+ reg_olpc = AR_PHY_TPC_11_B0;
+ reg_cl_gain = AR_PHY_CL_TAB_0;
+ break;
+ case 1:
+ reg_olpc = AR_PHY_TPC_11_B1;
+ reg_cl_gain = AR_PHY_CL_TAB_1;
+ break;
+ case 2:
+ reg_olpc = AR_PHY_TPC_11_B2;
+ reg_cl_gain = AR_PHY_CL_TAB_2;
+ break;
+ default:
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Invalid chainmask: %d\n", chain);
+ break;
+ }
+
+ olpc_gain_delta = REG_READ_FIELD(ah, reg_olpc,
+ AR_PHY_TPC_11_OLPC_GAIN_DELTA);
+ cl_gain_mod = REG_READ_FIELD(ah, reg_cl_gain,
+ AR_PHY_CL_TAB_CL_GAIN_MOD);
+
+ if (olpc_gain_delta >= 128)
+ olpc_gain_delta = olpc_gain_delta - 256;
+
+ thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) +
+ (256 / 2)) / 256;
+ voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
+ (128 / 2)) / 128;
+ desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
+ voltage_gain_corr + desired_scale + cl_gain_mod;
+
+ return desired_gain;
+}
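+
+/*
+ * Worked example with hypothetical register readings: alpha_therm = 30,
+ * therm_value = 150 and therm_cal_value = 120 give
+ * thermal_gain_corr = (30 * (150 - 120) + 128) / 256 = 4, and an OLPC
+ * gain delta read back as 200 is sign-extended to 200 - 256 = -56 before
+ * it enters the desired_gain sum above.
+ */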
+
+static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index)
+{
+ int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain;
+ int padrvgnA, padrvgnB, padrvgnC, padrvgnD;
+ u32 *gain_table_entries = ah->paprd_gain_table_entries;
+
+ selected_gain_entry = gain_table_entries[gain_index];
+ txbb1dbgain = selected_gain_entry & 0x7;
+ txbb6dbgain = (selected_gain_entry >> 3) & 0x3;
+ txmxrgain = (selected_gain_entry >> 5) & 0xf;
+ padrvgnA = (selected_gain_entry >> 9) & 0xf;
+ padrvgnB = (selected_gain_entry >> 13) & 0xf;
+ padrvgnC = (selected_gain_entry >> 17) & 0xf;
+ padrvgnD = (selected_gain_entry >> 21) & 0x3;
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0);
+}
+
+static inline int find_expn(int num)
+{
+ return fls(num) - 1;
+}
+
+static inline int find_proper_scale(int expn, int N)
+{
+ return (expn > N) ? expn - 10 : 0;
+}
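+
+/*
+ * Worked example: find_expn() is floor(log2()), e.g. find_expn(96) =
+ * fls(96) - 1 = 6, so find_proper_scale(6, 10) = 0, while a magnitude
+ * with find_expn() = 14 yields find_proper_scale(14, 10) = 4 bits of
+ * downscaling.
+ */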
+
+#define NUM_BIN 23
+
+static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
+{
+ unsigned int thresh_accum_cnt;
+ int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1];
+ int PA_in[NUM_BIN + 1];
+ int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1];
+ unsigned int B1_abs_max, B2_abs_max;
+ int max_index, scale_factor;
+ int y_est[NUM_BIN + 1];
+ int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1];
+ unsigned int x_tilde_abs;
+ int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad;
+ int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B;
+ int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2;
+ int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem;
+ int y5, y3, tmp;
+ int theta_low_bin = 0;
+ int i;
+
+ /* disregard any bin that contains <= 16 samples */
+ thresh_accum_cnt = 16;
+ scale_factor = 5;
+ max_index = 0;
+ memset(theta, 0, sizeof(theta));
+ memset(x_est, 0, sizeof(x_est));
+ memset(Y, 0, sizeof(Y));
+ memset(y_est, 0, sizeof(y_est));
+ memset(x_tilde, 0, sizeof(x_tilde));
+
+ for (i = 0; i < NUM_BIN; i++) {
+ s32 accum_cnt, accum_tx, accum_rx, accum_ang;
+
+ /* number of samples */
+ accum_cnt = data_L[i] & 0xffff;
+
+ if (accum_cnt <= thresh_accum_cnt)
+ continue;
+
+ max_index++;
+
+ /* sum(tx amplitude) */
+ accum_tx = ((data_L[i] >> 16) & 0xffff) |
+ ((data_U[i] & 0x7ff) << 16);
+
+ /* sum(rx amplitude distance to lower bin edge) */
+ accum_rx = ((data_U[i] >> 11) & 0x1f) |
+ ((data_L[i + 23] & 0xffff) << 5);
+
+ /* sum(angles) */
+ accum_ang = ((data_L[i + 23] >> 16) & 0xffff) |
+ ((data_U[i + 23] & 0x7ff) << 16);
+
+ accum_tx <<= scale_factor;
+ accum_rx <<= scale_factor;
+ x_est[max_index] =
+ (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor;
+
+ Y[max_index] =
+ ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor) +
+ (1 << scale_factor) * i + 16;
+
+ if (accum_ang >= (1 << 26))
+ accum_ang -= 1 << 27;
+
+ theta[max_index] =
+ ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+ accum_cnt;
+ }
+
+ /*
+	 * Find the average theta of the first 5 bins and set all of them
+	 * to that value.  The curve is linear in that range.
+ */
+ for (i = 1; i < 6; i++)
+ theta_low_bin += theta[i];
+
+ theta_low_bin = theta_low_bin / 5;
+ for (i = 1; i < 6; i++)
+ theta[i] = theta_low_bin;
+
+ /* Set values at origin */
+ theta[0] = theta_low_bin;
+ for (i = 0; i <= max_index; i++)
+ theta[i] -= theta_low_bin;
+
+ x_est[0] = 0;
+ Y[0] = 0;
+ scale_factor = 8;
+
+ /* low signal gain */
+ if (x_est[6] == x_est[3])
+ return false;
+
+ G_fxp =
+ (((Y[6] - Y[3]) * 1 << scale_factor) +
+ (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
+ Y_intercept =
+ (G_fxp * (x_est[0] - x_est[3]) +
+ (1 << scale_factor)) / (1 << scale_factor) + Y[3];
+
+ for (i = 0; i <= max_index; i++)
+ y_est[i] = Y[i] - Y_intercept;
+
+ for (i = 0; i <= 3; i++) {
+ y_est[i] = i * 32;
+ x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
+ }
+
+ if (y_est[max_index] == 0)
+ return false;
+
+ x_est_fxp1_nonlin =
+ x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
+ G_fxp) / G_fxp;
+
+ order_x_by_y =
+ (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index];
+
+ if (order_x_by_y == 0)
+ M = 10;
+ else if (order_x_by_y == 1)
+ M = 9;
+ else
+ M = 8;
+
+ I = (max_index > 15) ? 7 : max_index >> 1;
+ L = max_index - I;
+ scale_factor = 8;
+ sum_y_sqr = 0;
+ sum_y_quad = 0;
+ x_tilde_abs = 0;
+
+ for (i = 0; i <= L; i++) {
+ unsigned int y_sqr;
+ unsigned int y_quad;
+ unsigned int tmp_abs;
+
+ /* prevent division by zero */
+ if (y_est[i + I] == 0)
+ return false;
+
+ x_est_fxp1_nonlin =
+ x_est[i + I] - ((1 << scale_factor) * y_est[i + I] +
+ G_fxp) / G_fxp;
+
+ x_tilde[i] =
+ (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i +
+ I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ y_sqr =
+ (y_est[i + I] * y_est[i + I] +
+ (scale_factor * scale_factor)) / (scale_factor *
+ scale_factor);
+ tmp_abs = abs(x_tilde[i]);
+ if (tmp_abs > x_tilde_abs)
+ x_tilde_abs = tmp_abs;
+
+ y_quad = y_sqr * y_sqr;
+ sum_y_sqr = sum_y_sqr + y_sqr;
+ sum_y_quad = sum_y_quad + y_quad;
+ B1_tmp[i] = y_sqr * (L + 1);
+ B2_tmp[i] = y_sqr;
+ }
+
+ B1_abs_max = 0;
+ B2_abs_max = 0;
+ for (i = 0; i <= L; i++) {
+ int abs_val;
+
+ B1_tmp[i] -= sum_y_sqr;
+ B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i];
+
+ abs_val = abs(B1_tmp[i]);
+ if (abs_val > B1_abs_max)
+ B1_abs_max = abs_val;
+
+ abs_val = abs(B2_tmp[i]);
+ if (abs_val > B2_abs_max)
+ B2_abs_max = abs_val;
+ }
+
+ Q_x = find_proper_scale(find_expn(x_tilde_abs), 10);
+ Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10);
+ Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10);
+
+ beta_raw = 0;
+ alpha_raw = 0;
+ for (i = 0; i <= L; i++) {
+ x_tilde[i] = x_tilde[i] / (1 << Q_x);
+ B1_tmp[i] = B1_tmp[i] / (1 << Q_B1);
+ B2_tmp[i] = B2_tmp[i] / (1 << Q_B2);
+ beta_raw = beta_raw + B1_tmp[i] * x_tilde[i];
+ alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i];
+ }
+
+ scale_B =
+ ((sum_y_quad / scale_factor) * (L + 1) -
+ (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor;
+
+ Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
+ scale_B = scale_B / (1 << Q_scale_B);
+ if (scale_B == 0)
+ return false;
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ tmp = i * 32;
+ y5 = ((beta * tmp) >> 6) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = y5 >> order1_5x_rem;
+ y3 = (alpha * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = y3 >> order2_3x_rem;
+ PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp;
+
+ if (i >= 2) {
+ tmp = PA_in[i] - PA_in[i - 1];
+ if (tmp < 0)
+ PA_in[i] =
+ PA_in[i - 1] + (PA_in[i - 1] -
+ PA_in[i - 2]);
+ }
+
+ PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400;
+ }
+
+ beta_raw = 0;
+ alpha_raw = 0;
+
+ for (i = 0; i <= L; i++) {
+ int theta_tilde =
+ ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ beta_raw = beta_raw + B1_tmp[i] * theta_tilde;
+ alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde;
+ }
+
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ int PA_angle;
+
+ /* pa_table[4] is calculated from PA_angle for i=5 */
+ if (i == 4)
+ continue;
+
+ tmp = i * 32;
+ if (beta > 0)
+ y5 = (((beta * tmp - 64) >> 6) -
+ (1 << order1_5x)) / (1 << order1_5x);
+ else
+ y5 = ((((beta * tmp - 64) >> 6) +
+ (1 << order1_5x)) / (1 << order1_5x));
+
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = y5 / (1 << order1_5x_rem);
+
+ if (beta > 0)
+ y3 = (alpha * tmp -
+ (1 << order2_3x)) / (1 << order2_3x);
+ else
+ y3 = (alpha * tmp +
+ (1 << order2_3x)) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = y3 / (1 << order2_3x_rem);
+
+ if (i < 4) {
+ PA_angle = 0;
+ } else {
+ PA_angle = y5 + y3;
+ if (PA_angle < -150)
+ PA_angle = -150;
+ else if (PA_angle > 150)
+ PA_angle = 150;
+ }
+
+ pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff);
+ if (i == 5) {
+ PA_angle = (PA_angle + 2) >> 1;
+ pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) +
+ (PA_angle & 0x7ff);
+ }
+ }
+
+ *gain = G_fxp;
+ return true;
+}
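+
+/*
+ * Worked example of the PA table packing above: each word carries an
+ * 11-bit amplitude (PA_in, saturated at 1400) in bits [21:11] and an
+ * 11-bit angle (clamped to +/- 150) in bits [10:0], so PA_in = 1400 and
+ * PA_angle = -150 pack to (1400 << 11) + (-150 & 0x7ff) = 0x2bc76a.
+ */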
+
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *caldata,
+ int chain)
+{
+ u32 *paprd_table_val = caldata->pa_table[chain];
+ u32 small_signal_gain = caldata->small_signal_gain[chain];
+ u32 training_power = ah->paprd_training_power;
+ u32 reg = 0;
+ int i;
+
+ if (chain == 0)
+ reg = AR_PHY_PAPRD_MEM_TAB_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PAPRD_MEM_TAB_B1;
+ else if (chain == 2)
+ reg = AR_PHY_PAPRD_MEM_TAB_B2;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ REG_WRITE(ah, reg, paprd_table_val[i]);
+ reg = reg + 4;
+ }
+
+ if (chain == 0)
+ reg = AR_PHY_PA_GAIN123_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PA_GAIN123_B1;
+ else
+ reg = AR_PHY_PA_GAIN123_B2;
+
+ REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ if (ah->caps.tx_chainmask & BIT(2))
+ /* val AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL correct? */
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+}
+EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
+
+void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
+{
+ unsigned int i, desired_gain, gain_index;
+ unsigned int train_power = ah->paprd_training_power;
+
+ desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
+
+ gain_index = 0;
+ for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
+ if (ah->paprd_gain_table_index[i] >= desired_gain)
+ break;
+ gain_index++;
+ }
+
+ ar9003_tx_force_gain(ah, gain_index);
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+}
+EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
+
+static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *caldata,
+ int chain)
+{
+ u32 *pa_in = caldata->pa_table[chain];
+ int capdiv_offset, quick_drop_offset;
+ int capdiv2g, quick_drop;
+ int count = 0;
+ int i;
+
+ if (!AR_SREV_9485(ah) && !AR_SREV_9330(ah))
+ return false;
+
+ capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
+ AR_PHY_65NM_CH0_TXRF3_CAPDIV2G);
+
+ quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP);
+
+ if (quick_drop)
+ quick_drop -= 0x40;
+
+ for (i = 0; i < NUM_BIN + 1; i++) {
+ if (pa_in[i] == 1400)
+ count++;
+ }
+
+ if (AR_SREV_9485(ah)) {
+ if (pa_in[23] < 800) {
+ capdiv_offset = (int)((1000 - pa_in[23] + 75) / 150);
+ capdiv2g += capdiv_offset;
+ if (capdiv2g > 7) {
+ capdiv2g = 7;
+ if (pa_in[23] < 600) {
+ quick_drop++;
+ if (quick_drop > 0)
+ quick_drop = 0;
+ }
+ }
+ } else if (pa_in[23] == 1400) {
+ quick_drop_offset = min_t(int, count / 3, 2);
+ quick_drop += quick_drop_offset;
+ capdiv2g += quick_drop_offset / 2;
+
+ if (capdiv2g > 7)
+ capdiv2g = 7;
+
+ if (quick_drop > 0) {
+ quick_drop = 0;
+ capdiv2g -= quick_drop_offset;
+ if (capdiv2g < 0)
+ capdiv2g = 0;
+ }
+ } else {
+ return false;
+ }
+ } else if (AR_SREV_9330(ah)) {
+ if (pa_in[23] < 1000) {
+ capdiv_offset = (1000 - pa_in[23]) / 100;
+ capdiv2g += capdiv_offset;
+ if (capdiv_offset > 3) {
+ capdiv_offset = 1;
+ quick_drop--;
+ }
+
+ capdiv2g += capdiv_offset;
+ if (capdiv2g > 6)
+ capdiv2g = 6;
+ if (quick_drop < -4)
+ quick_drop = -4;
+ } else if (pa_in[23] == 1400) {
+ if (count > 3) {
+ quick_drop++;
+ capdiv2g -= count / 4;
+ if (quick_drop > -2)
+ quick_drop = -2;
+ } else {
+ capdiv2g--;
+ }
+
+ if (capdiv2g < 0)
+ capdiv2g = 0;
+ } else {
+ return false;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
+ AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
+ quick_drop);
+
+ return true;
+}
+
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *caldata, int chain)
+{
+ u16 *small_signal_gain = &caldata->small_signal_gain[chain];
+ u32 *pa_table = caldata->pa_table[chain];
+ u32 *data_L, *data_U;
+ int i, status = 0;
+ u32 *buf;
+ u32 reg;
+
+ memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
+
+ buf = kmalloc(2 * 48 * sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data_L = &buf[0];
+ data_U = &buf[48];
+
+ REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ reg = AR_PHY_CHAN_INFO_TAB_0;
+ for (i = 0; i < 48; i++)
+ data_L[i] = REG_READ(ah, reg + (i << 2));
+
+ REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ for (i = 0; i < 48; i++)
+ data_U[i] = REG_READ(ah, reg + (i << 2));
+
+ if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
+ status = -2;
+
+ if (ar9003_paprd_retrain_pa_in(ah, caldata, chain))
+ status = -EINPROGRESS;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ kfree(buf);
+
+ return status;
+}
+EXPORT_SYMBOL(ar9003_paprd_create_curve);
+
+int ar9003_paprd_init_table(struct ath_hw *ah)
+{
+ int ret;
+
+ ret = ar9003_paprd_setup_single_table(ah);
+ if (ret < 0)
+ return ret;
+
+ ar9003_paprd_get_gain_table(ah);
+ return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_init_table);
+
+bool ar9003_paprd_is_done(struct ath_hw *ah)
+{
+ int paprd_done, agc2_pwr;
+
+ paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ if (AR_SREV_9485(ah))
+ goto exit;
+
+ if (paprd_done == 0x1) {
+ agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
+
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "AGC2_PWR = 0x%x training done = 0x%x\n",
+ agc2_pwr, paprd_done);
+ /*
+		 * agc2_pwr should exceed PAPRD_IDEAL_AGC2_PWR_RANGE once
+		 * training has completed; otherwise the result is discarded
+		 * and training is redone until the value is in the ideal
+		 * range.
+ */
+ if (agc2_pwr <= PAPRD_IDEAL_AGC2_PWR_RANGE)
+ paprd_done = 0;
+ }
+exit:
+ return !!paprd_done;
+}
+EXPORT_SYMBOL(ar9003_paprd_is_done);
+
+bool ar9003_is_paprd_enabled(struct ath_hw *ah)
+{
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->config.enable_paprd)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ar9003_is_paprd_enabled);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 000000000..1ad66b767
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,2251 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+
+#define AR9300_OFDM_RATES 8
+#define AR9300_HT_SS_RATES 8
+#define AR9300_HT_DS_RATES 8
+#define AR9300_HT_TS_RATES 8
+
+#define AR9300_11NA_OFDM_SHIFT 0
+#define AR9300_11NA_HT_SS_SHIFT 8
+#define AR9300_11NA_HT_DS_SHIFT 16
+#define AR9300_11NA_HT_TS_SHIFT 24
+
+#define AR9300_11NG_OFDM_SHIFT 4
+#define AR9300_11NG_HT_SS_SHIFT 12
+#define AR9300_11NG_HT_DS_SHIFT 20
+#define AR9300_11NG_HT_TS_SHIFT 28
+
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level:  0   1   2   3   4   5   6   7  */
+ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
+static const u8 ofdm2pwr[] = {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54
+};
+
+static const u8 mcs2pwr_ht20[] = {
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23
+};
+
+static const u8 mcs2pwr_ht40[] = {
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+};
+
+/**
+ * ar9003_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function to change the channel on single-chip devices, that
+ * is, the AR9300 family of chipsets.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual expressions:
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ */
+static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode = 0, aModeRefSel = 0;
+ u32 freq, chan_frac, div, channelSel = 0, reg32 = 0;
+ struct chan_centers centers;
+ int loadSynthChannel;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ if (AR_SREV_9330(ah)) {
+ if (ah->is_clk_25mhz)
+ div = 75;
+ else
+ div = 120;
+
+ channelSel = (freq * 4) / div;
+ chan_frac = (((freq * 4) % div) * 0x20000) / div;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
+ /*
+ * freq_ref = 40 / (refdiva >> amoderefsel);
+ * where refdiva=1 and amoderefsel=0
+ * ndiv = ((chan_mhz * 4) / 3) / freq_ref;
+ * chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
+ */
+ channelSel = (freq * 4) / 120;
+ chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else if (AR_SREV_9340(ah)) {
+ if (ah->is_clk_25mhz) {
+ channelSel = (freq * 2) / 75;
+ chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else {
+ channelSel = CHANSEL_2G(freq) >> 1;
+ }
+ } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) {
+ if (ah->is_clk_25mhz)
+ div = 75;
+ else
+ div = 120;
+
+ channelSel = (freq * 4) / div;
+ chan_frac = (((freq * 4) % div) * 0x20000) / div;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else {
+ channelSel = CHANSEL_2G(freq);
+ }
+ /* Set to 2G mode */
+ bMode = 1;
+ } else {
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) ||
+ AR_SREV_9531(ah) || AR_SREV_9561(ah)) &&
+ ah->is_clk_25mhz) {
+ channelSel = freq / 75;
+ chan_frac = ((freq % 75) * 0x20000) / 75;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else {
+ channelSel = CHANSEL_5G(freq);
+			/* Doubler is ON, so divide channelSel by 2. */
+ channelSel >>= 1;
+ }
+ /* Set to 5G mode */
+ bMode = 0;
+ }
+
+ /* Enable fractional mode for all channels */
+ fracMode = 1;
+ aModeRefSel = 0;
+ loadSynthChannel = 0;
+
+ reg32 = (bMode << 29);
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ /* Enable Long shift Select for Synthesizer */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
+ AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
+
+ /* Program Synth. setting */
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ /* Toggle Load Synth channel bit */
+ loadSynthChannel = 1;
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ ah->curchan = chan;
+
+ return 0;
+}
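+
+/*
+ * Worked example (illustrative) for the 120-divider fractional path above:
+ * a 2412 MHz channel gives freq * 4 = 9648, so channelSel = 9648 / 120 = 80
+ * and chan_frac = ((9648 % 120) * 0x20000) / 120 = (48 * 131072) / 120 =
+ * 52428, and the synthesizer is programmed with (80 << 17) | 52428.
+ */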
+
+/**
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel being configured
+ *
+ * For single-chip solutions. Converts to a baseband spur frequency given the
+ * input channel frequency and computes the register settings below.
+ *
+ * Spur mitigation for MRC CCK.
+ */
+static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ int cur_bb_spur, negative = 0, cck_spur_freq;
+ int i;
+ int range, max_spur_cnts, synth_freq;
+ u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
+
+ /*
+	 * Need to verify the spur is within +/- 10 MHz of the control
+	 * channel; otherwise it is out-of-band and can be ignored.
+ */
+
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah) || AR_SREV_9561(ah)) {
+ if (spur_fbin_ptr[0] == 0) /* No spur */
+ return;
+ max_spur_cnts = 5;
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0)
+ synth_freq = chan->channel + 10;
+ else
+ synth_freq = chan->channel - 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+ } else {
+ range = AR_SREV_9462(ah) ? 5 : 10;
+ max_spur_cnts = 4;
+ synth_freq = chan->channel;
+ }
+
+ for (i = 0; i < max_spur_cnts; i++) {
+ if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+ continue;
+
+ negative = 0;
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah) || AR_SREV_9561(ah))
+ cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
+ IS_CHAN_2GHZ(chan));
+ else
+ cur_bb_spur = spur_freq[i];
+
+ cur_bb_spur -= synth_freq;
+ if (cur_bb_spur < 0) {
+ negative = 1;
+ cur_bb_spur = -cur_bb_spur;
+ }
+ if (cur_bb_spur < range) {
+ cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
+
+ if (negative == 1)
+ cck_spur_freq = -cck_spur_freq;
+
+ cck_spur_freq = cck_spur_freq & 0xfffff;
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
+ 0x2);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
+ 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
+ cck_spur_freq);
+
+ return;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
+}
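+
+/*
+ * Worked example (illustrative): with the control channel at 2437 MHz, the
+ * fixed spur table entry at 2440 MHz gives cur_bb_spur = 3 MHz, which is
+ * inside the +/- 10 MHz range, so cck_spur_freq = (3 << 19) / 11 = 142987
+ * (masked to 20 bits) is written to AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ.
+ */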
+
+/* Clean all spur register fields */
+static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
+}
+
+static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
+ int freq_offset,
+ int spur_freq_sd,
+ int spur_delta_phase,
+ int spur_subchannel_sd,
+ int range,
+ int synth_freq)
+{
+ int mask_index = 0;
+
+ /* OFDM Spur mitigation */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
+
+ if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
+
+ if (!AR_SREV_9340(ah) &&
+ REG_READ_FIELD(ah, AR_PHY_MODE,
+ AR_PHY_MODE_DYNAMIC) == 0x1)
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
+
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
+}
+
+static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
+ int freq_offset)
+{
+ int mask_index = 0;
+
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
+ mask_index);
+
+ /* A == B */
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
+ mask_index);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
+ mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
+
+ /* A == B */
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+}
+
+static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int freq_offset,
+ int range,
+ int synth_freq)
+{
+ int spur_freq_sd = 0;
+ int spur_subchannel_sd = 0;
+ int spur_delta_phase = 0;
+
+ if (IS_CHAN_HT40(chan)) {
+ if (freq_offset < 0) {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 1;
+ else
+ spur_subchannel_sd = 0;
+
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+
+ } else {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 0;
+ else
+ spur_subchannel_sd = 1;
+
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+
+ }
+
+ spur_delta_phase = (freq_offset << 17) / 5;
+
+ } else {
+ spur_subchannel_sd = 0;
+		spur_freq_sd = (freq_offset << 9) / 11;
+ spur_delta_phase = (freq_offset << 18) / 5;
+ }
+
+ spur_freq_sd = spur_freq_sd & 0x3ff;
+ spur_delta_phase = spur_delta_phase & 0xfffff;
+
+ ar9003_hw_spur_ofdm(ah,
+ freq_offset,
+ spur_freq_sd,
+ spur_delta_phase,
+ spur_subchannel_sd,
+ range, synth_freq);
+}
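+
+/*
+ * Worked example (illustrative): for an HT20 channel with a spur 3 MHz from
+ * the synthesizer center, freq_offset = 3, so spur_freq_sd = (3 << 9) / 11 =
+ * 139, spur_delta_phase = (3 << 18) / 5 = 157286, and the mask index
+ * computed in ar9003_hw_spur_ofdm() is (3 << 4) / 5 = 9.
+ */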
+
+/* Spur mitigation for OFDM */
+static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int synth_freq;
+ int range = 10;
+ int freq_offset = 0;
+ int mode;
+	u8 *spurChansPtr;
+ unsigned int i;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
+ mode = 0;
+	} else {
+ spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
+ mode = 1;
+ }
+
+ if (spurChansPtr[0] == 0)
+ return; /* No spur in the mode */
+
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ synth_freq = chan->channel - 10;
+ else
+ synth_freq = chan->channel + 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+
+ ar9003_hw_spur_ofdm_clear(ah);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
+ freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
+ freq_offset -= synth_freq;
+ if (abs(freq_offset) < range) {
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
+ range, synth_freq);
+
+ if (AR_SREV_9565(ah) && (i < 4)) {
+ freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
+ mode);
+ freq_offset -= synth_freq;
+ if (abs(freq_offset) < range)
+ ar9003_hw_spur_ofdm_9565(ah, freq_offset);
+ }
+
+ break;
+ }
+ }
+}
+
+static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (!AR_SREV_9565(ah))
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ ar9003_hw_spur_mitigate_ofdm(ah, chan);
+}
+
+static u32 ar9003_hw_compute_pll_control_soc(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_SOC_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_SOC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_SOC_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_SOC_PLL_DIV_INT);
+
+ return pll;
+}
+
+static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
+
+ return pll;
+}
+
+static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ enableDacFifo =
+ (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
+
+ /* Enable 11n HT, 20 MHz */
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ if (!AR_SREV_9561(ah))
+ phymode |= AR_PHY_GC_SINGLE_HT_LTF1;
+
+ /* Configure baseband for dynamic 20/40 operation */
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_GC_DYN2040_EN;
+ /* Configure control (primary) channel at +-10MHz */
+ if (IS_CHAN_HT40PLUS(chan))
+ phymode |= AR_PHY_GC_DYN2040_PRI_CH;
+
+ }
+
+ /* make sure we preserve INI settings */
+ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
+ /* turn off Green Field detection for STA for now */
+ phymode &= ~AR_PHY_GC_GF_DETECT_EN;
+
+ REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
+
+ /* Configure MAC for 20/40 operation */
+ ath9k_hw_set11nmac2040(ah, chan);
+
+ /* global transmit timeout (25 TUs default)*/
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ /* carrier sense timeout */
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ar9003_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ /*
+ * Wait for the frequency synth to settle (synth goes on
+ * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
+ * Value is in 100ns increments.
+ */
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+
+ /* Activate the PHY (includes baseband activate + synthesizer on) */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ ath9k_hw_synth_delay(ah, chan, synthDelay);
+}
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+{
+ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
+ tx = 3;
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+}
+
+/*
+ * Override INI values with chip specific configuration.
+ */
+static void ar9003_hw_override_ini(struct ath_hw *ah)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS and clear it only after
+ * RXE is set for MAC. This prevents frames with
+ * corrupted descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID. By default,
+ * this feature is enabled. But since the driver is not using this
+ * feature, we switch it off; otherwise multicast search based on
+ * MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
+ val |= AR_AGG_WEP_ENABLE_FIX |
+ AR_AGG_WEP_ENABLE |
+ AR_PCU_MISC_MODE2_CFP_IGNORE;
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+ AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+ ah->enabled_cals |= TX_IQ_CAL;
+ else
+ ah->enabled_cals &= ~TX_IQ_CAL;
+
+ }
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
+
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
+ AR_SREV_9561(ah)) {
+ if (ah->is_clk_25mhz) {
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
+ } else {
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+ REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+ REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
+ }
+ udelay(100);
+ }
+}
+
+static void ar9003_hw_prog_ini(struct ath_hw *ah,
+ struct ar5416IniArray *iniArr,
+ int column)
+{
+ unsigned int i, regWrites = 0;
+
+ /* New INI format: Array may be undefined (pre, core, post arrays) */
+ if (!iniArr->ia_array)
+ return;
+
+ /*
+ * New INI format: Pre, core, and post arrays for a given subsystem
+ * may be modal (> 2 columns) or non-modal (2 columns). Determine if
+ * the array is non-modal and force the column to 1.
+ */
+ if (column >= iniArr->ia_columns)
+ column = 1;
+
+ for (i = 0; i < iniArr->ia_rows; i++) {
+ u32 reg = INI_RA(iniArr, i, 0);
+ u32 val = INI_RA(iniArr, i, column);
+
+ REG_WRITE(ah, reg, val);
+
+ DO_DELAY(regWrites);
+ }
+}
+
+static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int ret;
+
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 7;
+ else
+ return 8;
+ }
+
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
+
+ if (IS_CHAN_HT40(chan))
+ ret++;
+
+ return ret;
+}
+
+static int ar9561_hw_get_modes_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ return 1;
+ else
+ return 2;
+ }
+
+ return 0;
+}
+
+static void ar9003_doubler_fix(struct ath_hw *ah)
+{
+ if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) {
+ REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+ REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+ REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0);
+
+ udelay(200);
+
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH0_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH1_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+ REG_CLR_BIT(ah, AR_PHY_65NM_CH2_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK);
+
+ udelay(1);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX2,
+ AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1);
+
+ udelay(200);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH12,
+ AR_PHY_65NM_CH0_SYNTH12_VREFMUL3, 0xf);
+
+ REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
+ REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
+ REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, 0,
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S |
+ 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S);
+ }
+}
+
+static int ar9003_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ unsigned int regWrites = 0, i;
+ u32 modesIndex;
+
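+ /*
+ * Select the modal column of the INI arrays:
+ * 1 = 5 GHz HT20/legacy, 2 = 5 GHz HT40,
+ * 3 = 2 GHz HT40, 4 = 2 GHz HT20/legacy.
+ */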
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
+
+ /*
+ * SOC, MAC, BB, RADIO initvals.
+ */
+ for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ if (i == ATH_INI_POST && AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_hw_prog_ini(ah,
+ &ah->ini_radio_post_sys2ant,
+ modesIndex);
+ }
+
+ ar9003_doubler_fix(ah);
+
+ /*
+ * RXGAIN initvals.
+ */
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /*
+ * CUS217 mix LNA mode.
+ */
+ if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ 1, regWrites);
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ modesIndex, regWrites);
+ }
+
+ /*
+ * 5G-XLNA
+ */
+ if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
+ (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ modesIndex, regWrites);
+ }
+
+ if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+ modesIndex, regWrites);
+ }
+
+ if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
+ REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
+ regWrites);
+
+ /*
+ * TXGAIN initvals.
+ */
+ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
+ int modes_txgain_index = 1;
+
+ if (AR_SREV_9550(ah))
+ modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
+
+ if (AR_SREV_9561(ah))
+ modes_txgain_index =
+ ar9561_hw_get_modes_txgain_index(ah, chan);
+
+ if (modes_txgain_index < 0)
+ return -EINVAL;
+
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index,
+ regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ }
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock,
+ modesIndex, regWrites);
+
+ /*
+ * Clock frequency initvals.
+ */
+ REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
+
+ /*
+ * JAPAN regulatory.
+ */
+ if (chan->channel == 2484)
+ ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
+
+ ah->modes_index = modesIndex;
+ ar9003_hw_override_ini(ah);
+ ar9003_hw_set_channel_regs(ah, chan);
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+ ath9k_hw_apply_txpower(ah, chan, false);
+
+ return 0;
+}
+
+static void ar9003_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ if (IS_CHAN_2GHZ(chan))
+ rfMode |= AR_PHY_MODE_DYNAMIC;
+ else
+ rfMode |= AR_PHY_MODE_OFDM;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
+ REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
+ AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
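+ /* 0x64000000 is 100 (MHz) << 24, scaled to keep precision in the division below */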
+ struct chan_centers centers;
+
+ /*
+ * Half and quarter rate channels divide the scaled clock by 2 or 4
+ * to scale for the selected channel bandwidth.
+ */
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ /*
+ * ALGO -> coef = 1e8/fcarrier*fclock/40;
+ * the coef is scaled to provide precision for this floating point
+ * calculation
+ */
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ /*
+ * For Short GI,
+ * scaled coeff is 9/10 that of normal coeff
+ */
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ /* for short gi */
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+/*
+ * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
+ * Read the phy active delay register. Value is in 100ns increments.
+ */
+static void ar9003_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+
+ ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ar5416AniState *aniState = &ah->ani;
+ int m1ThreshLow, m2ThreshLow;
+ int m1Thresh, m2Thresh;
+ int m2CountThr, m2CountThrLow;
+ int m1ThreshLowExt, m2ThreshLowExt;
+ int m1ThreshExt, m2ThreshExt;
+ s32 value, value2;
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ /*
+ * on == 1 means OFDM weak signal detection is ON
+ * (the default, for less noise immunity)
+ *
+ * on == 0 means OFDM weak signal detection is OFF
+ * (for more noise immunity)
+ */
+ u32 on = param ? 1 : 0;
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ goto skip_ws_det;
+
+ m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1ThreshExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2ThreshExt);
+skip_ws_det:
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (on != aniState->ofdmWeakSigDetect) {
+ ath_dbg(common, ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ aniState->ofdmWeakSigDetect ?
+ "on" : "off",
+ on ? "on" : "off");
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetect = on;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep_table)) {
+ ath_dbg(common, ANI,
+ "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(firstep_table));
+ return false;
+ }
+
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
+ aniState->iniDef.firstep;
+ if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ value);
+ /*
+ * we need to set first step low register too
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
+ aniState->iniDef.firstepLow;
+ if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
+
+ if (level != aniState->firstepLevel) {
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL,
+ value,
+ aniState->iniDef.firstep);
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL,
+ value2,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
+ ath_dbg(common, ANI,
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(cycpwrThr1_table));
+ return false;
+ }
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
+ aniState->iniDef.cycpwrThr1;
+ if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ value);
+
+ /*
+ * set AR_PHY_EXT_CCA for extension channel
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
+ aniState->iniDef.cycpwrThr1Ext;
+ if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1, value2);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_dbg(common, ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_MRC_CCK:{
+ /*
+ * is_on == 1 means MRC CCK is ON (default, less noise immunity)
+ * is_on == 0 means MRC CCK is OFF (more noise immunity)
+ */
+ bool is_on = param ? 1 : 0;
+
+ if (ah->caps.rx_chainmask == 1)
+ break;
+
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_ENABLE, is_on);
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_MUX_REG, is_on);
+ if (is_on != aniState->mrcCCK) {
+ ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
+ chan->channel,
+ aniState->mrcCCK ? "on" : "off",
+ is_on ? "on" : "off");
+ if (is_on)
+ ah->stats.ast_ani_ccklow++;
+ else
+ ah->stats.ast_ani_cckhigh++;
+ aniState->mrcCCK = is_on;
+ }
+ break;
+ }
+ default:
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_dbg(common, ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ aniState->ofdmWeakSigDetect ? "on" : "off",
+ aniState->firstepLevel,
+ aniState->mrcCCK ? "on" : "off",
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+ return true;
+}
+
+static void ar9003_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH_MINCCA_PWR_S 20
+#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
+
+ int16_t nf;
+ int i;
+
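+ /*
+ * The minimum CCA power is a 9-bit signed value per chain; sign
+ * extend it into the noise floor array (extension channel entries
+ * are filled for HT40).
+ */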
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->rxchainmask & BIT(i)) {
+ nf = MS(REG_READ(ah, ah->nf_regs[i]),
+ AR_PHY_CH_MINCCA_PWR);
+ nfarray[i] = sign_extend32(nf, 8);
+
+ if (IS_CHAN_HT40(ah->curchan)) {
+ u8 ext_idx = AR9300_MAX_CHAINS + i;
+
+ nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
+ AR_PHY_CH_EXT_MINCCA_PWR);
+ nfarray[ext_idx] = sign_extend32(nf, 8);
+ }
+ }
+ }
+}
+
+static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;
+
+ if (AR_SREV_9330(ah))
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
+ ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
+ ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9462_5GHZ;
+ }
+}
+
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialized from the INI.
+ */
+static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath9k_ani_default *iniDef;
+ u32 val;
+
+ aniState = &ah->ani;
+ iniDef = &aniState->iniDef;
+
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d MHz\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = true;
+ aniState->mrcCCK = true;
+}
+
+static void ar9003_hw_set_radar_params(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf)
+{
+ unsigned int regWrites = 0;
+ u32 radar_0 = 0, radar_1;
+
+ if (!conf) {
+ REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
+ return;
+ }
+
+ radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
+ radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
+ radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
+ radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
+ radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
+ radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+
+ radar_1 = REG_READ(ah, AR_PHY_RADAR_1);
+ radar_1 &= ~(AR_PHY_RADAR_1_MAXLEN | AR_PHY_RADAR_1_RELSTEP_THRESH |
+ AR_PHY_RADAR_1_RELPWR_THRESH);
+ radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
+ radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
+ radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
+ radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
+
+ REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
+ REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
+ if (conf->ext_channel)
+ REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+ else
+ REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+
+ if (AR_SREV_9300(ah) || AR_SREV_9340(ah) || AR_SREV_9580(ah)) {
+ REG_WRITE_ARRAY(&ah->ini_dfs,
+ IS_CHAN_HT40(ah->curchan) ? 2 : 1, regWrites);
+ }
+}
+
+static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
+{
+ struct ath_hw_radar_conf *conf = &ah->radar_conf;
+
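+ /* Default radar (DFS) pulse detector thresholds. */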
+ conf->fir_power = -28;
+ conf->radar_rssi = 0;
+ conf->pulse_height = 10;
+ conf->pulse_rssi = 15;
+ conf->pulse_inband = 8;
+ conf->pulse_maxlen = 255;
+ conf->pulse_inband_step = 12;
+ conf->radar_inband = 8;
+}
+
+static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ u32 regval;
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S;
+ antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
+ AR_PHY_ANT_DIV_ALT_LNACONF_S;
+ antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
+ AR_PHY_ANT_FAST_DIV_BIAS_S;
+
+ if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
+ antconf->lna1_lna2_delta = -9;
+ antconf->div_group = 1;
+ } else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_switch_delta = -1;
+ antconf->lna1_lna2_delta = -9;
+ antconf->div_group = 2;
+ } else if (AR_SREV_9565(ah)) {
+ antconf->lna1_lna2_switch_delta = 3;
+ antconf->lna1_lna2_delta = -9;
+ antconf->div_group = 3;
+ } else {
+ antconf->lna1_lna2_switch_delta = -1;
+ antconf->lna1_lna2_delta = -3;
+ antconf->div_group = 0;
+ }
+}
+
+static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ u32 regval;
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_FAST_DIV_BIAS |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
+ & AR_PHY_ANT_DIV_MAIN_LNACONF);
+ regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
+ & AR_PHY_ANT_DIV_ALT_LNACONF);
+ regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
+ & AR_PHY_ANT_FAST_DIV_BIAS);
+ regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
+ & AR_PHY_ANT_DIV_MAIN_GAINTB);
+ regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
+ & AR_PHY_ANT_DIV_ALT_GAINTB);
+
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+}
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u8 ant_div_ctl1;
+ u32 regval;
+
+ if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
+ return;
+
+ if (AR_SREV_9485(ah)) {
+ regval = ar9003_hw_ant_ctrl_common_2_get(ah,
+ IS_CHAN_2GHZ(ah->curchan));
+ if (enable) {
+ regval &= ~AR_SWITCH_TABLE_COM2_ALL;
+ regval |= ah->config.ant_ctrl_comm2g_switch_enable;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
+ AR_SWITCH_TABLE_COM2_ALL, regval);
+ }
+
+ ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+
+ /*
+ * Set MAIN/ALT LNA conf.
+ * Set MAIN/ALT gain_tb.
+ */
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~AR_ANT_DIV_CTRL_ALL);
+ regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ if (AR_SREV_9485_11_OR_LATER(ah)) {
+ /*
+ * Enable LNA diversity.
+ */
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~AR_PHY_ANT_DIV_LNADIV;
+ regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+ if (enable)
+ regval |= AR_ANT_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ /*
+ * Enable fast antenna diversity.
+ */
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= ~AR_FAST_DIV_ENABLE;
+ regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+ if (enable)
+ regval |= AR_FAST_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_ALT_GAINTB |
+ AR_PHY_ANT_DIV_MAIN_GAINTB));
+ /*
+ * Set MAIN to LNA1 and ALT to LNA2 at the
+ * beginning.
+ */
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+ } else if (AR_SREV_9565(ah)) {
+ if (enable) {
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+ AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+ }
+}
+
+#endif
+
+static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *ini_reloaded)
+{
+ unsigned int regWrites = 0;
+ u32 modesIndex, txgain_index;
+
+ if (IS_CHAN_5GHZ(chan))
+ modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+ else
+ modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
+
+ txgain_index = AR_SREV_9531(ah) ? 1 : modesIndex;
+
+ if (modesIndex == ah->modes_index) {
+ *ini_reloaded = false;
+ goto set_rfmode;
+ }
+
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
+ modesIndex);
+
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, txgain_index, regWrites);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /*
+ * CUS217 mix LNA mode.
+ */
+ if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ 1, regWrites);
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ modesIndex, regWrites);
+ }
+ }
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
+
+ if (AR_SREV_9565(ah))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
+
+ /*
+ * JAPAN regulatory.
+ */
+ if (chan->channel == 2484)
+ ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
+
+ ah->modes_index = modesIndex;
+ *ini_reloaded = true;
+
+set_rfmode:
+ ar9003_hw_set_rfmode(ah, chan);
+ return 0;
+}
+
+static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+ /* On AR93xx and newer, count = 0 will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless)
+ count = 0;
+ else if (param->count == 0)
+ count = 1;
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+}
+
+static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, 0x9864, 0x7f000);
+ REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+ REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+ REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+ REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+ REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+ static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ unsigned int i;
+
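+ /* Fill the per-rate target power table, clamped at MAX_RATE_POWER. */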
+ if (txpower <= MAX_RATE_POWER) {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
+ } else {
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = MAX_RATE_POWER;
+ }
+
+ REG_WRITE(ah, 0xa458, 0);
+
+ REG_WRITE(ah, 0xa3c0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+ REG_WRITE(ah, 0xa3c8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3cc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
+ REG_WRITE(ah, 0xa3d0,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+ REG_WRITE(ah, 0xa3d4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
+ REG_WRITE(ah, 0xa3e4,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
+ REG_WRITE(ah, 0xa3e8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
+ REG_WRITE(ah, 0xa3d8,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+ REG_WRITE(ah, 0xa3dc,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
+ REG_WRITE(ah, 0xa3ec,
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
+ ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+}
+
+static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)
+{
+ ah->tx_power[0] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[1] = rate_array[ALL_TARGET_LEGACY_1L_5L];
+ ah->tx_power[2] = min(rate_array[ALL_TARGET_LEGACY_1L_5L],
+ rate_array[ALL_TARGET_LEGACY_5S]);
+ ah->tx_power[3] = min(rate_array[ALL_TARGET_LEGACY_11L],
+ rate_array[ALL_TARGET_LEGACY_11S]);
+}
+
+static void ar9003_hw_init_txpower_ofdm(struct ath_hw *ah, u8 *rate_array,
+ int offset)
+{
+ int i, j;
+
+ for (i = offset; i < offset + AR9300_OFDM_RATES; i++) {
+ /* OFDM rate to power table idx */
+ j = ofdm2pwr[i - offset];
+ ah->tx_power[i] = rate_array[j];
+ }
+}
+
+static void ar9003_hw_init_txpower_ht(struct ath_hw *ah, u8 *rate_array,
+ int ss_offset, int ds_offset,
+ int ts_offset, bool is_40)
+{
+ int i, j, mcs_idx = 0;
+ const u8 *mcs2pwr = (is_40) ? mcs2pwr_ht40 : mcs2pwr_ht20;
+
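+ /* mcs_idx walks the MCS indices across the single, dual and triple stream rate loops. */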
+ for (i = ss_offset; i < ss_offset + AR9300_HT_SS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ds_offset; i < ds_offset + AR9300_HT_DS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+
+ for (i = ts_offset; i < ts_offset + AR9300_HT_TS_RATES; i++) {
+ j = mcs2pwr[mcs_idx];
+ ah->tx_power[i] = rate_array[j];
+ mcs_idx++;
+ }
+}
+
+static void ar9003_hw_init_txpower_stbc(struct ath_hw *ah, int ss_offset,
+ int ds_offset, int ts_offset)
+{
+ memcpy(&ah->tx_power_stbc[ss_offset], &ah->tx_power[ss_offset],
+ AR9300_HT_SS_RATES);
+ memcpy(&ah->tx_power_stbc[ds_offset], &ah->tx_power[ds_offset],
+ AR9300_HT_DS_RATES);
+ memcpy(&ah->tx_power_stbc[ts_offset], &ah->tx_power[ts_offset],
+ AR9300_HT_TS_RATES);
+}
+
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan)
+{
+ if (IS_CHAN_5GHZ(chan)) {
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NA_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NA_HT_SS_SHIFT,
+ AR9300_11NA_HT_DS_SHIFT,
+ AR9300_11NA_HT_TS_SHIFT);
+ }
+ } else {
+ ar9003_hw_init_txpower_cck(ah, rate_array);
+ ar9003_hw_init_txpower_ofdm(ah, rate_array,
+ AR9300_11NG_OFDM_SHIFT);
+ if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
+ ar9003_hw_init_txpower_ht(ah, rate_array,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT,
+ IS_CHAN_HT40(chan));
+ ar9003_hw_init_txpower_stbc(ah,
+ AR9300_11NG_HT_SS_SHIFT,
+ AR9300_11NG_HT_DS_SHIFT,
+ AR9300_11NG_HT_TS_SHIFT);
+ }
+ }
+}
+
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ static const u32 ar9300_cca_regs[6] = {
+ AR_PHY_CCA_0,
+ AR_PHY_CCA_1,
+ AR_PHY_CCA_2,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_1,
+ AR_PHY_EXT_CCA_2,
+ };
+
+ priv_ops->rf_set_freq = ar9003_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
+
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control_soc;
+ else
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
+
+ priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
+ priv_ops->init_bb = ar9003_hw_init_bb;
+ priv_ops->process_ini = ar9003_hw_process_ini;
+ priv_ops->set_rfmode = ar9003_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar9003_hw_rfbus_req;
+ priv_ops->rfbus_done = ar9003_hw_rfbus_done;
+ priv_ops->ani_control = ar9003_hw_ani_control;
+ priv_ops->do_getnf = ar9003_hw_do_getnf;
+ priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
+ priv_ops->set_radar_params = ar9003_hw_set_radar_params;
+ priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;
+
+ ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
+ ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
+ ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
+#endif
+ ops->tx99_start = ar9003_hw_tx99_start;
+ ops->tx99_stop = ar9003_hw_tx99_stop;
+ ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
+
+ ar9003_hw_set_nf_limits(ah);
+ ar9003_hw_set_radar_conf(ah);
+ memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
+}
+
+/*
+ * Baseband Watchdog signatures:
+ *
+ * 0x04000539: BB hang when operating in HT40 DFS Channel.
+ * Full chip reset is not required, but a recovery
+ * mechanism is needed.
+ *
+ * 0x1300000a: Related to CAC deafness.
+ * Chip reset is not required.
+ *
+ * 0x0400000a: Related to CAC deafness.
+ * Full chip reset is required.
+ *
+ * 0x04000b09: RX state machine gets into an illegal state
+ * when a packet with unsupported rate is received.
+ * Full chip reset is required and PHY_RESTART has
+ * to be disabled.
+ *
+ * 0x04000409: Packet stuck on receive.
+ * Full chip reset is required for all chips except AR9340.
+ */
+
+/*
+ * ar9003_hw_bb_watchdog_check(): Returns true if a chip reset is required.
+ */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah)
+{
+ u32 val;
+
+ switch (ah->bb_watchdog_last_status) {
+ case 0x04000539:
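+ /*
+ * Recovery: momentarily force the radar FIRPWR threshold to
+ * 0x7f and then restore the DFS value.
+ */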
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= (~AR_PHY_RADAR_0_FIRPWR);
+ val |= SM(0x7f, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+ udelay(1);
+ val = REG_READ(ah, AR_PHY_RADAR_0);
+ val &= ~AR_PHY_RADAR_0_FIRPWR;
+ val |= SM(AR9300_DFS_FIRPWR, AR_PHY_RADAR_0_FIRPWR);
+ REG_WRITE(ah, AR_PHY_RADAR_0, val);
+
+ return false;
+ case 0x1300000a:
+ return false;
+ case 0x0400000a:
+ case 0x04000b09:
+ return true;
+ case 0x04000409:
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+ return false;
+ else
+ return true;
+ default:
+ /*
+ * For any other unknown signatures, do a
+ * full chip reset.
+ */
+ return true;
+ }
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_check);
+
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
+ u32 val, idle_count;
+
+ if (!idle_tmo_ms) {
+ /* disable IRQ, disable chip-reset for BB panic */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
+ ~(AR_PHY_WATCHDOG_RST_ENABLE |
+ AR_PHY_WATCHDOG_IRQ_ENABLE));
+
+ /* disable watchdog in non-IDLE mode, disable in IDLE mode */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
+ ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+ AR_PHY_WATCHDOG_IDLE_ENABLE));
+
+ ath_dbg(common, RESET, "Disabled BB Watchdog\n");
+ return;
+ }
+
+ /* enable IRQ, disable chip-reset for BB watchdog */
+ val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+ (val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
+ ~AR_PHY_WATCHDOG_RST_ENABLE);
+
+ /* bound limit to 10 secs */
+ if (idle_tmo_ms > 10000)
+ idle_tmo_ms = 10000;
+
+ /*
+ * The watchdog event time unit is 2^15 cycles of the 44/88 MHz clock.
+ *
+ * For HT20 we have a time unit of 2^15/44 MHz = 0.74 ms per tick
+ * For HT40 we have a time unit of 2^15/88 MHz = 0.37 ms per tick
+ *
+ * Given we use fast clock now in 5 GHz, these time units should
+ * be common for both 2 GHz and 5 GHz.
+ */
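+ /* ticks = idle_tmo_ms / 0.74 ms (HT20) or idle_tmo_ms / 0.37 ms (HT40) */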
+ idle_count = (100 * idle_tmo_ms) / 74;
+ if (ah->curchan && IS_CHAN_HT40(ah->curchan))
+ idle_count = (100 * idle_tmo_ms) / 37;
+
+ /*
+ * enable watchdog in non-IDLE mode, disable in IDLE mode,
+ * set idle time-out.
+ */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+ AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+ AR_PHY_WATCHDOG_IDLE_MASK |
+ (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
+
+ ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
+ idle_tmo_ms);
+}
+
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
+{
+ /*
+ * we want to avoid printing in ISR context so we save the
+ * watchdog status to be printed later in bottom half context.
+ */
+ ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);
+
+ /*
+ * the watchdog timer should reset on status read, but to be
+ * sure we write 0 to the watchdog status bit.
+ */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
+ ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
+}
+
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 status;
+
+ if (likely(!(common->debug_mask & ATH_DBG_RESET)))
+ return;
+
+ status = ah->bb_watchdog_last_status;
+ ath_dbg(common, RESET,
+ "\n==== BB update: BB status=0x%08x ====\n", status);
+ ath_dbg(common, RESET,
+ "** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
+ MS(status, AR_PHY_WATCHDOG_INFO),
+ MS(status, AR_PHY_WATCHDOG_DET_HANG),
+ MS(status, AR_PHY_WATCHDOG_RADAR_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_AGC_SM),
+ MS(status, AR_PHY_WATCHDOG_SRCH_SM));
+
+ ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
+ ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
+ REG_READ(ah, AR_PHY_GEN_CTRL));
+
+#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
+ if (common->cc_survey.cycles)
+ ath_dbg(common, RESET,
+ "** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
+ PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
+
+ ath_dbg(common, RESET, "==== BB update: done ====\n\n");
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
+
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
+{
+ u8 result;
+ u32 val;
+
+ /* While receiving an unsupported rate frame, the rx state
+ * machine gets into state 0xb and if a phy_restart happens in
+ * that state, the BB hangs. If the RXSM is in state 0xb after
+ * the first BB panic, make sure phy_restart is disabled.
+ */
+ result = MS(ah->bb_watchdog_last_status, AR_PHY_WATCHDOG_RX_OFDM_SM);
+
+ if ((result == 0xb) || ah->bb_hang_rx_ofdm) {
+ ah->bb_hang_rx_ofdm = true;
+ val = REG_READ(ah, AR_PHY_RESTART);
+ val &= ~AR_PHY_RESTART_ENA;
+ REG_WRITE(ah, AR_PHY_RESTART, val);
+ }
+}
+EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 000000000..fc595b92a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,1326 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_PHY_H
+#define AR9003_PHY_H
+
+/*
+ * Channel Register Map
+ */
+#define AR_CHAN_BASE 0x9800
+
+#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
+#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
+#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
+#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
+#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
+#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
+#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
+#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
+#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
+#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+#define AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT 16
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
+
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
+
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
+
+#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
+
+#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
+#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
+#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
+
+#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
+#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
+#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
+#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
+#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
+#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
+
+#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
+#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
+#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
+#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
+
+/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
+#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
+#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
+#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
+
+#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
+#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
+#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
+
+/*
+ * Channel Field Definitions
+ */
+#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
+#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
+
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
+
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8
+#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_CYCPWR_THR1_S 9
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
+#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
+#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
+#define AR_PHY_TIMING5_RSSI_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
+#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
+#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
+#define AR_PHY_RADAR_LB_DC_CAP_S 23
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
+#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
+#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
+#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
+
+/*
+ * MRC Register Map
+ */
+#define AR_MRC_BASE 0x9c00
+
+#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
+#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
+#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
+#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
+#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
+#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
+#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
+#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
+#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
+
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B 0x00FE0000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S 17
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B 0x0001F000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S 12
+
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B 0x00FE0000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S 17
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B 0x0001F000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S 12
+
+
+/*
+ * MRC Field Definitions
+ */
+#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_SGI_DSC_MAN_S 4
+#define AR_PHY_SGI_DSC_EXP 0x0000000F
+#define AR_PHY_SGI_DSC_EXP_S 0
+/*
+ * BBB Register Map
+ */
+#define AR_BBB_BASE 0x9d00
+
+/*
+ * AGC Register Map
+ */
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
+#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
+#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
+#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
+#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
+#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
+#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
+#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+
+/*
+ * Antenna Diversity settings
+ */
+#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_ANT_DIV_CTRL_ALL 0x7e000000
+#define AR_ANT_DIV_CTRL_ALL_S 25
+#define AR_ANT_DIV_ENABLE 0x1000000
+#define AR_ANT_DIV_ENABLE_S 24
+
+
+#define AR_PHY_ANT_FAST_DIV_BIAS 0x00007e00
+#define AR_PHY_ANT_FAST_DIV_BIAS_S 9
+#define AR_PHY_ANT_SW_RX_PROT 0x00800000
+#define AR_PHY_ANT_SW_RX_PROT_S 23
+#define AR_PHY_ANT_DIV_LNADIV 0x01000000
+#define AR_PHY_ANT_DIV_LNADIV_S 24
+#define AR_PHY_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
+
+#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
+#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
+#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
+#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
+#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
+#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
+#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
+#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
+#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+
+#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_FAST_DIV_ENABLE 0x2000
+#define AR_FAST_DIV_ENABLE_S 13
+
+#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
+#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
+
+#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+
+#define AR_PHY_MRC_CCK_CTRL (AR_AGC_BASE + 0x1d0)
+#define AR_PHY_MRC_CCK_ENABLE 0x00000001
+#define AR_PHY_MRC_CCK_ENABLE_S 0
+#define AR_PHY_MRC_CCK_MUX_REG 0x00000002
+#define AR_PHY_MRC_CCK_MUX_REG_S 1
+
+#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
+
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ -100
+
+#define AR_PHY_CCA_NOM_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ -60
+#define AR_PHY_CCA_NOM_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ -60
+
+#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+
+#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
+
+/*
+ * AGC Field Definitions
+ */
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+#define AR_PHY_MINCCA_PWR 0x1FF00000
+#define AR_PHY_MINCCA_PWR_S 20
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_EXT_CCA0_THRESH62_1 0x000001FF
+#define AR_PHY_EXT_CCA0_THRESH62_1_S 0
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP 0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S 22
+#define AR_PHY_AGC_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_COARSE_LOW_S 7
+#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_COARSE_HIGH_S 15
+#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
+#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
+#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
+#define AR_PHY_FIND_SIG_RELPWR_S 6
+#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
+#define AR_PHY_FIND_SIG_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+#define AR_PHY_RESTART_ENA 0x01
+#define AR_PHY_DC_RESTART_DIS 0x40000000
+
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
+#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
+#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
+
+/*
+ * SM Register Map
+ */
+#define AR_SM_BASE 0xa200
+
+#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
+#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
+#define AR_PHY_MODE (AR_SM_BASE + 0x8)
+#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
+#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
+#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
+#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
+#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
+#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
+#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
+#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
+#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
+#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
+
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
+#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
+#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
+#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
+#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
+#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
+#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
+#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
+#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
+#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
+#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
+#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
+#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
+#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
+#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
+#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
+#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+
+#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3
+#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0
+
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
+
+#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+
+#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
+#define AR_PHY_TEST_BBB_OBS_SEL_S 19
+
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
+
+#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
+#define AR_PHY_TEST_CHAIN_SEL_S 30
+
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164))
+#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
+#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
+#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
+#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
+#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
+#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
+#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
+#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
+
+#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
+
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
+#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
+#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
+#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
+
+#define AR_PHY_TPC_1 (AR_SM_BASE + 0x1f8)
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S 0
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA 0x00ff0000
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_12 (AR_SM_BASE + 0x224)
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5 0x3e000000
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S 25
+
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0x000000ff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE 0x0000ff00
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S 8
+
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_19_ALPHA_VOLT 0x001f0000
+#define AR_PHY_TPC_19_ALPHA_VOLT_S 16
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN 0x00000001
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S 0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S 1
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN 0x00000030
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S 4
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA 0x00003c00
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S 10
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB 0x0003c000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S 14
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC 0x003c0000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S 18
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND 0x00c00000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S 22
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL 0x01000000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S 24
+
+#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+
+#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
+
+#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+ 0x3c4 : 0x444))
+#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+ 0x3c8 : 0x448))
+#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+ 0x3c4 : 0x440))
+#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+ 0x3f0 : 0x48c))
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x3d0 : 0x450) + ((_i) << 2))
+#define AR_PHY_RTT_CTRL (AR_SM_BASE + 0x380)
+
+#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_WATCHDOG_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_WATCHDOG_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
+#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
+#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
+
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+#define AR_PHY_BB_THERM_ADC_3 (AR_SM_BASE + 0x250)
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN 0x0001ff00
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S 8
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET 0x000000ff
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S 0
+
+#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
+
+#define AR_PHY_65NM_CH0_TXRF3 0x16048
+#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e
+#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
+
+#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
+#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_SYNTH12 0x160ac
+#define AR_PHY_65NM_CH0_BIAS1 0x160c0
+#define AR_PHY_65NM_CH0_BIAS2 0x160c4
+#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
+#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH1_RXTX4 0x1650c
+#define AR_PHY_65NM_CH2_RXTX4 0x1690c
+
+#define AR_PHY_65NM_CH0_BB1 0x16140
+#define AR_PHY_65NM_CH0_BB2 0x16144
+#define AR_PHY_65NM_CH0_BB3 0x16148
+#define AR_PHY_65NM_CH1_BB1 0x16540
+#define AR_PHY_65NM_CH1_BB2 0x16544
+#define AR_PHY_65NM_CH1_BB3 0x16548
+#define AR_PHY_65NM_CH2_BB1 0x16940
+#define AR_PHY_65NM_CH2_BB2 0x16944
+#define AR_PHY_65NM_CH2_BB3 0x16948
+
+#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000
+#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S 2
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008
+#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3
+
+#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
+ (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
+#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
+#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
+
+#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
+ ((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
+#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
+#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
+#define AR_CH0_THERM_XPASHORT2GND 0x4
+#define AR_CH0_THERM_XPASHORT2GND_S 2
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
+#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
+#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
+
+#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
+#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
+#define AR_PHY_65NM_CH0_THERM_START 0x20000000
+#define AR_PHY_65NM_CH0_THERM_START_S 29
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+
+#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
+ (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
+#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
+#define AR_CH0_TOP2_XPABIASLVL_S 12
+
+#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
+ (AR_SREV_9561(ah) ? 0x162c0 : 0x16290)))
+#define AR_CH0_XTAL_CAPINDAC 0x7f000000
+#define AR_CH0_XTAL_CAPINDAC_S 24
+#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
+#define AR_CH0_XTAL_CAPOUTDAC_S 17
+
+#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \
+ (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40))
+#define AR_PHY_PMU1_PWD 0x1
+#define AR_PHY_PMU1_PWD_S 0
+
+#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \
+ (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44))
+#define AR_PHY_PMU2_PGM 0x00200000
+#define AR_PHY_PMU2_PGM_S 21
+
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
+#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
+#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
+#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
+#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
+#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
+#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
+#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
+#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
+
+/*
+ * SM Field Definitions
+ */
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
+
+#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
+#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
+
+#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
+#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
+#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
+#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
+#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
+#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
+#define AR_PHY_GC_DYN2040_PRI_CH_S 4
+#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
+#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
+#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
+#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
+#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
+#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
+#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+#define AR_PHY_MODE_OFDM 0x00000000
+#define AR_PHY_MODE_CCK 0x00000001
+#define AR_PHY_MODE_DYNAMIC 0x00000004
+#define AR_PHY_MODE_DYNAMIC_S 2
+#define AR_PHY_MODE_HALF 0x00000020
+#define AR_PHY_MODE_QUARTER 0x00000040
+#define AR_PHY_MAC_CLK_MODE 0x00000080
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
+#define AR_PHY_MODE_SVD_HALF 0x00000200
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+#define AR_PHY_FORCE_XPA_CFG 0x00000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCE_S 0
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x0FFF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x10000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 28
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY 0x20000000
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY_S 29
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5 0x40000000
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5_S 30
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT 0x80000000
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT_S 31
+
+#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
+#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
+#define AR_PHY_RTT_CTRL_RESTORE_MASK 0x0000007E
+#define AR_PHY_RTT_CTRL_RESTORE_MASK_S 1
+#define AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE 0x00000080
+#define AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE_S 7
+#define AR_PHY_RTT_SW_RTT_TABLE_ACCESS 0x00000001
+#define AR_PHY_RTT_SW_RTT_TABLE_ACCESS_S 0
+#define AR_PHY_RTT_SW_RTT_TABLE_WRITE 0x00000002
+#define AR_PHY_RTT_SW_RTT_TABLE_WRITE_S 1
+#define AR_PHY_RTT_SW_RTT_TABLE_ADDR 0x0000001C
+#define AR_PHY_RTT_SW_RTT_TABLE_ADDR_S 2
+#define AR_PHY_RTT_SW_RTT_TABLE_DATA 0xFFFFFFF0
+#define AR_PHY_RTT_SW_RTT_TABLE_DATA_S 4
+#define AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL_S 31
+#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
+#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
+#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
+#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
+
+#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
+#define AR_PHY_CALIBRATED_GAINS_0 0x3e
+#define AR_PHY_CALIBRATED_GAINS_0_S 1
+
+#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE_S 0
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x0fffc000
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 14
+
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR 0x20000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S 29
+
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
+
+/*
+ * Channel 1 Register Map
+ */
+#define AR_CHAN1_BASE 0xa800
+
+#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
+
+/*
+ * Channel 1 Field Definitions
+ */
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+/*
+ * AGC 1 Register Map
+ */
+#define AR_AGC1_BASE 0xae00
+
+#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
+#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
+#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
+#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
+
+/*
+ * AGC 1 Field Definitions
+ */
+#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH1_MINCCA_PWR_S 20
+
+/*
+ * SM 1 Register Map
+ */
+#define AR_SM1_BASE 0xb200
+
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
+ 0x280 : 0x240))
+#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0
+#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
+
+#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \
+ AR_SM1_BASE : AR_SM_BASE))
+#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \
+ AR_SM1_BASE : AR_SM_BASE))
+/*
+ * Channel 2 Register Map
+ */
+#define AR_CHAN2_BASE 0xb800
+
+#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
+
+/*
+ * Channel 2 Field Definitions
+ */
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
+/*
+ * AGC 2 Register Map
+ */
+#define AR_AGC2_BASE 0xbe00
+
+#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
+#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
+#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
+
+/*
+ * AGC 2 Field Definitions
+ */
+#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH2_MINCCA_PWR_S 20
+
+/*
+ * SM 2 Register Map
+ */
+#define AR_SM2_BASE 0xc200
+
+#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
+#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
+#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
+#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
+#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
+#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_TPC_19_B2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2))
+
+#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
+
+/*
+ * AGC 3 Register Map
+ */
+#define AR_AGC3_BASE 0xce00
+
+#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+
+/* GLB Registers */
+#define AR_GLB_BASE 0x20000
+#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
+#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
+#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
+ (AR_SREV_9462_20_OR_LATER(_ah) ? 0x4c : 0x50))
+#define AR_GLB_STATUS (AR_GLB_BASE + 0x48)
+
+/*
+ * Misc helper defines
+ */
+#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
+
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_WATCHDOG_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_WATCHDOG_IDLE_ENABLE 0x00000002
+#define AR_PHY_WATCHDOG_IDLE_MASK 0xFFFF0000
+#define AR_PHY_WATCHDOG_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_WATCHDOG_RST_ENABLE 0x00000002
+#define AR_PHY_WATCHDOG_IRQ_ENABLE 0x00000004
+#define AR_PHY_WATCHDOG_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_WATCHDOG_INFO 0x00000007
+#define AR_PHY_WATCHDOG_INFO_S 0
+#define AR_PHY_WATCHDOG_DET_HANG 0x00000008
+#define AR_PHY_WATCHDOG_DET_HANG_S 3
+#define AR_PHY_WATCHDOG_RADAR_SM 0x000000F0
+#define AR_PHY_WATCHDOG_RADAR_SM_S 4
+#define AR_PHY_WATCHDOG_RX_OFDM_SM 0x00000F00
+#define AR_PHY_WATCHDOG_RX_OFDM_SM_S 8
+#define AR_PHY_WATCHDOG_RX_CCK_SM 0x0000F000
+#define AR_PHY_WATCHDOG_RX_CCK_SM_S 12
+#define AR_PHY_WATCHDOG_TX_OFDM_SM 0x000F0000
+#define AR_PHY_WATCHDOG_TX_OFDM_SM_S 16
+#define AR_PHY_WATCHDOG_TX_CCK_SM 0x00F00000
+#define AR_PHY_WATCHDOG_TX_CCK_SM_S 20
+#define AR_PHY_WATCHDOG_AGC_SM 0x0F000000
+#define AR_PHY_WATCHDOG_AGC_SM_S 24
+#define AR_PHY_WATCHDOG_SRCH_SM 0xF0000000
+#define AR_PHY_WATCHDOG_SRCH_SM_S 28
+
+#define AR_PHY_WATCHDOG_STATUS_CLR 0x00000008
+
+/*
+ * PAPRD registers
+ */
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+
+#define AR_PHY_PAPRD_AM2AM (AR_CHAN_BASE + 0xe4)
+#define AR_PHY_PAPRD_AM2AM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2AM_MASK_S 0
+
+#define AR_PHY_PAPRD_AM2PM (AR_CHAN_BASE + 0xe8)
+#define AR_PHY_PAPRD_AM2PM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2PM_MASK_S 0
+
+#define AR_PHY_PAPRD_HT40 (AR_CHAN_BASE + 0xec)
+#define AR_PHY_PAPRD_HT40_MASK 0x01ffffff
+#define AR_PHY_PAPRD_HT40_MASK_S 0
+
+#define AR_PHY_PAPRD_CTRL0_B0 (AR_CHAN_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B1 (AR_CHAN1_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B2 (AR_CHAN2_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE 0x00000001
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S 0
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK 0x00000002
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S 1
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH 0xf8000000
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S 27
+
+#define AR_PHY_PAPRD_CTRL1_B0 (AR_CHAN_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B1 (AR_CHAN1_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B2 (AR_CHAN2_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA 0x00000001
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S 0
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE 0x00000002
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S 1
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE 0x00000004
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S 2
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL 0x000001f8
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S 3
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK 0x0001fe00
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S 9
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
+
+#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490))
+
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S 1
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE 0x00000100
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S 8
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE 0x00000200
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S 9
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE 0x00000400
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S 10
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE 0x00000800
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S 11
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
+
+#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494))
+
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
+
+#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498))
+
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S 6
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL 0x0001f000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES 0x000e0000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S 17
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN 0x00f00000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S 20
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN 0x0f000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S 24
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
+
+#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c))
+
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR 0x00000fff
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S 0
+
+#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0 (AR_CHAN_BASE + 0x100)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0 (AR_CHAN_BASE + 0x104)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0 (AR_CHAN_BASE + 0x108)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_3_B0 (AR_CHAN_BASE + 0x10c)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0 (AR_CHAN_BASE + 0x110)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0 (AR_CHAN_BASE + 0x114)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0 (AR_CHAN_BASE + 0x118)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0 (AR_CHAN_BASE + 0x11c)
+#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
+#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
+
+#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0))
+
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S 1
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR 0x00000004
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S 2
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE 0x00000008
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S 3
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX 0x000001f0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S 4
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
+
+#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4))
+
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S 16
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
+
+#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x598 : 0x4a8))
+
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
+
+#define AR_PHY_PAPRD_MEM_TAB_B0 (AR_CHAN_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B1 (AR_CHAN1_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B2 (AR_CHAN2_BASE + 0x120)
+
+#define AR_PHY_PA_GAIN123_B0 (AR_CHAN_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B1 (AR_CHAN1_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B2 (AR_CHAN2_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_PA_GAIN1 0x3FF
+#define AR_PHY_PA_GAIN123_PA_GAIN1_S 0
+
+#define AR_PHY_POWERTX_RATE5 (AR_SM_BASE + 0x1d0)
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
+
+#define AR_PHY_POWERTX_RATE6 (AR_SM_BASE + 0x1d4)
+#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5 0x3F00
+#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5_S 8
+
+#define AR_PHY_POWERTX_RATE8 (AR_SM_BASE + 0x1dc)
+#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5 0x3F00
+#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5_S 8
+
+#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
+#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
+
+#define AR_BTCOEX_WL_LNADIV 0x1a64
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD 0x00003FFF
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S 0
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY 0x00004000
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S 14
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON 0x00008000
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S 15
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION 0x00030000
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S 16
+#define AR_BTCOEX_WL_LNADIV_MODE 0x007c0000
+#define AR_BTCOEX_WL_LNADIV_MODE_S 18
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ 0x00800000
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S 23
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE 0x01000000
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S 24
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT 0x02000000
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
+
+/* Manual Peak detector calibration */
+#define AR_PHY_65NM_BASE 0x16000
+#define AR_PHY_65NM_RXRF_GAINSTAGES(i) (AR_PHY_65NM_BASE + \
+ ((i) * 0x400) + 0x8)
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE 0x80000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE_S 31
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC 0x00000002
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC_S 1
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR 0x70000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_S 28
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR 0x03800000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_S 23
+
+#define AR_PHY_65NM_RXTX2(i) (AR_PHY_65NM_BASE + \
+ ((i) * 0x400) + 0x104)
+#define AR_PHY_65NM_RXTX2_RXON_OVR 0x00001000
+#define AR_PHY_65NM_RXTX2_RXON_OVR_S 12
+#define AR_PHY_65NM_RXTX2_RXON 0x00000800
+#define AR_PHY_65NM_RXTX2_RXON_S 11
+
+#define AR_PHY_65NM_RXRF_AGC(i) (AR_PHY_65NM_BASE + \
+ ((i) * 0x400) + 0xc)
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE 0x80000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE_S 31
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR 0x40000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR_S 30
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR 0x20000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR_S 29
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR 0x1E000000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR_S 25
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR 0x00078000
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR_S 15
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR 0x01F80000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR_S 19
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR 0x00007e00
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR_S 9
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT 0x00000004
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S 2
+
+#define AR9300_DFS_FIRPWR -28
+
+#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
new file mode 100644
index 000000000..e4d11fa7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
+
+#define RTT_RESTORE_TIMEOUT 1000
+#define RTT_ACCESS_TIMEOUT 100
+#define RTT_BAD_VALUE 0x0bad0bad
+
+/*
+ * RTT (Radio Retention Table) hardware implementation information
+ *
+ * There is an internal table (i.e. the rtt) for each chain (or bank).
+ * Each table contains 6 entries, and each entry corresponds to
+ * a specific calibration parameter as depicted below.
+ * 0~2 - DC offset DAC calibration: loop, low, high (offsetI/Q_...)
+ * 3 - Filter cal (filterfc)
+ * 4 - RX gain settings
+ * 5 - Peak detector offset calibration (agc_caldac)
+ */
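+
+/*
+ * Typical flow (see the helpers below): after a full calibration,
+ * ar9003_hw_rtt_fill_hist() snapshots the per-chain tables into
+ * ah->caldata->rtt_table[]; when the channel is revisited,
+ * ar9003_hw_rtt_restore() replays the snapshot through
+ * ar9003_hw_rtt_load_hist() and forces a radio restore instead of
+ * redoing the full calibration.
+ */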
+
+void ar9003_hw_rtt_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RTT_CTRL, 1);
+}
+
+void ar9003_hw_rtt_disable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RTT_CTRL, 0);
+}
+
+void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask)
+{
+ REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_RESTORE_MASK, rtt_mask);
+}
+
+bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
+{
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE,
+ 0, RTT_RESTORE_TIMEOUT))
+ return false;
+
+ REG_RMW_FIELD(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE, 1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_CTRL,
+ AR_PHY_RTT_CTRL_FORCE_RADIO_RESTORE,
+ 0, RTT_RESTORE_TIMEOUT))
+ return false;
+
+ return true;
+}
+
+static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
+ u32 index, u32 data28)
+{
+ u32 val;
+
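+ /*
+ * Write the 28-bit data word first, then program the entry address
+ * with WRITE set and pulse TABLE_ACCESS to commit it; WRITE is
+ * cleared again once the access has completed.
+ */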
+ val = SM(data28, AR_PHY_RTT_SW_RTT_TABLE_DATA);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain), val);
+
+ val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) |
+ SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE) |
+ SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT))
+ return;
+
+ val &= ~SM(1, AR_PHY_RTT_SW_RTT_TABLE_WRITE);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT);
+}
+
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
+{
+ int chain, i;
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i,
+ ah->caldata->rtt_table[chain][i]);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Load RTT value at idx %d, chain %d: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
+}
+
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+ int agc, caldac;
+
+ if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ return;
+
+ if ((index != 5) || (chain >= 2))
+ return;
+
+ agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+ if (!agc)
+ return;
+
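+ /*
+ * Overwrite the caldac bits of the retained peak detector entry
+ * (index 5) with the value measured by the software peak detector
+ * calibration, leaving the rest of the word untouched.
+ */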
+ caldac = ah->caldata->caldac[chain];
+ ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+ caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+ ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
+static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
+{
+ u32 val;
+
+ val = SM(0, AR_PHY_RTT_SW_RTT_TABLE_ACCESS) |
+ SM(0, AR_PHY_RTT_SW_RTT_TABLE_WRITE) |
+ SM(index, AR_PHY_RTT_SW_RTT_TABLE_ADDR);
+
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ val |= SM(1, AR_PHY_RTT_SW_RTT_TABLE_ACCESS);
+ REG_WRITE(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain), val);
+ udelay(1);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_RTT_TABLE_SW_INTF_B(chain),
+ AR_PHY_RTT_SW_RTT_TABLE_ACCESS, 0,
+ RTT_ACCESS_TIMEOUT))
+ return RTT_BAD_VALUE;
+
+ val = MS(REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain)),
+ AR_PHY_RTT_SW_RTT_TABLE_DATA);
+
+ return val;
+}
+
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
+{
+ int chain, i;
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ah->caldata->rtt_table[chain][i] =
+ ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+ ar9003_hw_patch_rtt(ah, i, chain);
+
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "RTT value at idx %d, chain %d is: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
+
+ set_bit(RTT_DONE, &ah->caldata->cal_flags);
+}
+
+void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
+{
+ int chain, i;
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.rx_chainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
+ }
+
+ if (ah->caldata)
+ clear_bit(RTT_DONE, &ah->caldata->cal_flags);
+}
+
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ bool restore;
+
+ if (!ah->caldata)
+ return false;
+
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[0]);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ ah->caldata->caldac[1]);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ }
+
+ if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
+ return false;
+
+ ar9003_hw_rtt_enable(ah);
+
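+ /*
+ * The restore mask appears to select entries of the retention
+ * table: 0x10 restores only the rx gain entry, while 0x30 also
+ * restores the peak detector offset entry measured in software.
+ */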
+ if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+ ar9003_hw_rtt_set_mask(ah, 0x30);
+ else
+ ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (!ath9k_hw_rfbus_req(ah)) {
+ ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
+ restore = false;
+ goto fail;
+ }
+
+ ar9003_hw_rtt_load_hist(ah);
+ restore = ar9003_hw_rtt_force_restore(ah);
+
+fail:
+ ath9k_hw_rfbus_done(ah);
+ ar9003_hw_rtt_disable(ah);
+ return restore;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
new file mode 100644
index 000000000..6290467a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_RTT_H
+#define AR9003_RTT_H
+
+#ifdef CONFIG_ATH9K_PCOEM
+void ar9003_hw_rtt_enable(struct ath_hw *ah);
+void ar9003_hw_rtt_disable(struct ath_hw *ah);
+void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
+bool ar9003_hw_rtt_force_restore(struct ath_hw *ah);
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah);
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah);
+void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan);
+#else
+static inline void ar9003_hw_rtt_enable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_disable(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask)
+{
+}
+
+static inline bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
+{
+ return false;
+}
+
+static inline void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
+{
+}
+
+static inline void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
+{
+}
+
+static inline bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
new file mode 100644
index 000000000..bea41df9f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "reg_wow.h"
+#include "hw-ops.h"
+
+static void ath9k_hw_set_sta_powersave(struct ath_hw *ah)
+{
+ if (!ath9k_hw_mci_is_enabled(ah))
+ goto set;
+ /*
+ * If MCI is being used, set PWR_SAV only when MCI's
+ * PS state is disabled.
+ */
+ if (ar9003_mci_state(ah, MCI_STATE_GET_WLAN_PS_STATE) != MCI_PS_DISABLE)
+ return;
+set:
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+}
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath9k_hw_set_sta_powersave(ah);
+
+ /* set rx disable bit */
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
+ REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
+ } else if (AR_SREV_9485(ah)) {
+ if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
+ AR_GEN_TIMERS2_MODE_ENABLE_MASK))
+ REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
+ }
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+ u32 ctl[13] = {0};
+ u32 data_word[KAL_NUM_DATA_WORDS];
+ u8 i;
+ u32 wow_ka_data_word0;
+
+ memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+ memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+ /* set the transmit buffer */
+ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+ ctl[1] = 0;
+ ctl[4] = 0;
+ ctl[7] = (ah->txchainmask) << 2;
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
+
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ctl[3] = 0x1b; /* CCK_1M */
+ else
+ ctl[3] = 0xb; /* OFDM_6M */
+
+ for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
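+ /*
+ * Build the 802.11 header of the keep-alive frame, packed
+ * little-endian into 32-bit words: frame control and duration in
+ * word 0, followed by A1 = BSSID, A2 = STA address, A3 = BSSID.
+ */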
+ data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+ (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+ data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+ (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+ data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+ (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+ data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
+ /*
+ * AR9462 2.0 and AR9565 have an extra descriptor word
+ * (time based discard) compared to other chips.
+ */
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+ wow_ka_data_word0 = AR_WOW_TXBUF(13);
+ } else {
+ wow_ka_data_word0 = AR_WOW_TXBUF(12);
+ }
+
+ for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+ REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+}
+
+int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
+{
+ int i;
+ u32 pattern_val, mask_val;
+ u32 set, clr;
+
+ if (pattern_count >= ah->wow.max_patterns)
+ return -ENOSPC;
+
+ if (pattern_count < MAX_NUM_PATTERN_LEGACY)
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+ else
+ REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));
+
+ for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+ memcpy(&pattern_val, user_pattern, 4);
+ REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+ pattern_val);
+ user_pattern += 4;
+ }
+
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+ memcpy(&mask_val, user_mask, 4);
+ REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+ user_mask += 4;
+ }
+
+ if (pattern_count < MAX_NUM_PATTERN_LEGACY)
+ ah->wow.wow_event_mask |=
+ BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+ else
+ ah->wow.wow_event_mask2 |=
+ BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);
+
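+ /*
+ * Each AR_WOW_LENGTHx register stores the lengths of four
+ * patterns; pick the register and the shift from the pattern
+ * index.
+ */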
+ if (pattern_count < 4) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN1_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH1_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+ } else if (pattern_count < 8) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN2_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH2_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ } else if (pattern_count < 12) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN3_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH3_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
+ } else if (pattern_count < MAX_NUM_PATTERN) {
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN4_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH4_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ u32 wow_status = 0;
+ u32 val = 0, rval;
+
+ /*
+ * Read the WoW status register to know
+ * the wakeup reason.
+ */
+ rval = REG_READ(ah, AR_WOW_PATTERN);
+ val = AR_WOW_STATUS(rval);
+
+ /*
+ * Mask only the WoW events that we have enabled. Sometimes
+ * we have spurious WoW events from the AR_WOW_PATTERN
+ * register. This mask cleans them up.
+ */
+ val &= ah->wow.wow_event_mask;
+
+ if (val) {
+ if (val & AR_WOW_MAGIC_PAT_FOUND)
+ wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+ if (AR_WOW_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+ if (val & AR_WOW_KEEP_ALIVE_FAIL)
+ wow_status |= AH_WOW_LINK_CHANGE;
+ if (val & AR_WOW_BEACON_FAIL)
+ wow_status |= AH_WOW_BEACON_MISS;
+ }
+
+ rval = REG_READ(ah, AR_MAC_PCU_WOW4);
+ val = AR_WOW_STATUS2(rval);
+ val &= ah->wow.wow_event_mask2;
+
+ if (val) {
+ if (AR_WOW2_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+ }
+
+	/*
+	 * Set and clear WOW_PME_CLEAR so that the chip can generate
+	 * the next WoW signal.
+	 * Should D3 be disabled before accessing other registers?
+	 */
+
+	/* Do we need to check the bit value 0x01000000 (7-10)? */
+ REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ AR_PMCTRL_PWR_STATE_D1D3);
+
+ /*
+ * Clear all events.
+ */
+ REG_WRITE(ah, AR_WOW_PATTERN,
+ AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+ REG_WRITE(ah, AR_MAC_PCU_WOW4,
+ AR_WOW_CLEAR_EVENTS2(REG_READ(ah, AR_MAC_PCU_WOW4)));
+
+	/*
+	 * Restore the beacon threshold to its initial value.
+	 */
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+	/*
+	 * Restore the way the PCI-E reset, Power-On-Reset and external
+	 * PCIE_POR_SHORT pins are tied to their original values.
+	 * Just before WoW sleep we untied the PCI-E reset from the
+	 * chip's Power-On-Reset so that a PCI-E reset from the bus
+	 * would not reset the chip.
+	 */
+ if (ah->is_pciexpress)
+ ath9k_hw_configpcipowersave(ah, false);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah) || AR_SREV_9485(ah)) {
+ u32 dc = REG_READ(ah, AR_DIRECT_CONNECT);
+
+ if (!(dc & AR_DC_TSF2_ENABLE))
+ ath9k_hw_gen_timer_start_tsf2(ah);
+ }
+
+ ah->wow.wow_event_mask = 0;
+ ah->wow.wow_event_mask2 = 0;
+
+ return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
+{
+ u32 wa_reg;
+
+ if (!ah->is_pciexpress)
+ return;
+
+	/*
+	 * We need to untie the internal POR (power-on-reset)
+	 * from the external PCI-E reset. We also need to tie
+	 * the PCI-E PHY reset to the PCI-E reset.
+	 */
+ wa_reg = REG_READ(ah, AR_WA);
+ wa_reg &= ~AR_WA_UNTIE_RESET_EN;
+ wa_reg |= AR_WA_RESET_EN;
+ wa_reg |= AR_WA_POR_SHORT;
+
+ REG_WRITE(ah, AR_WA, wa_reg);
+}
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 keep_alive, magic_pattern, host_pm_ctrl;
+
+ wow_event_mask = ah->wow.wow_event_mask;
+
+	/*
+	 * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
+	 *			   space and allow the MAC to generate
+	 *			   WoW anyway.
+	 *
+	 * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
+	 *
+	 * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
+	 *			   needs to be set for WoW in PCI mode.
+	 *
+	 * AR_PMCTRL_WOW_PME_CLR - WoW clear signal going to the MAC.
+	 *
+	 * Set the power states appropriately and enable PME.
+	 *
+	 * Set and clear WOW_PME_CLEAR so that the chip can
+	 * generate the next WoW signal.
+	 */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET |
+ AR_PMCTRL_WOW_PME_CLR);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);
+
+ /*
+ * Random Backoff.
+ *
+ * 31:28 in AR_WOW_PATTERN : Indicates the number of bits used in the
+ * contention window. For value N,
+ * the random backoff will be selected between
+ * 0 and (2 ^ N) - 1.
+ */
+ REG_SET_BIT(ah, AR_WOW_PATTERN,
+ AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));
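+	/*
+	 * For illustration only: if AR_WOW_PAT_BACKOFF were 4, the
+	 * hardware would pick a random backoff in [0, 2^4 - 1], i.e.
+	 * 0-15 contention-window slots.
+	 */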
+
+ /*
+ * AIFS time, Slot time, Keep Alive count.
+ */
+ REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
+ /*
+ * Beacon timeout.
+ */
+ if (pattern_enable & AH_WOW_BEACON_MISS)
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
+ else
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);
+
+ /*
+ * Keep alive timeout in ms.
+ */
+ if (!pattern_enable)
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
+ else
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);
+
+ /*
+ * Keep alive delay in us.
+ */
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);
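+	/*
+	 * KAL_DELAY is presumably specified in milliseconds; the
+	 * "* 1000" above converts it to the microsecond units this
+	 * register expects.
+	 */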
+
+ /*
+ * Create keep alive pattern to respond to beacons.
+ */
+ ath9k_wow_create_keep_alive_pattern(ah);
+
+ /*
+ * Configure keep alive register.
+ */
+ keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);
+
+ /* Send keep alive timeouts anyway */
+ keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+ if (pattern_enable & AH_WOW_LINK_CHANGE) {
+ keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+ } else {
+ keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
+ }
+
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);
+
+	/*
+	 * We are relying on a beacon-miss (bmiss) failure, so make
+	 * sure the threshold is high enough to prevent false positives.
+	 */
+ REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+ AR_WOW_BMISSTHRESHOLD);
+
+ if (pattern_enable & AH_WOW_BEACON_MISS) {
+ wow_event_mask |= AR_WOW_BEACON_FAIL;
+ REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
+ } else {
+ REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
+ }
+
+ /*
+ * Enable the magic packet registers.
+ */
+ magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
+ magic_pattern |= AR_WOW_MAC_INTR_EN;
+
+ if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+ magic_pattern |= AR_WOW_MAGIC_EN;
+ wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+ } else {
+ magic_pattern &= ~AR_WOW_MAGIC_EN;
+ }
+
+ REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);
+
+	/*
+	 * Enable pattern matching for packets that are shorter
+	 * than 256 bytes.
+	 */
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
+
+ /*
+ * Set the power states appropriately and enable PME.
+ */
+ host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
+ host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
+ AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+ host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;
+
+ if (AR_SREV_9462(ah)) {
+		/*
+		 * This is needed to prevent the chip from waking up
+		 * the host within 3-4 seconds on certain
+		 * platform/BIOS combinations.
+		 */
+ host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
+ host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ }
+
+ REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);
+
+ /*
+ * Enable sequence number generation when asleep.
+ */
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ /* To bring down WOW power low margin */
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));
+
+ ath9k_hw_wow_set_arwr_reg(ah);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ /* HW WoW */
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
+
+ ath9k_hw_set_powermode_wow_sleep(ah);
+ ah->wow.wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
new file mode 100644
index 000000000..2c42ff05e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9330_1P1_H
+#define INITVALS_9330_1P1_H
+
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_modes_high_power_tx_gain_1p1 ar9331_modes_lowest_ob_db_tx_gain_1p1
+
+static const u32 ar9331_1p1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
+ {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
+ {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
+ {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+};
+
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
+ {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
+ {0x0000a2e4, 0xff43e000, 0xff43e000, 0xff43e000, 0xff43e000},
+ {0x0000a2e8, 0xfffc0000, 0xfffc0000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3d001620, 0x3d001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x3f001621, 0x3f001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x42001640, 0x42001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x44001641, 0x44001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x46001642, 0x46001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49001644, 0x49001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4c001a81, 0x4c001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4f001a83, 0x4f001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x52001c84, 0x52001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001ce3, 0x55001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x59001ce5, 0x59001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5d001ce9, 0x5d001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x64001eec, 0x64001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x64001eec, 0x64001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+};
+
+static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
+ {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
+ {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d4, 0x000050d4},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
+ {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+};
+
+static const u32 ar9331_1p1_xtal_25M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000002f8},
+ {0x00008244, 0x0010f3d7},
+ {0x0000824c, 0x0001e7ae},
+ {0x0001609c, 0x0f508f29},
+};
+
+static const u32 ar9331_1p1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016044, 0x03db62db},
+ {0x00016048, 0x6c924268},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081c},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd411eb84},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160ac, 0x24651800},
+ {0x000160b0, 0x03284f3e},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c300},
+ {0x000160d0, 0x14500820},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x50804000},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01001015},
+ {0x00016284, 0x14d20000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+};
+
+static const u32 ar9331_1p1_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
+};
+
+static const u32 ar9331_common_wo_xlna_rx_gain_1p1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9331_1p1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261800},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x210d0401},
+ {0x0000a3a0, 0xab9a7144},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0x3c3c003d},
+ {0x0000a3ac, 0x30310030},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
+
+static const u32 ar9331_1p1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9331_1p1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000002f8},
+};
+
+static const u32 ar9331_1p1_xtal_40M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000004c2},
+ {0x00008244, 0x0010f400},
+ {0x0000824c, 0x0001e800},
+ {0x0001609c, 0x0b283f31},
+};
+
+static const u32 ar9331_1p1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008248, 0x00000800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9331_common_rx_gain_1p1[][2] = {
+ /* Addr allmodes */
+ {0x00009e18, 0x05000000},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
+ {0x00000000},
+ {0x00000003},
+ {0x00000000},
+ {0x00000000},
+};
+
+#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
new file mode 100644
index 000000000..2154efcd3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9330_1P2_H
+#define INITVALS_9330_1P2_H
+
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
+
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
+
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
+
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
+
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
+ {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+};
+
+static const u32 ar9331_1p2_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016044, 0x03d6d2db},
+ {0x00016048, 0x6c924268},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081c},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd411eb84},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160ac, 0x24651800},
+ {0x000160b0, 0x03284f3e},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c300},
+ {0x000160d0, 0x14500820},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804000},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x14d20000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+};
+
+static const u32 ar9331_1p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261800},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
+
+static const u32 ar9331_1p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9331_common_rx_gain_1p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+#endif /* INITVALS_9330_1P2_H */
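
[Aside, not part of the patch] A minimal sketch of how initvals tables in the two layouts above are typically walked: the first column is the register address, followed either by a single "allmodes" value or by one value per channel mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20, as labelled in the table headers). reg_write() below is a hypothetical stand-in for the driver's register accessor, used only so the example is self-contained; the sample rows are copied from the tables above.

/* Sketch under the assumptions stated above; not taken from this patch. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned int u32;

static void reg_write(u32 addr, u32 val)	/* hypothetical accessor */
{
	printf("REG 0x%08x <- 0x%08x\n", addr, val);
}

/* Two-column "allmodes" table: write column 1 for every row. */
static void write_allmodes(const u32 (*tbl)[2], size_t rows)
{
	size_t i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][1]);
}

/* Five-column table: pick one of columns 1..4 for the current mode. */
static void write_mode(const u32 (*tbl)[5], size_t rows, int mode_col)
{
	size_t i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][mode_col]);
}

int main(void)
{
	/* Sample rows from ar9331_1p2_radio_core (allmodes) ... */
	static const u32 core[][2] = {
		{0x00016000, 0x36db6db6},
		{0x00016004, 0x6db6db40},
	};
	/* ... and from ar9331_1p2_baseband_postamble (per-mode). */
	static const u32 post[][5] = {
		{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
	};

	write_allmodes(core, sizeof(core) / sizeof(core[0]));
	write_mode(post, sizeof(post) / sizeof(post[0]), 4); /* 2G_HT20 */
	return 0;
}
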
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
new file mode 100644
index 000000000..b995ffe88
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -0,0 +1,1298 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9340_H
+#define INITVALS_9340_H
+
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
+
+#define ar9340Common_rx_gain_table_1p0 ar9300Common_rx_gain_table_2p2
+
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9340_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9340_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
+static const u32 ar9340_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
+};
+
+static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+};
+
+static const u32 ar9340_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016044, 0x03b6d2db},
+ {0x00016048, 0x24925266},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x6db6db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081c},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x00016098, 0xd411eb84},
+ {0x0001609c, 0x03e47f32},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160ac, 0xa4646800},
+ {0x000160b0, 0x00fe7f46},
+ {0x000160b4, 0x92480000},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6db6c},
+ {0x000160d0, 0xb6da4924},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016140, 0x50804008},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x15530000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x00016444, 0x03b6d2db},
+ {0x00016448, 0x24927266},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x6db6db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x04cb0001},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x0001650c, 0x00000000},
+ {0x00016540, 0x50804008},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x000080c0},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+};
+
+static const u32 ar9340_1p0_radio_core_40M[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x02566f3a},
+ {0x000160ac, 0xa4647c00},
+ {0x000160b0, 0x01885f5a},
+ {0x00008244, 0x0010f400},
+ {0x0000824c, 0x0001e800},
+};
+
+static const u32 ar9340_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x09103881, 0x09103881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003ec0, 0x00003ec4, 0x00003ec4, 0x00003ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9340_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x3280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261800},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+};
+
+static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x02000001, 0x02000001},
+ {0x0000a508, 0x09002421, 0x09002421, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000203, 0x11000203},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x18000403, 0x18000403},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x25000820, 0x25000820},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x29000822, 0x29000822},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38000849, 0x38000849},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x52001290, 0x52001290},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x02800001, 0x02800001},
+ {0x0000a588, 0x09802421, 0x09802421, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800203, 0x11800203},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
+ {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
+ {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+};
+
+static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
+};
+
+static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
+};
+
+static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+};
+
+static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+};
+
+static const u32 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x0000a394, 0x00000444, 0x00000444, 0x00000404, 0x00000404},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x02000001, 0x02000001},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000203, 0x11000203},
+ {0x0000a518, 0x21002220, 0x21002220, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x18000403, 0x18000403},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x34022225, 0x34022225, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x25000820, 0x25000820},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x29000822, 0x29000822},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38000849, 0x38000849},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x52001290, 0x52001290},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x02800001, 0x02800001},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800203, 0x11800203},
+ {0x0000a598, 0x21820220, 0x21820220, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404501, 0x01404501},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x01404501, 0x01404501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x03c0cf02, 0x03c0cf02},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03c0cf03, 0x03c0cf03},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x05419405, 0x05419405},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x00016044, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
+ {0x00016444, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+};
+
+static const u32 ar9340_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008010, 0x00080800},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f3d7},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e7ae},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000101ff},
+};
+
+static const u32 ar9340_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9340_cus227_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2c022220, 0x2c022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x30022222, 0x30022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x35022225, 0x35022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3b02222a, 0x3b02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3f02222c, 0x3f02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x0000a3a4, 0x00000011, 0x00000011, 0x00000011, 0x00000011},
+ {0x0000a3a8, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c},
+ {0x0000a3ac, 0x30303030, 0x30303030, 0x30303030, 0x30303030},
+};
+
+#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
new file mode 100644
index 000000000..1b6b4d0cf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -0,0 +1,1250 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_2P0_H
+#define INITVALS_9462_2P0_H
+
+/* AR9462 2.0 */
+
+#define ar9462_2p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9462_2p0_common_wo_xlna_rx_gain ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9462_2p0_common_5g_xlna_only_rxgain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9462_2p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_2p0_common_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p0_pciephy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18213ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003780c},
+};
+
+static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9462_2p0_modes_low_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
+};
+
+static const u32 ar9462_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009bf0, 0x80000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x15262820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x0a193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x07000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00002037},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b0, 0x0000000a},
+ {0x0000a6b4, 0x00512c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a7f0, 0x80000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000abf0, 0x80000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b6b0, 0x0000000a},
+ {0x0000b6b4, 0x00000001},
+};
+
+static const u32 ar9462_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+};
+
+static const u32 ar9462_2p0_modes_mix_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+};
+
+static const u32 ar9462_2p0_modes_high_ob_db_tx_gain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+ {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+ {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d820001},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x2699e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x000c0000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x1203040b},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc491},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x0de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841418},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1005080},
+ {0x00016294, 0x00000020},
+ {0x00016298, 0x54a82900},
+ {0x00016340, 0x121e4276},
+ {0x00016344, 0x00300000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016410, 0x6c800001},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x4699e04f},
+ {0x00016450, 0x6db6db6c},
+ {0x00016500, 0x3fffbe04},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00200400},
+ {0x00016510, 0x00000000},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+};
+
+static const u32 ar9462_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9462_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000e0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00080000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486e00},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x99c00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_2p0_common_mixed_rx_gain[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+};
+
+static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
+ /* Addr allmodes */
+ {0x00009fd0, 0x0a2d6b93},
+};
+
+static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
+};
+
+#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
new file mode 100644
index 000000000..dc3adda46
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_2P1_H
+#define INITVALS_9462_2P1_H
+
+/* AR9462 2.1 */
+
+#define ar9462_2p1_mac_postamble ar9462_2p0_mac_postamble
+
+#define ar9462_2p1_baseband_core ar9462_2p0_baseband_core
+
+#define ar9462_2p1_radio_core ar9462_2p0_radio_core
+
+#define ar9462_2p1_radio_postamble ar9462_2p0_radio_postamble
+
+#define ar9462_2p1_soc_postamble ar9462_2p0_soc_postamble
+
+#define ar9462_2p1_radio_postamble_sys2ant ar9462_2p0_radio_postamble_sys2ant
+
+#define ar9462_2p1_common_rx_gain ar9462_2p0_common_rx_gain
+
+#define ar9462_2p1_common_mixed_rx_gain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p1_common_5g_xlna_only_rxgain ar9462_2p0_common_5g_xlna_only_rxgain
+
+#define ar9462_2p1_baseband_core_mix_rxgain ar9462_2p0_baseband_core_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_mix_rxgain ar9462_2p0_baseband_postamble_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_5g_xlna ar9462_2p0_baseband_postamble_5g_xlna
+
+#define ar9462_2p1_common_wo_xlna_rx_gain ar9462_2p0_common_wo_xlna_rx_gain
+
+#define ar9462_2p1_modes_low_ob_db_tx_gain ar9462_2p0_modes_low_ob_db_tx_gain
+
+#define ar9462_2p1_modes_high_ob_db_tx_gain ar9462_2p0_modes_high_ob_db_tx_gain
+
+#define ar9462_2p1_modes_mix_ob_db_tx_gain ar9462_2p0_modes_mix_ob_db_tx_gain
+
+#define ar9462_2p1_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+#define ar9462_2p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
+#define ar9462_2p1_pciephy_clkreq_disable_L1 ar9462_2p0_pciephy_clkreq_disable_L1
+
+static const u32 ar9462_2p1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000e0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00080000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486e00},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x99c00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_2p1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_2p1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+#endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
new file mode 100644
index 000000000..ce83ce47a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -0,0 +1,1240 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9485_H
+#define INITVALS_9485_H
+
+/* AR9485 1.1 */
+
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+#define ar9485_1_1_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00009e00, 0x037216a0},
+ {0x00009e04, 0x00182020},
+ {0x00009e18, 0x00000000},
+ {0x00009e20, 0x000003a8},
+ {0x00009e2c, 0x00004121},
+ {0x00009e44, 0x02282324},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x07000203, 0x07000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x14000406, 0x14000406},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1800040a, 0x1800040a},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000460, 0x1c000460},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x22000463, 0x22000463},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x26000465, 0x26000465},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2e0006e0, 0x2e0006e0},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+ {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x01804000},
+ {0x0000a590, 0x02808a02},
+ {0x0000a594, 0x0340ca02},
+ {0x0000a598, 0x0340cd03},
+ {0x0000a59c, 0x0340cd03},
+ {0x0000a5a0, 0x06415304},
+ {0x0000a5a4, 0x04c11905},
+ {0x0000a5a8, 0x06415905},
+ {0x0000a5ac, 0x06415905},
+ {0x0000a5b0, 0x06415905},
+ {0x0000a5b4, 0x06415905},
+ {0x0000a5b8, 0x06415905},
+ {0x0000a5bc, 0x06415905},
+};
+
+static const u32 ar9485_1_1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6fbe0},
+ {0x000160d0, 0xf7dfcf3c},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008040},
+ {0x00016240, 0x08400000},
+ {0x00016244, 0x1bf90f00},
+ {0x00016248, 0x00000000},
+ {0x0001624c, 0x00000000},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x00d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016c40, 0x13188278},
+ {0x00016c44, 0x12000000},
+};
+
+static const u32 ar9485_1_1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x992bb515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x3b3b3b3b},
+ {0x0000a3ac, 0x2f2f2f2f},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739cf},
+ {0x0000a418, 0x2d0021ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a5c4, 0xbfad9d74},
+ {0x0000a5c8, 0x00480605},
+ {0x0000a5cc, 0x00002e37},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+};
+
+static const u32 ar9485_common_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00009e00, 0x03721b20},
+ {0x00009e04, 0x00082020},
+ {0x00009e18, 0x0300501e},
+ {0x00009e20, 0x000003ba},
+ {0x00009e2c, 0x00002e21},
+ {0x00009e44, 0x02182324},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x18181818},
+ {0x0000a084, 0x18181818},
+ {0x0000a088, 0x18181818},
+ {0x0000a08c, 0x18181818},
+ {0x0000a090, 0x18181818},
+ {0x0000a094, 0x18181818},
+ {0x0000a098, 0x17181818},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004014, 0xba280400},
+ {0x00004090, 0x00aa10aa},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x50804008},
+};
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x1801265e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
new file mode 100644
index 000000000..6fc0d07e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_953X_H
+#define INITVALS_953X_H
+
+#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define qca953x_1p0_soc_preamble ar955x_1p0_soc_preamble
+
+#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define qca953x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+
+#define qca953x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
+
+#define qca953x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
+
+static const u32 qca953x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f3d7},
+ {0x00008248, 0x00000852},
+ {0x0000824c, 0x0001e7ae},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 qca953x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x018848c6},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b91},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x6a6a6a6a},
+ {0x0000a3ac, 0x6a6a6a6a},
+ {0x0000a3b0, 0x00c8641a},
+ {0x0000a3b4, 0x0000001a},
+ {0x0000a3b8, 0x0088642a},
+ {0x0000a3bc, 0x000001fa},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce42108},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce73908},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce738e7},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x08000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca953x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x3f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x8036db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f080a},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d8},
+ {0x000160c4, 0x24b6db6c},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6db6fb7c},
+ {0x000160d0, 0x6db6da44},
+ {0x00016100, 0x07ff8001},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080d8},
+ {0x00016280, 0x01000901},
+ {0x00016284, 0x15d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x3f80fff8},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x8036db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x07ff8001},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x000080d8},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+};
+
+static const u32 qca953x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+ {0x00016104, 0xb7a00001, 0xb7a00001, 0xfff80005, 0xfff80005},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016504, 0xb7a00001, 0xb7a00001, 0xfff80001, 0xfff80001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffe9ccc},
+ {0x0000a2e4, 0xffffe0f0},
+ {0x0000a2e8, 0xfffcff00},
+ {0x0000a410, 0x000050da},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x0f00000a},
+ {0x0000a514, 0x1300000c},
+ {0x0000a518, 0x1700000e},
+ {0x0000a51c, 0x1b000064},
+ {0x0000a520, 0x1f000242},
+ {0x0000a524, 0x23000229},
+ {0x0000a528, 0x270002a2},
+ {0x0000a52c, 0x2c001203},
+ {0x0000a530, 0x30001803},
+ {0x0000a534, 0x33000881},
+ {0x0000a538, 0x38001809},
+ {0x0000a53c, 0x3a000814},
+ {0x0000a540, 0x3f001a0c},
+ {0x0000a544, 0x43001a0e},
+ {0x0000a548, 0x46001812},
+ {0x0000a54c, 0x49001884},
+ {0x0000a550, 0x4d001e84},
+ {0x0000a554, 0x50001e69},
+ {0x0000a558, 0x550006f4},
+ {0x0000a55c, 0x59000ad3},
+ {0x0000a560, 0x5e000ad5},
+ {0x0000a564, 0x61001ced},
+ {0x0000a568, 0x660018d4},
+ {0x0000a56c, 0x660018d4},
+ {0x0000a570, 0x660018d4},
+ {0x0000a574, 0x660018d4},
+ {0x0000a578, 0x660018d4},
+ {0x0000a57c, 0x660018d4},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x0300ca02},
+ {0x0000a614, 0x00000e04},
+ {0x0000a618, 0x03014000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x03014000},
+ {0x0000a628, 0x03804c05},
+ {0x0000a62c, 0x0701de06},
+ {0x0000a630, 0x07819c07},
+ {0x0000a634, 0x0701dc07},
+ {0x0000a638, 0x0701dc07},
+ {0x0000a63c, 0x0701dc07},
+ {0x0000b2dc, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffe9ccc},
+ {0x0000b2e4, 0xffffe0f0},
+ {0x0000b2e8, 0xfffcff00},
+ {0x00016044, 0x010002d4},
+ {0x00016048, 0x66482400},
+ {0x00016280, 0x01000015},
+ {0x00016444, 0x010002d4},
+ {0x00016448, 0x66482400},
+};
+
+static const u32 qca953x_1p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050d6},
+ {0x0000a500, 0x00060020},
+ {0x0000a504, 0x04060060},
+ {0x0000a508, 0x080600a0},
+ {0x0000a50c, 0x0c068020},
+ {0x0000a510, 0x10068060},
+ {0x0000a514, 0x140680a0},
+ {0x0000a518, 0x18090040},
+ {0x0000a51c, 0x1b090080},
+ {0x0000a520, 0x1f0900c0},
+ {0x0000a524, 0x240c0041},
+ {0x0000a528, 0x280d0021},
+ {0x0000a52c, 0x2d0f0061},
+ {0x0000a530, 0x310f00a1},
+ {0x0000a534, 0x350e00a2},
+ {0x0000a538, 0x360e80a2},
+ {0x0000a53c, 0x380f00a2},
+ {0x0000a540, 0x3b0e00a3},
+ {0x0000a544, 0x3d110083},
+ {0x0000a548, 0x3e1100a3},
+ {0x0000a54c, 0x401100e3},
+ {0x0000a550, 0x421380e3},
+ {0x0000a554, 0x431780e3},
+ {0x0000a558, 0x461f80e3},
+ {0x0000a55c, 0x461f80e3},
+ {0x0000a560, 0x461f80e3},
+ {0x0000a564, 0x461f80e3},
+ {0x0000a568, 0x461f80e3},
+ {0x0000a56c, 0x461f80e3},
+ {0x0000a570, 0x461f80e3},
+ {0x0000a574, 0x461f80e3},
+ {0x0000a578, 0x461f80e3},
+ {0x0000a57c, 0x461f80e3},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x0301cc07},
+ {0x0000a630, 0x0301cc07},
+ {0x0000a634, 0x0301cc07},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x018848c6},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x02993b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000400ff},
+ {0x0000a3a8, 0x6a6a6a6a},
+ {0x0000a3ac, 0x6a6a6a6a},
+ {0x0000a3b0, 0x00c8641a},
+ {0x0000a3b4, 0x0000001a},
+ {0x0000a3b8, 0x0088642a},
+ {0x0000a3bc, 0x000001fa},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce42108},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce73908},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce738e7},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x08000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 qca953x_2p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_2p0_modes_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfffb52aa},
+ {0x0000a2e0, 0xfffd64cc},
+ {0x0000a2e4, 0xfffe80f0},
+ {0x0000a2e8, 0xffffff00},
+ {0x0000a410, 0x000050d5},
+ {0x0000a500, 0x00000000},
+ {0x0000a504, 0x04000002},
+ {0x0000a508, 0x08000004},
+ {0x0000a50c, 0x0c000006},
+ {0x0000a510, 0x1000000a},
+ {0x0000a514, 0x1400000c},
+ {0x0000a518, 0x1800000e},
+ {0x0000a51c, 0x1c000048},
+ {0x0000a520, 0x2000004a},
+ {0x0000a524, 0x2400004c},
+ {0x0000a528, 0x2800004e},
+ {0x0000a52c, 0x2b00024a},
+ {0x0000a530, 0x2f00024c},
+ {0x0000a534, 0x3300024e},
+ {0x0000a538, 0x36000668},
+ {0x0000a53c, 0x38000669},
+ {0x0000a540, 0x3a000868},
+ {0x0000a544, 0x3d00086a},
+ {0x0000a548, 0x4000086c},
+ {0x0000a54c, 0x4200086e},
+ {0x0000a550, 0x43000a6e},
+ {0x0000a554, 0x43000a6e},
+ {0x0000a558, 0x43000a6e},
+ {0x0000a55c, 0x43000a6e},
+ {0x0000a560, 0x43000a6e},
+ {0x0000a564, 0x43000a6e},
+ {0x0000a568, 0x43000a6e},
+ {0x0000a56c, 0x43000a6e},
+ {0x0000a570, 0x43000a6e},
+ {0x0000a574, 0x43000a6e},
+ {0x0000a578, 0x43000a6e},
+ {0x0000a57c, 0x43000a6e},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x03804000},
+ {0x0000a610, 0x03804e01},
+ {0x0000a614, 0x03804e01},
+ {0x0000a618, 0x03804e01},
+ {0x0000a61c, 0x04009002},
+ {0x0000a620, 0x04009002},
+ {0x0000a624, 0x04009002},
+ {0x0000a628, 0x04009002},
+ {0x0000a62c, 0x04009002},
+ {0x0000a630, 0x04009002},
+ {0x0000a634, 0x04009002},
+ {0x0000a638, 0x04009002},
+ {0x0000a63c, 0x04009002},
+ {0x0000b2dc, 0xfffb52aa},
+ {0x0000b2e0, 0xfffd64cc},
+ {0x0000b2e4, 0xfffe80f0},
+ {0x0000b2e8, 0xffffff00},
+ {0x00016044, 0x024922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x024922db},
+ {0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_2p0_modes_no_xpa_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xffd5f552},
+ {0x0000a2e0, 0xffe60664},
+ {0x0000a2e4, 0xfff80780},
+ {0x0000a2e8, 0xfffff800},
+ {0x0000a410, 0x000050de},
+ {0x0000a500, 0x00000061},
+ {0x0000a504, 0x04000063},
+ {0x0000a508, 0x08000065},
+ {0x0000a50c, 0x0c000261},
+ {0x0000a510, 0x10000263},
+ {0x0000a514, 0x14000265},
+ {0x0000a518, 0x18000482},
+ {0x0000a51c, 0x1b000484},
+ {0x0000a520, 0x1f000486},
+ {0x0000a524, 0x240008c2},
+ {0x0000a528, 0x28000cc1},
+ {0x0000a52c, 0x2d000ce3},
+ {0x0000a530, 0x31000ce5},
+ {0x0000a534, 0x350010e5},
+ {0x0000a538, 0x360012e5},
+ {0x0000a53c, 0x380014e5},
+ {0x0000a540, 0x3b0018e5},
+ {0x0000a544, 0x3d001d04},
+ {0x0000a548, 0x3e001d05},
+ {0x0000a54c, 0x40001d07},
+ {0x0000a550, 0x42001f27},
+ {0x0000a554, 0x43001f67},
+ {0x0000a558, 0x46001fe7},
+ {0x0000a55c, 0x47001f2b},
+ {0x0000a560, 0x49001f0d},
+ {0x0000a564, 0x4b001ed2},
+ {0x0000a568, 0x4c001ed4},
+ {0x0000a56c, 0x4e001f15},
+ {0x0000a570, 0x4f001ff6},
+ {0x0000a574, 0x4f001ff6},
+ {0x0000a578, 0x4f001ff6},
+ {0x0000a57c, 0x4f001ff6},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00804201},
+ {0x0000a610, 0x01008201},
+ {0x0000a614, 0x0180c402},
+ {0x0000a618, 0x0180c603},
+ {0x0000a61c, 0x0180c603},
+ {0x0000a620, 0x01c10603},
+ {0x0000a624, 0x01c10704},
+ {0x0000a628, 0x02c18b05},
+ {0x0000a62c, 0x02c14c07},
+ {0x0000a630, 0x01008704},
+ {0x0000a634, 0x01c10402},
+ {0x0000a638, 0x0301cc07},
+ {0x0000a63c, 0x0301cc07},
+ {0x0000b2dc, 0xffd5f552},
+ {0x0000b2e0, 0xffe60664},
+ {0x0000b2e4, 0xfff80780},
+ {0x0000b2e8, 0xfffff800},
+ {0x00016044, 0x049242db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x049242db},
+ {0x00016448, 0x6c927a70},
+};
+
+#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
new file mode 100644
index 000000000..148562add
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -0,0 +1,760 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_955X_1P0_H
+#define INITVALS_955X_1P0_H
+
+/* AR955X 1.0 */
+
+#define ar955x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar955x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define ar955x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar955x_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar955x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24647c00, 0x24647c00},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x01885f52, 0x01885f52},
+ {0x00016104, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016504, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016904, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
+ {0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+};
+
+static const u32 ar955x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 ar955x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x557cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x00016068, 0x6db6db6c},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f101e},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8100},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x11999601},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00008040},
+ {0x00016280, 0x01800804},
+ {0x00016284, 0x00038dc5},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0x00000040},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00400705},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x557cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x00016468, 0x6db6db6c},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x11999601},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00008040},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00400705},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x557cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x00016868, 0x6db6db6c},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x11999601},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00008040},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00400705},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
+ {0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000002, 0x04000002},
+ {0x0000a508, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
+ {0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x1700002b, 0x1700002b, 0x1700002b, 0x1700002b, 0x1600002b, 0x1600002b, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1a00002d, 0x1a00002d, 0x1b000064, 0x1b000064},
+ {0x0000a520, 0x20000031, 0x20000031, 0x1f000031, 0x1f000031, 0x1e000031, 0x1e000031, 0x1f000242, 0x1f000242},
+ {0x0000a524, 0x24000051, 0x24000051, 0x23000051, 0x23000051, 0x23000051, 0x23000051, 0x23000229, 0x23000229},
+ {0x0000a528, 0x27000071, 0x27000071, 0x27000071, 0x27000071, 0x26000071, 0x26000071, 0x270002a2, 0x270002a2},
+ {0x0000a52c, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2c001203, 0x2c001203},
+ {0x0000a530, 0x3000028c, 0x3000028c, 0x2f00028c, 0x2f00028c, 0x2e00028c, 0x2e00028c, 0x30001803, 0x30001803},
+ {0x0000a534, 0x34000290, 0x34000290, 0x33000290, 0x33000290, 0x32000290, 0x32000290, 0x33000881, 0x33000881},
+ {0x0000a538, 0x37000292, 0x37000292, 0x36000292, 0x36000292, 0x35000292, 0x35000292, 0x38001809, 0x38001809},
+ {0x0000a53c, 0x3b02028d, 0x3b02028d, 0x3a02028d, 0x3a02028d, 0x3902028d, 0x3902028d, 0x3a000814, 0x3a000814},
+ {0x0000a540, 0x3f020291, 0x3f020291, 0x3e020291, 0x3e020291, 0x3d020291, 0x3d020291, 0x3f001a0c, 0x3f001a0c},
+ {0x0000a544, 0x44020490, 0x44020490, 0x43020490, 0x43020490, 0x42020490, 0x42020490, 0x43001a0e, 0x43001a0e},
+ {0x0000a548, 0x48020492, 0x48020492, 0x47020492, 0x47020492, 0x46020492, 0x46020492, 0x46001812, 0x46001812},
+ {0x0000a54c, 0x4c020692, 0x4c020692, 0x4b020692, 0x4b020692, 0x4a020692, 0x4a020692, 0x49001884, 0x49001884},
+ {0x0000a550, 0x50020892, 0x50020892, 0x4f020892, 0x4f020892, 0x4e020892, 0x4e020892, 0x4d001e84, 0x4d001e84},
+ {0x0000a554, 0x53040891, 0x53040891, 0x53040891, 0x53040891, 0x52040891, 0x52040891, 0x50001e69, 0x50001e69},
+ {0x0000a558, 0x58040893, 0x58040893, 0x57040893, 0x57040893, 0x56040893, 0x56040893, 0x550006f4, 0x550006f4},
+ {0x0000a55c, 0x5c0408b4, 0x5c0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x59000ad3, 0x59000ad3},
+ {0x0000a560, 0x610408b6, 0x610408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e000ad5, 0x5e000ad5},
+ {0x0000a564, 0x670408f6, 0x670408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x61001ced, 0x61001ced},
+ {0x0000a568, 0x6a040cf6, 0x6a040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x660018d4, 0x660018d4},
+ {0x0000a56c, 0x6d040d76, 0x6d040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x660018d4, 0x660018d4},
+ {0x0000a570, 0x70060db6, 0x70060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x660018d4, 0x660018d4},
+ {0x0000a574, 0x730a0df6, 0x730a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x660018d4, 0x660018d4},
+ {0x0000a578, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
+ {0x0000a57c, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x03804000, 0x03804000},
+ {0x0000a610, 0x04008b01, 0x04008b01, 0x04008b01, 0x04008b01, 0x03c08b01, 0x03c08b01, 0x0300ca02, 0x0300ca02},
+ {0x0000a614, 0x05811403, 0x05811403, 0x05411303, 0x05411303, 0x05411303, 0x05411303, 0x00000e04, 0x00000e04},
+ {0x0000a618, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a61c, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a620, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a624, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a628, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03804c05, 0x03804c05},
+ {0x0000a62c, 0x06815604, 0x06815604, 0x06415504, 0x06415504, 0x06015504, 0x06015504, 0x0701de06, 0x0701de06},
+ {0x0000a630, 0x07819a05, 0x07819a05, 0x07419905, 0x07419905, 0x07019805, 0x07019805, 0x07819c07, 0x07819c07},
+ {0x0000a634, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a638, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a63c, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000b2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000c2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000c2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 ar955x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01884061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar955x_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007000, 0x00000000},
+ {0x00007004, 0x00000000},
+ {0x00007008, 0x00000000},
+ {0x0000700c, 0x00000000},
+ {0x0000701c, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007024, 0x00000000},
+ {0x00007028, 0x00000000},
+ {0x0000702c, 0x00000000},
+ {0x00007030, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000000},
+};
+
+static const u32 ar955x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 ar955x_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar955x_1p0_common_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0b000006, 0x0b000006},
+ {0x0000a510, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x1b000012, 0x1b000012},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x1f00004a, 0x1f00004a},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x23000244, 0x23000244},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2700022b, 0x2700022b},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x2b000625, 0x2b000625},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x2f001006, 0x2f001006},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x330008a0, 0x330008a0},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x37000a2a, 0x37000a2a},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x3b001c23, 0x3b001c23},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x3f0014a0, 0x3f0014a0},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x43001882, 0x43001882},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x47001ca2, 0x47001ca2},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x4b001ec3, 0x4b001ec3},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x4f00148c, 0x4f00148c},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x53001c6e, 0x53001c6e},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x57001c92, 0x57001c92},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x5c001af6, 0x5c001af6},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x04005001, 0x04005001},
+ {0x0000a614, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x03808e02, 0x03808e02},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0300c000, 0x0300c000},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x03808e02, 0x03808e02},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x03410c03, 0x03410c03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04014c03, 0x04014c03},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x05818d04, 0x05818d04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801cd04, 0x0801cd04},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+#endif /* INITVALS_955X_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644
index 000000000..10d4a6cb1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -0,0 +1,1168 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P0_H
+#define INITVALS_9565_1P0_H
+
+/* AR9565 1.0 */
+
+#define ar9565_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9565_1p0_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9565_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000a0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x800301ff},
+};
+
+static const u32 ar9565_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009bf0, 0x80000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x9907b515},
+ {0x00009e28, 0x126f0600},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0xe4c355c7},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x823e4fc8},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000011},
+ {0x0000a3a8, 0xaaaaaa6e},
+ {0x0000a3ac, 0x3c466455},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739c5},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739c5},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739c5},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000096},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x03000000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b4, 0x00512c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a7f0, 0x80000000},
+};
+
+static const u32 ar9565_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
+ {0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d823601},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x1c99e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x000c0000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x1203040b},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc491},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x006db6d0},
+ {0x000160c4, 0x6dd6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841440},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1004080},
+ {0x00016294, 0x40000028},
+ {0x00016298, 0x55aa2900},
+ {0x00016340, 0x131c827a},
+ {0x00016344, 0x00300000},
+};
+
+static const u32 ar9565_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9565_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004078, 0x00000002},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9565_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x00004050, 0x00300300},
+ {0x0000406c, 0x00100000},
+ {0x00009e20, 0x000003b6},
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x00bf00a0},
+ {0x0000a0c4, 0x11a011a1},
+ {0x0000a0c8, 0x11be11bf},
+ {0x0000a0cc, 0x11bc11bd},
+ {0x0000a0d0, 0x22632264},
+ {0x0000a0d4, 0x22612262},
+ {0x0000a0d8, 0x227f2260},
+ {0x0000a0dc, 0x4322227e},
+ {0x0000a0e0, 0x43204321},
+ {0x0000a0e4, 0x433e433f},
+ {0x0000a0e8, 0x4462433d},
+ {0x0000a0ec, 0x44604461},
+ {0x0000a0f0, 0x447e447f},
+ {0x0000a0f4, 0x5582447d},
+ {0x0000a0f8, 0x55805581},
+ {0x0000a0fc, 0x559e559f},
+ {0x0000a100, 0x66816682},
+ {0x0000a104, 0x669f6680},
+ {0x0000a108, 0x669d669e},
+ {0x0000a10c, 0x77627763},
+ {0x0000a110, 0x77607761},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x00bf00a0},
+ {0x0000a144, 0x11a011a1},
+ {0x0000a148, 0x11be11bf},
+ {0x0000a14c, 0x11bc11bd},
+ {0x0000a150, 0x22632264},
+ {0x0000a154, 0x22612262},
+ {0x0000a158, 0x227f2260},
+ {0x0000a15c, 0x4322227e},
+ {0x0000a160, 0x43204321},
+ {0x0000a164, 0x433e433f},
+ {0x0000a168, 0x4462433d},
+ {0x0000a16c, 0x44604461},
+ {0x0000a170, 0x447e447f},
+ {0x0000a174, 0x5582447d},
+ {0x0000a178, 0x55805581},
+ {0x0000a17c, 0x559e559f},
+ {0x0000a180, 0x66816682},
+ {0x0000a184, 0x669f6680},
+ {0x0000a188, 0x669d669e},
+ {0x0000a18c, 0x77e677e7},
+ {0x0000a190, 0x77e477e5},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18212ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003780c},
+};
+
+static const u32 ar9565_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x00bf00a0},
+ {0x0000a0c4, 0x11a011a1},
+ {0x0000a0c8, 0x11be11bf},
+ {0x0000a0cc, 0x11bc11bd},
+ {0x0000a0d0, 0x22632264},
+ {0x0000a0d4, 0x22612262},
+ {0x0000a0d8, 0x227f2260},
+ {0x0000a0dc, 0x4322227e},
+ {0x0000a0e0, 0x43204321},
+ {0x0000a0e4, 0x433e433f},
+ {0x0000a0e8, 0x4462433d},
+ {0x0000a0ec, 0x44604461},
+ {0x0000a0f0, 0x447e447f},
+ {0x0000a0f4, 0x5582447d},
+ {0x0000a0f8, 0x55805581},
+ {0x0000a0fc, 0x559e559f},
+ {0x0000a100, 0x66816682},
+ {0x0000a104, 0x669f6680},
+ {0x0000a108, 0x669d669e},
+ {0x0000a10c, 0x77627763},
+ {0x0000a110, 0x77607761},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x00bf00a0},
+ {0x0000a144, 0x11a011a1},
+ {0x0000a148, 0x11be11bf},
+ {0x0000a14c, 0x11bc11bd},
+ {0x0000a150, 0x22632264},
+ {0x0000a154, 0x22612262},
+ {0x0000a158, 0x227f2260},
+ {0x0000a15c, 0x4322227e},
+ {0x0000a160, 0x43204321},
+ {0x0000a164, 0x433e433f},
+ {0x0000a168, 0x4462433d},
+ {0x0000a16c, 0x44604461},
+ {0x0000a170, 0x447e447f},
+ {0x0000a174, 0x5582447d},
+ {0x0000a178, 0x55805581},
+ {0x0000a17c, 0x559e559f},
+ {0x0000a180, 0x66816682},
+ {0x0000a184, 0x669f6680},
+ {0x0000a188, 0x669d669e},
+ {0x0000a18c, 0x77e677e7},
+ {0x0000a190, 0x77e477e5},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
+ {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
+ {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
+ {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
+ {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
+ {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
+ {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
+ {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
+ {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
+ {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
+ {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
+ {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
+ {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
+ {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
+ {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
+ {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
+ {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
+ {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
+ {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
+ {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
+ {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
+ {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
+ {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0},
+ {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1},
+ {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2},
+ {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3},
+ {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4},
+ {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
+ {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
+ {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
+ {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
+ {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
+ {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
+ {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+#endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
new file mode 100644
index 000000000..568105399
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P1_H
+#define INITVALS_9565_1P1_H
+
+/* AR9565 1.1 */
+
+#define ar9565_1p1_mac_core ar9565_1p0_mac_core
+
+#define ar9565_1p1_mac_postamble ar9565_1p0_mac_postamble
+
+#define ar9565_1p1_baseband_core ar9565_1p0_baseband_core
+
+#define ar9565_1p1_baseband_postamble ar9565_1p0_baseband_postamble
+
+#define ar9565_1p1_radio_core ar9565_1p0_radio_core
+
+#define ar9565_1p1_soc_preamble ar9565_1p0_soc_preamble
+
+#define ar9565_1p1_soc_postamble ar9565_1p0_soc_postamble
+
+#define ar9565_1p1_Common_rx_gain_table ar9565_1p0_Common_rx_gain_table
+
+#define ar9565_1p1_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_Modes_lowest_ob_db_tx_gain_table
+
+#define ar9565_1p1_pciephy_clkreq_disable_L1 ar9565_1p0_pciephy_clkreq_disable_L1
+
+#define ar9565_1p1_modes_fast_clock ar9565_1p0_modes_fast_clock
+
+#define ar9565_1p1_common_wo_xlna_rx_gain_table ar9565_1p0_common_wo_xlna_rx_gain_table
+
+#define ar9565_1p1_modes_low_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_ob_db_tx_gain_table ar9565_1p0_modes_high_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_power_tx_gain_table ar9565_1p0_modes_high_power_tx_gain_table
+
+#define ar9565_1p1_baseband_core_txfir_coeff_japan_2484 ar9565_1p0_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9565_1p1_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+#endif /* INITVALS_9565_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
new file mode 100644
index 000000000..c3a47eaaf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_956X_H
+#define INITVALS_956X_H
+
+#define qca956x_1p0_mac_core ar955x_1p0_mac_core
+
+#define qca956x_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define qca956x_1p0_soc_preamble ar955x_1p0_soc_preamble
+
+#define qca956x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca956x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca956x_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
+#define qca956x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
+
+#define qca956x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
+
+#define qca956x_1p0_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+static const u32 qca956x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840cbf},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb514},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4789},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x02993b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a218, 0x00000000},
+ {0x0000a21c, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a360, 0x00000000},
+ {0x0000a36c, 0x00000000},
+ {0x0000a384, 0x00000001},
+ {0x0000a388, 0x00000444},
+ {0x0000a38c, 0x00000000},
+ {0x0000a390, 0x210d0401},
+ {0x0000a394, 0xab9a7144},
+ {0x0000a398, 0x00000201},
+ {0x0000a39c, 0x42424848},
+ {0x0000a3a0, 0x3c466478},
+ {0x0000a3a4, 0x3a363600},
+ {0x0000a3a8, 0x0000003a},
+ {0x0000a3ac, 0x00000000},
+ {0x0000a3b0, 0x009011fe},
+ {0x0000a3b4, 0x00000034},
+ {0x0000a3b8, 0x00b3ec0a},
+ {0x0000a3bc, 0x00000036},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x05000000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9fee},
+ {0x0000a648, 0x0048660d},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x21200504},
+ {0x0000a678, 0x61602322},
+ {0x0000a67c, 0x65646362},
+ {0x0000a680, 0x6b6a6968},
+ {0x0000a684, 0xe2706d6c},
+ {0x0000a688, 0x000000e3},
+ {0x0000a690, 0x00000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 qca956x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
+ {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000de},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x337d605e, 0x337d5d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x045c0cc4, 0x045c0cc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca956x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x3f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x8036db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f080a},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480000},
+ {0x000160c0, 0x006db6d8},
+ {0x000160c4, 0x24b6db6c},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6db6fb7c},
+ {0x000160d0, 0x6db6da44},
+ {0x00016100, 0x07ff8001},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008058},
+ {0x00016288, 0x001c6000},
+ {0x0001628c, 0x50000000},
+ {0x000162c0, 0x4b962100},
+ {0x000162c4, 0x00000480},
+ {0x000162c8, 0x04000144},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x3f80fff8},
+ {0x0001644c, 0x000f0278},
+ {0x00016450, 0x8036db6c},
+ {0x00016454, 0x6db60000},
+ {0x00016500, 0x07ff8001},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x01884080},
+ {0x00016548, 0x00008058},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x3f80fff8},
+ {0x0001684c, 0x000f0278},
+ {0x00016850, 0x8036db6c},
+ {0x00016854, 0x6db60000},
+ {0x00016900, 0x07ff8001},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x01884080},
+ {0x00016948, 0x00008058},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 qca956x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+ {0x00016104, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016504, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+ {0x00016904, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
+ {0x0001690c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca956x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a38c, 0x00000000},
+ {0x0000a390, 0x6f7f0301},
+ {0x0000a394, 0xca9228ee},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000a2e0, 0xff323118, 0xff323118},
+ {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000a2e8, 0xffc00000, 0xffc00000},
+ {0x0000a39c, 0x42424242, 0x42424242},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a3b0, 0x00a01404, 0x00a01404},
+ {0x0000a3b4, 0x00000034, 0x00000034},
+ {0x0000a3b8, 0x00800408, 0x00800408},
+ {0x0000a3bc, 0x00000036, 0x00000036},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a500, 0x09000040, 0x09000040},
+ {0x0000a504, 0x0b000041, 0x0b000041},
+ {0x0000a508, 0x0d000042, 0x0d000042},
+ {0x0000a50c, 0x11000044, 0x11000044},
+ {0x0000a510, 0x15000046, 0x15000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x280004e0, 0x280004e0},
+ {0x0000a528, 0x2c0004e2, 0x2c0004e2},
+ {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
+ {0x0000a530, 0x300004e4, 0x300004e4},
+ {0x0000a534, 0x340004e6, 0x340004e6},
+ {0x0000a538, 0x37000ce0, 0x37000ce0},
+ {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
+ {0x0000a540, 0x3d000ce3, 0x3d000ce3},
+ {0x0000a544, 0x3f000ce4, 0x3f000ce4},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ef5, 0x63001ef5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000b2e0, 0xff323118, 0xff323118},
+ {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000b2e8, 0xffc00000, 0xffc00000},
+ {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000c2e0, 0xff323118, 0xff323118},
+ {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000c2e8, 0xffc00000, 0xffc00000},
+ {0x00016044, 0x049242db, 0x049242db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016148, 0x00008050, 0x00008050},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x049242db, 0x049242db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016548, 0x00008050, 0x00008050},
+ {0x00016844, 0x049242db, 0x049242db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x00016948, 0x00008050, 0x00008050},
+};
+
+static const u32 qca956x_1p0_modes_xpa_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000a2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000a2e4, 0xffffc000, 0xffffc000},
+ {0x0000a2e8, 0xc0000000, 0xc0000000},
+ {0x0000a410, 0x000050d2, 0x000050d2},
+ {0x0000a500, 0x0a000040, 0x0a000040},
+ {0x0000a504, 0x0c000041, 0x0c000041},
+ {0x0000a508, 0x0e000042, 0x0e000042},
+ {0x0000a50c, 0x12000044, 0x12000044},
+ {0x0000a510, 0x16000046, 0x16000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x29000a40, 0x29000a40},
+ {0x0000a528, 0x2d000a42, 0x2d000a42},
+ {0x0000a52c, 0x2f000a43, 0x2f000a43},
+ {0x0000a530, 0x31000a44, 0x31000a44},
+ {0x0000a534, 0x35000a46, 0x35000a46},
+ {0x0000a538, 0x38000ce0, 0x38000ce0},
+ {0x0000a53c, 0x3c000ce2, 0x3c000ce2},
+ {0x0000a540, 0x3e000ce3, 0x3e000ce3},
+ {0x0000a544, 0x40000ce4, 0x40000ce4},
+ {0x0000a548, 0x46001ee0, 0x46001ee0},
+ {0x0000a54c, 0x4a001ee2, 0x4a001ee2},
+ {0x0000a550, 0x4e001ee4, 0x4e001ee4},
+ {0x0000a554, 0x52001ee6, 0x52001ee6},
+ {0x0000a558, 0x56001eea, 0x56001eea},
+ {0x0000a55c, 0x5a001eec, 0x5a001eec},
+ {0x0000a560, 0x5e001ef0, 0x5e001ef0},
+ {0x0000a564, 0x60001ef1, 0x60001ef1},
+ {0x0000a568, 0x61001ef2, 0x61001ef2},
+ {0x0000a56c, 0x62001ef3, 0x62001ef3},
+ {0x0000a570, 0x63001ef4, 0x63001ef4},
+ {0x0000a574, 0x64001ef5, 0x64001ef5},
+ {0x0000a578, 0x65001ffc, 0x65001ffc},
+ {0x0000a57c, 0x65001ffc, 0x65001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000b2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000b2e4, 0xffffc000, 0xffffc000},
+ {0x0000b2e8, 0xc0000000, 0xc0000000},
+ {0x0000c2dc, 0xcc69ac94, 0xcc69ac94},
+ {0x0000c2e0, 0xf0b23118, 0xf0b23118},
+ {0x0000c2e4, 0xffffc000, 0xffffc000},
+ {0x0000c2e8, 0xc0000000, 0xc0000000},
+ {0x00016044, 0x012492db, 0x012492db},
+ {0x00016048, 0x6c927a70, 0x6c927a70},
+ {0x00016050, 0x8036d36c, 0x8036d36c},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a7e00, 0x453a7e00},
+ {0x00016444, 0x012492db, 0x012492db},
+ {0x00016448, 0x6c927a70, 0x6c927a70},
+ {0x00016450, 0x8036d36c, 0x8036d36c},
+ {0x00016844, 0x012492db, 0x012492db},
+ {0x00016848, 0x6c927a70, 0x6c927a70},
+ {0x00016850, 0x8036d36c, 0x8036d36c},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000a2e0, 0xff323118, 0xff323118},
+ {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000a2e8, 0xffc00000, 0xffc00000},
+ {0x0000a39c, 0x42424242, 0x42424242},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a3b0, 0x00a01404, 0x00a01404},
+ {0x0000a3b4, 0x00000034, 0x00000034},
+ {0x0000a3b8, 0x00800408, 0x00800408},
+ {0x0000a3bc, 0x00000036, 0x00000036},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a414, 0x16b739ce, 0x16b739ce},
+ {0x0000a418, 0x2d00198b, 0x2d00198b},
+ {0x0000a41c, 0x16b5adce, 0x16b5adce},
+ {0x0000a420, 0x0000014a, 0x0000014a},
+ {0x0000a424, 0x14a525cc, 0x14a525cc},
+ {0x0000a428, 0x0000012a, 0x0000012a},
+ {0x0000a42c, 0x14a5294a, 0x14a5294a},
+ {0x0000a430, 0x1294a929, 0x1294a929},
+ {0x0000a500, 0x09000040, 0x09000040},
+ {0x0000a504, 0x0b000041, 0x0b000041},
+ {0x0000a508, 0x0d000042, 0x0d000042},
+ {0x0000a50c, 0x11000044, 0x11000044},
+ {0x0000a510, 0x15000046, 0x15000046},
+ {0x0000a514, 0x1d000440, 0x1d000440},
+ {0x0000a518, 0x1f000441, 0x1f000441},
+ {0x0000a51c, 0x23000443, 0x23000443},
+ {0x0000a520, 0x25000444, 0x25000444},
+ {0x0000a524, 0x280004e0, 0x280004e0},
+ {0x0000a528, 0x2c0004e2, 0x2c0004e2},
+ {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
+ {0x0000a530, 0x300004e4, 0x300004e4},
+ {0x0000a534, 0x340004e6, 0x340004e6},
+ {0x0000a538, 0x37000ce0, 0x37000ce0},
+ {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
+ {0x0000a540, 0x3d000ce3, 0x3d000ce3},
+ {0x0000a544, 0x3f000ce4, 0x3f000ce4},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ef5, 0x63001ef5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000b2e0, 0xff323118, 0xff323118},
+ {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000b2e8, 0xffc00000, 0xffc00000},
+ {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
+ {0x0000c2e0, 0xff323118, 0xff323118},
+ {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
+ {0x0000c2e8, 0xffc00000, 0xffc00000},
+ {0x00016044, 0x046e42db, 0x046e42db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016148, 0x00008050, 0x00008050},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x046e42db, 0x046e42db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016548, 0x00008050, 0x00008050},
+ {0x00016844, 0x046e42db, 0x046e42db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x00016948, 0x00008050, 0x00008050},
+};
+
+static const u32 qca956x_1p0_modes_no_xpa_green_tx_gain_table[][3] = {
+ /* Addr 5G 2G */
+ {0x000098bc, 0x00000001, 0x00000001},
+ {0x0000a2dc, 0xd3555284, 0xd3555284},
+ {0x0000a2e0, 0x1c666318, 0x1c666318},
+ {0x0000a2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000a2e8, 0xff800000, 0xff800000},
+ {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
+ {0x0000a410, 0x000050dc, 0x000050dc},
+ {0x0000a500, 0x02000040, 0x02000040},
+ {0x0000a504, 0x04000041, 0x04000041},
+ {0x0000a508, 0x06000042, 0x06000042},
+ {0x0000a50c, 0x0a000044, 0x0a000044},
+ {0x0000a510, 0x0c000045, 0x0c000045},
+ {0x0000a514, 0x13000440, 0x13000440},
+ {0x0000a518, 0x15000441, 0x15000441},
+ {0x0000a51c, 0x19000443, 0x19000443},
+ {0x0000a520, 0x1b000444, 0x1b000444},
+ {0x0000a524, 0x1e0004e0, 0x1e0004e0},
+ {0x0000a528, 0x220004e2, 0x220004e2},
+ {0x0000a52c, 0x240004e3, 0x240004e3},
+ {0x0000a530, 0x260004e4, 0x260004e4},
+ {0x0000a534, 0x2a0004e6, 0x2a0004e6},
+ {0x0000a538, 0x32000ce0, 0x32000ce0},
+ {0x0000a53c, 0x36000ce2, 0x36000ce2},
+ {0x0000a540, 0x3a000ce4, 0x3a000ce4},
+ {0x0000a544, 0x3e000ce6, 0x3e000ce6},
+ {0x0000a548, 0x45001ee0, 0x45001ee0},
+ {0x0000a54c, 0x49001ee2, 0x49001ee2},
+ {0x0000a550, 0x4d001ee4, 0x4d001ee4},
+ {0x0000a554, 0x51001ee6, 0x51001ee6},
+ {0x0000a558, 0x55001eea, 0x55001eea},
+ {0x0000a55c, 0x59001eec, 0x59001eec},
+ {0x0000a560, 0x5d001ef0, 0x5d001ef0},
+ {0x0000a564, 0x5f001ef1, 0x5f001ef1},
+ {0x0000a568, 0x60001ef2, 0x60001ef2},
+ {0x0000a56c, 0x61001ef3, 0x61001ef3},
+ {0x0000a570, 0x62001ef4, 0x62001ef4},
+ {0x0000a574, 0x63001ff5, 0x63001ff5},
+ {0x0000a578, 0x64001ffc, 0x64001ffc},
+ {0x0000a57c, 0x64001ffc, 0x64001ffc},
+ {0x0000a600, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000},
+ {0x0000a614, 0x00804201, 0x00804201},
+ {0x0000a618, 0x00804201, 0x00804201},
+ {0x0000a61c, 0x00804201, 0x00804201},
+ {0x0000a620, 0x00804201, 0x00804201},
+ {0x0000a624, 0x00804201, 0x00804201},
+ {0x0000a628, 0x00804201, 0x00804201},
+ {0x0000a62c, 0x02808a02, 0x02808a02},
+ {0x0000a630, 0x0340cd03, 0x0340cd03},
+ {0x0000a634, 0x0340cd03, 0x0340cd03},
+ {0x0000a638, 0x0340cd03, 0x0340cd03},
+ {0x0000a63c, 0x05011404, 0x05011404},
+ {0x0000b2dc, 0xd3555284, 0xd3555284},
+ {0x0000b2e0, 0x1c666318, 0x1c666318},
+ {0x0000b2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000b2e8, 0xff800000, 0xff800000},
+ {0x0000c2dc, 0xd3555284, 0xd3555284},
+ {0x0000c2e0, 0x1c666318, 0x1c666318},
+ {0x0000c2e4, 0xe07bbc00, 0xe07bbc00},
+ {0x0000c2e8, 0xff800000, 0xff800000},
+ {0x00016044, 0x849242db, 0x849242db},
+ {0x00016048, 0x64925a70, 0x64925a70},
+ {0x00016280, 0x41110005, 0x41110005},
+ {0x00016284, 0x453a6000, 0x453a6000},
+ {0x00016444, 0x849242db, 0x849242db},
+ {0x00016448, 0x6c925a70, 0x6c925a70},
+ {0x00016844, 0x849242db, 0x849242db},
+ {0x00016848, 0x6c925a70, 0x6c925a70},
+ {0x0000a7f0, 0x800002cc, 0x800002cc},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000018, 0x00000018},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000028, 0x00000028},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+ {0x0000a7f4, 0x00000048, 0x00000048},
+};
+
+static const u32 qca956x_1p0_common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222222},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x17171717},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 qca956x_1p0_xlna_only[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
+ {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x03721720},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000da},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec8ad2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x317a6062, 0x317a5ae2},
+ {0x00009e18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003b2, 0x000003b2},
+ {0x00009fc0, 0x813e4788, 0x813e4788, 0x813e4789, 0x813e4789},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
+ {0x0000be18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
+};
+
+#endif /* INITVALS_956X_H */
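The five-column tables above (e.g. qca956x_1p0_xlna_only) pair each register address in column 0 with one value per channel mode: 5G_HT20, 5G_HT40, 2G_HT40 and 2G_HT20, as the in-table comment indicates. A minimal sketch of how such a table can be walked is shown below; the reg_write() primitive, the enum names and the write_ini_5col() helper are hypothetical illustrations, not the driver's actual INI machinery, which applies these arrays internally.

/*
 * Sketch only: program the initvals column matching the current
 * channel mode.  Assumes a reg_write(addr, val) MMIO primitive.
 */
#include <stddef.h>

typedef unsigned int u32;

enum ini_column {
	INI_5G_HT20 = 1,	/* column order follows the table header */
	INI_5G_HT40 = 2,
	INI_2G_HT40 = 3,
	INI_2G_HT20 = 4,
};

static void reg_write(u32 addr, u32 val);	/* assumed MMIO write */

static void write_ini_5col(const u32 (*tbl)[5], size_t rows,
			   enum ini_column col)
{
	size_t i;

	/* Column 0 is the register address; col selects the mode value. */
	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][col]);
}

/*
 * Usage (illustrative):
 *	write_ini_5col(qca956x_1p0_xlna_only,
 *		       sizeof(qca956x_1p0_xlna_only) /
 *		       sizeof(qca956x_1p0_xlna_only[0]),
 *		       INI_5G_HT40);
 */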
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
new file mode 100644
index 000000000..5d4629f96
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -0,0 +1,1361 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9580_1P0_H
+#define INITVALS_9580_1P0_H
+
+/* AR9580 1.0 */
+
+#define ar9580_1p0_soc_preamble ar9300_2p2_soc_preamble
+
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+static const u32 ar9580_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db2db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458a214f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db2db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db2db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+
+#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9580_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9580_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01884061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261800},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+ {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016288, 0x05a2040a, 0x05a2040a, 0x05a20408, 0x05a20408},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9580_1p0_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9580_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009814, 0x3280c00a, 0x3280c00a, 0x3280c00a, 0x3280c00a},
+ {0x00009818, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000036c0, 0x000036c4, 0x000036c4, 0x000036c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0835365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0831365e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x0831265e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
+ /* Addr 5G 2G */
+ {0x00009814, 0x3400c00f, 0x3400c00f},
+ {0x00009824, 0x5ac668d0, 0x5ac668d0},
+ {0x00009828, 0x06903080, 0x06903080},
+ {0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+ {0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
+#endif /* INITVALS_9580_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
new file mode 100644
index 000000000..a7a81b396
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_H
+#define ATH9K_H
+
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/leds.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+
+#include "common.h"
+#include "debug.h"
+#include "mci.h"
+#include "dfs.h"
+
+struct ath_node;
+struct ath_vif;
+
+extern struct ieee80211_ops ath9k_ops;
+extern int ath9k_modparam_nohwcrypt;
+extern int ath9k_led_blink;
+extern bool is_ath9k_unloaded;
+extern int ath9k_use_chanctx;
+
+/*************************/
+/* Descriptor Management */
+/*************************/
+
+#define ATH_TXSTATUS_RING_SIZE 512
+
+/* Macro to expand scalars to 64-bit objects */
+#define ito64(x) (sizeof(x) == 1) ? \
+ (((unsigned long long int)(x)) & (0xff)) : \
+ (sizeof(x) == 2) ? \
+ (((unsigned long long int)(x)) & 0xffff) : \
+ ((sizeof(x) == 4) ? \
+ (((unsigned long long int)(x)) & 0xffffffff) : \
+ (unsigned long long int)(x))
+
+#define ATH_TXBUF_RESET(_bf) do { \
+ (_bf)->bf_lastbf = NULL; \
+ (_bf)->bf_next = NULL; \
+ memset(&((_bf)->bf_state), 0, \
+ sizeof(struct ath_buf_state)); \
+ } while (0)
+
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
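+/*
+ * Note on the 4 KB checks above: ATH_DESC_4KB_BOUND_CHECK flags a descriptor
+ * address whose offset within its 4 KB page is above 0xF7F, i.e. one that
+ * falls in the last 0x80 (128) bytes of the page and could straddle the
+ * boundary; ATH_DESC_4KB_BOUND_NUM_SKIPPED gives the number of such
+ * boundaries (one per 4096 bytes) in a buffer of the given length.
+ */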
+
+struct ath_descdma {
+ void *dd_desc;
+ dma_addr_t dd_desc_paddr;
+ u32 dd_desc_len;
+};
+
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc, bool is_tx);
+
+/***********/
+/* RX / TX */
+/***********/
+
+#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+
+/* increment with wrap-around */
+#define INCR(_l, _sz) do { \
+ (_l)++; \
+ (_l) &= ((_sz) - 1); \
+ } while (0)
+
+#define ATH_RXBUF 512
+#define ATH_TXBUF 512
+#define ATH_TXBUF_RESERVE 5
+#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
+#define ATH_TXMAXTRY 13
+#define ATH_MAX_SW_RETRIES 30
+
+#define TID_TO_WME_AC(_tid) \
+ ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
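+/*
+ * The mapping above follows the 802.11 user-priority to access-category
+ * table: TIDs 0 and 3 map to best effort, 1 and 2 to background, 4 and 5
+ * to video, and the remaining TIDs (6, 7) to voice.
+ */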
+
+#define ATH_AGGR_DELIM_SZ 4
+#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
+/* number of delimiters for encryption padding */
+#define ATH_AGGR_ENCRYPTDELIM 10
+/* minimum h/w qdepth to be sustained to maximize aggregation */
+#define ATH_AGGR_MIN_QDEPTH 2
+/* minimum h/w qdepth for non-aggregated traffic */
+#define ATH_NON_AGGR_MIN_QDEPTH 8
+#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_TXFIFO_DEPTH 8
+#define ATH_TX_ERROR 0x01
+
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME 1000
+
+#define IEEE80211_SEQ_SEQ_SHIFT 4
+#define IEEE80211_SEQ_MAX 4096
+#define IEEE80211_WEP_IVLEN 3
+#define IEEE80211_WEP_KIDLEN 1
+#define IEEE80211_WEP_CRCLEN 4
+#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
+ (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN))
+
+/* return whether the bit at index _n in bitmap _bm is set;
+ * indices >= WME_BA_BMP_SIZE are treated as not set */
+#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
+ ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
+
+/* return block-ack bitmap index given sequence and starting sequence */
+#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+
+/* return the seqno for _seq + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
+/* returns delimiter padding required given the packet length */
+#define ATH_AGGR_GET_NDELIM(_len) \
+ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
+ DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
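+/*
+ * For illustration: a 100-byte subframe needs
+ * DIV_ROUND_UP(256 - 100, 4) = 39 delimiters of padding, while any
+ * subframe of ATH_AGGR_MINPLEN (256) bytes or more needs none.
+ */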
+
+#define BAW_WITHIN(_start, _bawsz, _seqno) \
+ ((((_seqno) - (_start)) & 4095) < (_bawsz))
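+/*
+ * The block-ack macros above operate in the modulo-4096 sequence space.
+ * For illustration: ATH_BA_INDEX(4090, 5) = (5 - 4090) & 4095 = 11, and
+ * ATH_BA_INDEX2SEQ(4090, 11) = (4090 + 11) & 4095 = 5, so
+ * BAW_WITHIN(4090, 64, 5) is true even though the sequence number has
+ * wrapped past 4095.
+ */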
+
+#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
+
+#define IS_HT_RATE(rate) (rate & 0x80)
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
+
+enum {
+ WLAN_RC_PHY_OFDM,
+ WLAN_RC_PHY_CCK,
+};
+
+struct ath_txq {
+ int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
+ u32 axq_qnum; /* ath9k hardware queue number */
+ void *axq_link;
+ struct list_head axq_q;
+ spinlock_t axq_lock;
+ u32 axq_depth;
+ u32 axq_ampdu_depth;
+ bool stopped;
+ bool axq_tx_inprogress;
+ struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
+ u8 txq_headidx;
+ u8 txq_tailidx;
+ int pending_frames;
+ struct sk_buff_head complete_q;
+};
+
+struct ath_atx_ac {
+ struct ath_txq *txq;
+ struct list_head list;
+ struct list_head tid_q;
+ bool clear_ps_filter;
+ bool sched;
+};
+
+struct ath_frame_info {
+ struct ath_buf *bf;
+ u16 framelen;
+ s8 txq;
+ u8 keyix;
+ u8 rtscts_rate;
+ u8 retries : 7;
+ u8 baw_tracked : 1;
+ u8 tx_power;
+ enum ath9k_key_type keytype:2;
+};
+
+struct ath_rxbuf {
+ struct list_head list;
+ struct sk_buff *bf_mpdu;
+ void *bf_desc;
+ dma_addr_t bf_daddr;
+ dma_addr_t bf_buf_addr;
+};
+
+/**
+ * enum buffer_type - Buffer type flags
+ *
+ * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
+ * @BUF_AGGR: Indicates whether the buffer can be aggregated
+ * (used in aggregation scheduling)
+ */
+enum buffer_type {
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+};
+
+#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
+
+struct ath_buf_state {
+ u8 bf_type;
+ u8 bfs_paprd;
+ u8 ndelim;
+ bool stale;
+ u16 seqno;
+ unsigned long bfs_paprd_timestamp;
+};
+
+struct ath_buf {
+ struct list_head list;
+ struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
+ an aggregate) */
+ struct ath_buf *bf_next; /* next subframe in the aggregate */
+ struct sk_buff *bf_mpdu; /* enclosing frame structure */
+ void *bf_desc; /* virtual addr of desc */
+ dma_addr_t bf_daddr; /* physical addr of desc */
+ dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
+ struct ieee80211_tx_rate rates[4];
+ struct ath_buf_state bf_state;
+};
+
+struct ath_atx_tid {
+ struct list_head list;
+ struct sk_buff_head buf_q;
+ struct sk_buff_head retry_q;
+ struct ath_node *an;
+ struct ath_atx_ac *ac;
+ unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+ u16 seq_start;
+ u16 seq_next;
+ u16 baw_size;
+ u8 tidno;
+ int baw_head; /* first un-acked tx buffer */
+ int baw_tail; /* next unused tx buffer slot */
+
+ s8 bar_index;
+ bool sched;
+ bool active;
+};
+
+struct ath_node {
+ struct ath_softc *sc;
+ struct ieee80211_sta *sta; /* station struct we're part of */
+ struct ieee80211_vif *vif; /* interface with which we're associated */
+ struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
+ struct ath_atx_ac ac[IEEE80211_NUM_ACS];
+
+ u16 maxampdu;
+ u8 mpdudensity;
+ s8 ps_key;
+
+ bool sleeping;
+ bool no_ps_filter;
+
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+ struct ath_rx_rate_stats rx_rate_stats;
+#endif
+ u8 key_idx[4];
+
+ u32 ackto;
+ struct list_head list;
+};
+
+struct ath_tx_control {
+ struct ath_txq *txq;
+ struct ath_node *an;
+ struct ieee80211_sta *sta;
+ u8 paprd;
+ bool force_channel;
+};
+
+
+/**
+ * @txq_map: Index is mac80211 queue number. This is
+ * not necessarily the same as the hardware queue number
+ * (axq_qnum).
+ */
+struct ath_tx {
+ u32 txqsetup;
+ spinlock_t txbuflock;
+ struct list_head txbuf;
+ struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
+ struct ath_descdma txdma;
+ struct ath_txq *txq_map[IEEE80211_NUM_ACS];
+ struct ath_txq *uapsdq;
+ u32 txq_max_pending[IEEE80211_NUM_ACS];
+ u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
+};
+
+struct ath_rx_edma {
+ struct sk_buff_head rx_fifo;
+ u32 rx_fifo_hwsize;
+};
+
+struct ath_rx {
+ u8 defant;
+ u8 rxotherant;
+ bool discard_next;
+ u32 *rxlink;
+ u32 num_pkts;
+ struct list_head rxbuf;
+ struct ath_descdma rxdma;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+ struct ath_rxbuf *buf_hold;
+ struct sk_buff *frag;
+
+ u32 ampdu_ref;
+};
+
+/*******************/
+/* Channel Context */
+/*******************/
+
+struct ath_chanctx {
+ struct cfg80211_chan_def chandef;
+ struct list_head vifs;
+ struct list_head acq[IEEE80211_NUM_ACS];
+ int hw_queue_base;
+
+ /* do not dereference, use for comparison only */
+ struct ieee80211_vif *primary_sta;
+
+ struct ath_beacon_config beacon;
+ struct ath9k_hw_cal_data caldata;
+ struct timespec tsf_ts;
+ u64 tsf_val;
+ u32 last_beacon;
+
+ int flush_timeout;
+ u16 txpower;
+ u16 cur_txpower;
+ bool offchannel;
+ bool stopped;
+ bool active;
+ bool assigned;
+ bool switch_after_beacon;
+
+ short nvifs;
+ short nvifs_assigned;
+ unsigned int rxfilter;
+};
+
+enum ath_chanctx_event {
+ ATH_CHANCTX_EVENT_BEACON_PREPARE,
+ ATH_CHANCTX_EVENT_BEACON_SENT,
+ ATH_CHANCTX_EVENT_TSF_TIMER,
+ ATH_CHANCTX_EVENT_BEACON_RECEIVED,
+ ATH_CHANCTX_EVENT_AUTHORIZED,
+ ATH_CHANCTX_EVENT_SWITCH,
+ ATH_CHANCTX_EVENT_ASSIGN,
+ ATH_CHANCTX_EVENT_UNASSIGN,
+ ATH_CHANCTX_EVENT_CHANGE,
+ ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL,
+};
+
+enum ath_chanctx_state {
+ ATH_CHANCTX_STATE_IDLE,
+ ATH_CHANCTX_STATE_WAIT_FOR_BEACON,
+ ATH_CHANCTX_STATE_WAIT_FOR_TIMER,
+ ATH_CHANCTX_STATE_SWITCH,
+ ATH_CHANCTX_STATE_FORCE_ACTIVE,
+};
+
+struct ath_chanctx_sched {
+ bool beacon_pending;
+ bool beacon_adjust;
+ bool offchannel_pending;
+ bool wait_switch;
+ bool force_noa_update;
+ bool extend_absence;
+ bool mgd_prepare_tx;
+ enum ath_chanctx_state state;
+ u8 beacon_miss;
+
+ u32 next_tbtt;
+ u32 switch_start_time;
+ unsigned int offchannel_duration;
+ unsigned int channel_switch_time;
+
+ /* backup, in case the hardware timer fails */
+ struct timer_list timer;
+};
+
+enum ath_offchannel_state {
+ ATH_OFFCHANNEL_IDLE,
+ ATH_OFFCHANNEL_PROBE_SEND,
+ ATH_OFFCHANNEL_PROBE_WAIT,
+ ATH_OFFCHANNEL_SUSPEND,
+ ATH_OFFCHANNEL_ROC_START,
+ ATH_OFFCHANNEL_ROC_WAIT,
+ ATH_OFFCHANNEL_ROC_DONE,
+};
+
+struct ath_offchannel {
+ struct ath_chanctx chan;
+ struct timer_list timer;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_vif *scan_vif;
+ int scan_idx;
+ enum ath_offchannel_state state;
+ struct ieee80211_channel *roc_chan;
+ struct ieee80211_vif *roc_vif;
+ int roc_duration;
+ int duration;
+};
+
+#define case_rtn_string(val) case val: return #val
+
+#define ath_for_each_chanctx(_sc, _ctx) \
+ for (ctx = &sc->chanctx[0]; \
+ ctx <= &sc->chanctx[ARRAY_SIZE(sc->chanctx) - 1]; \
+ ctx++)
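+/*
+ * NB: the loop body of ath_for_each_chanctx references the local names
+ * 'sc' and 'ctx' directly rather than its _sc/_ctx parameters, so callers
+ * must use exactly those variable names.
+ */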
+
+void ath_chanctx_init(struct ath_softc *sc);
+void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
+ struct cfg80211_chan_def *chandef);
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+
+static inline struct ath_chanctx *
+ath_chanctx_get(struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath_chanctx **ptr = (void *) ctx->drv_priv;
+ return *ptr;
+}
+
+bool ath9k_is_chanctx_enabled(void);
+void ath9k_fill_chanctx_ops(void);
+void ath9k_init_channel_context(struct ath_softc *sc);
+void ath9k_offchannel_init(struct ath_softc *sc);
+void ath9k_deinit_channel_context(struct ath_softc *sc);
+int ath9k_init_p2p(struct ath_softc *sc);
+void ath9k_deinit_p2p(struct ath_softc *sc);
+void ath9k_p2p_remove_vif(struct ath_softc *sc,
+ struct ieee80211_vif *vif);
+void ath9k_p2p_beacon_sync(struct ath_softc *sc);
+void ath9k_p2p_bss_info_changed(struct ath_softc *sc,
+ struct ieee80211_vif *vif);
+void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp,
+ struct sk_buff *skb);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx);
+void ath9k_chanctx_stop_queues(struct ath_softc *sc, struct ath_chanctx *ctx);
+void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx);
+
+void ath_chanctx_beacon_recv_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev);
+void ath_chanctx_beacon_sent_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev);
+void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
+ enum ath_chanctx_event ev);
+void ath_chanctx_set_next(struct ath_softc *sc, bool force);
+void ath_offchannel_next(struct ath_softc *sc);
+void ath_scan_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc, bool abort);
+struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
+
+#else
+
+static inline bool ath9k_is_chanctx_enabled(void)
+{
+ return false;
+}
+static inline void ath9k_fill_chanctx_ops(void)
+{
+}
+static inline void ath9k_init_channel_context(struct ath_softc *sc)
+{
+}
+static inline void ath9k_offchannel_init(struct ath_softc *sc)
+{
+}
+static inline void ath9k_deinit_channel_context(struct ath_softc *sc)
+{
+}
+static inline void ath_chanctx_beacon_recv_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev)
+{
+}
+static inline void ath_chanctx_beacon_sent_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev)
+{
+}
+static inline void ath_chanctx_event(struct ath_softc *sc,
+ struct ieee80211_vif *vif,
+ enum ath_chanctx_event ev)
+{
+}
+static inline int ath9k_init_p2p(struct ath_softc *sc)
+{
+ return 0;
+}
+static inline void ath9k_deinit_p2p(struct ath_softc *sc)
+{
+}
+static inline void ath9k_p2p_remove_vif(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+}
+static inline void ath9k_p2p_beacon_sync(struct ath_softc *sc)
+{
+}
+static inline void ath9k_p2p_bss_info_changed(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+}
+static inline void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp,
+ struct sk_buff *skb)
+{
+}
+static inline void ath9k_p2p_ps_timer(void *priv)
+{
+}
+static inline void ath9k_chanctx_wake_queues(struct ath_softc *sc,
+ struct ath_chanctx *ctx)
+{
+}
+static inline void ath9k_chanctx_stop_queues(struct ath_softc *sc,
+ struct ath_chanctx *ctx)
+{
+}
+static inline void ath_chanctx_check_active(struct ath_softc *sc,
+ struct ath_chanctx *ctx)
+{
+}
+
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
+void ath_startrecv(struct ath_softc *sc);
+bool ath_stoprecv(struct ath_softc *sc);
+u32 ath_calcrxfilter(struct ath_softc *sc);
+int ath_rx_init(struct ath_softc *sc, int nbufs);
+void ath_rx_cleanup(struct ath_softc *sc);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
+struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
+bool ath_drain_all_txq(struct ath_softc *sc);
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
+void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_schedule_all(struct ath_softc *sc);
+int ath_tx_init(struct ath_softc *sc, int nbufs);
+int ath_txq_update(struct ath_softc *sc, int qnum,
+ struct ath9k_tx_queue_info *q);
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+void ath_tx_tasklet(struct ath_softc *sc);
+void ath_tx_edma_tasklet(struct ath_softc *sc);
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn);
+void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+
+void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
+void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ struct ath_node *an);
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+
+/********/
+/* VIFs */
+/********/
+
+#define P2P_DEFAULT_CTWIN 10
+
+struct ath_vif {
+ struct list_head list;
+
+ u16 seq_no;
+
+ /* BSS info */
+ u8 bssid[ETH_ALEN] __aligned(2);
+ u16 aid;
+ bool assoc;
+
+ struct ieee80211_vif *vif;
+ struct ath_node mcast_node;
+ int av_bslot;
+ __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
+ struct ath_buf *av_bcbuf;
+ struct ath_chanctx *chanctx;
+
+ /* P2P Client */
+ struct ieee80211_noa_data noa;
+
+ /* P2P GO */
+ u8 noa_index;
+ u32 offchannel_start;
+ u32 offchannel_duration;
+
+	/* These are used for both periodic and one-shot NoA */
+ u32 noa_start;
+ u32 noa_duration;
+ bool periodic_noa;
+ bool oneshot_noa;
+};
+
+struct ath9k_vif_iter_data {
+ u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ bool has_hw_macaddr;
+ u8 slottime;
+ bool beacons;
+
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+ struct ieee80211_vif *primary_sta;
+};
+
+void ath9k_calculate_iter_data(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath9k_vif_iter_data *iter_data);
+void ath9k_calculate_summary_state(struct ath_softc *sc,
+ struct ath_chanctx *ctx);
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif);
+
+/*******************/
+/* Beacon Handling */
+/*******************/
+
+/*
+ * Regardless of the number of beacons we stagger (i.e. regardless of the
+ * number of BSSIDs), if a given beacon does not go out even after waiting this
+ * number of beacon intervals, the game's up.
+ */
+#define BSTUCK_THRESH 9
+#define ATH_BCBUF 8
+#define ATH_DEFAULT_BINTVAL 100 /* TU */
+#define ATH_DEFAULT_BMISS_LIMIT 10
+
+#define TSF_TO_TU(_h,_l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
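+/*
+ * TSF_TO_TU converts a 64-bit TSF in microseconds, passed as its high and
+ * low 32-bit halves, into time units (1 TU = 1024 usec); the result is the
+ * 64-bit TSF divided by 1024, truncated to 32 bits. E.g. a TSF of 2^32 usec
+ * (_h = 1, _l = 0) yields 1 << 22 = 4194304 TU.
+ */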
+
+struct ath_beacon {
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } updateslot; /* slot time update fsm */
+
+ u32 beaconq;
+ u32 bmisscnt;
+ struct ieee80211_vif *bslot[ATH_BCBUF];
+ int slottime;
+ int slotupdate;
+ struct ath_descdma bdma;
+ struct ath_txq *cabq;
+ struct list_head bbuf;
+
+ bool tx_processed;
+ bool tx_last;
+};
+
+void ath9k_beacon_tasklet(unsigned long data);
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed);
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_set_beacon(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
+
+/*******************/
+/* Link Monitoring */
+/*******************/
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
+#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+#define ATH_ANI_MAX_SKIP_COUNT 10
+#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
+
+void ath_tx_complete_poll_work(struct work_struct *work);
+void ath_reset_work(struct work_struct *work);
+bool ath_hw_check(struct ath_softc *sc);
+void ath_hw_pll_work(struct work_struct *work);
+void ath_paprd_calibrate(struct work_struct *work);
+void ath_ani_calibrate(unsigned long data);
+void ath_start_ani(struct ath_softc *sc);
+void ath_stop_ani(struct ath_softc *sc);
+void ath_check_ani(struct ath_softc *sc);
+int ath_update_survey_stats(struct ath_softc *sc);
+void ath_update_survey_nf(struct ath_softc *sc, int channel);
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
+void ath_ps_full_sleep(unsigned long data);
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override);
+
+/**********/
+/* BTCOEX */
+/**********/
+
+#define ATH_DUMP_BTCOEX(_s, _val) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
+ } while (0)
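+/*
+ * NB: ATH_DUMP_BTCOEX expects 'buf', 'len' and 'size' to be in scope at
+ * the call site; it appends one formatted "name : value" line to buf.
+ */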
+
+enum bt_op_flags {
+ BT_OP_PRIORITY_DETECTED,
+ BT_OP_SCAN,
+};
+
+struct ath_btcoex {
+ spinlock_t btcoex_lock;
+ struct timer_list period_timer; /* Timer for BT period */
+ struct timer_list no_stomp_timer;
+ u32 bt_priority_cnt;
+ unsigned long bt_priority_time;
+ unsigned long op_flags;
+ int bt_stomp_type; /* Types of BT stomping */
+ u32 btcoex_no_stomp; /* in msec */
+ u32 btcoex_period; /* in msec */
+ u32 btscan_no_stomp; /* in msec */
+ u32 duty_cycle;
+ u32 bt_wait_time;
+ int rssi_count;
+ struct ath_mci_profile mci;
+ u8 stomp_audio;
+};
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+int ath9k_init_btcoex(struct ath_softc *sc);
+void ath9k_deinit_btcoex(struct ath_softc *sc);
+void ath9k_start_btcoex(struct ath_softc *sc);
+void ath9k_stop_btcoex(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
+u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size);
+#else
+static inline int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ return 0;
+}
+static inline void ath9k_deinit_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_start_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_stop_btcoex(struct ath_softc *sc)
+{
+}
+static inline void ath9k_btcoex_handle_interrupt(struct ath_softc *sc,
+ u32 status)
+{
+}
+static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
+ u32 max_4ms_framelen)
+{
+ return 0;
+}
+static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+}
+static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+/********************/
+/* LED Control */
+/********************/
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 8
+#define ATH_LED_PIN_9300 10
+#define ATH_LED_PIN_9485 6
+#define ATH_LED_PIN_9462 4
+
+#ifdef CONFIG_MAC80211_LEDS
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+void ath_fill_led_pin(struct ath_softc *sc);
+#else
+static inline void ath_init_leds(struct ath_softc *sc)
+{
+}
+
+static inline void ath_deinit_leds(struct ath_softc *sc)
+{
+}
+static inline void ath_fill_led_pin(struct ath_softc *sc)
+{
+}
+#endif
+
+/************************/
+/* Wake on Wireless LAN */
+/************************/
+
+#ifdef CONFIG_ATH9K_WOW
+void ath9k_init_wow(struct ieee80211_hw *hw);
+void ath9k_deinit_wow(struct ieee80211_hw *hw);
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath9k_resume(struct ieee80211_hw *hw);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+#else
+static inline void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+}
+static inline void ath9k_deinit_wow(struct ieee80211_hw *hw)
+{
+}
+static inline int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ return 0;
+}
+static inline int ath9k_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+static inline void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+}
+#endif /* CONFIG_ATH9K_WOW */
+
+/*******************************/
+/* Antenna diversity/combining */
+/*******************************/
+
+#define ATH_ANT_RX_CURRENT_SHIFT 4
+#define ATH_ANT_RX_MAIN_SHIFT 2
+#define ATH_ANT_RX_MASK 0x3
+
+#define ATH_ANT_DIV_COMB_SHORT_SCAN_INTR 50
+#define ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT 0x100
+#define ATH_ANT_DIV_COMB_MAX_PKTCOUNT 0x200
+#define ATH_ANT_DIV_COMB_INIT_COUNT 95
+#define ATH_ANT_DIV_COMB_MAX_COUNT 100
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
+
+#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
+#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
+#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
+
+struct ath_ant_comb {
+ u16 count;
+ u16 total_pkt_count;
+ bool scan;
+ bool scan_not_start;
+ int main_total_rssi;
+ int alt_total_rssi;
+ int alt_recv_cnt;
+ int main_recv_cnt;
+ int rssi_lna1;
+ int rssi_lna2;
+ int rssi_add;
+ int rssi_sub;
+ int rssi_first;
+ int rssi_second;
+ int rssi_third;
+ int ant_ratio;
+ int ant_ratio2;
+ bool alt_good;
+ int quick_scan_cnt;
+ enum ath9k_ant_div_comb_lna_conf main_conf;
+ enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
+ enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
+ bool first_ratio;
+ bool second_ratio;
+ unsigned long scan_start_time;
+
+ /*
+ * Card-specific config values.
+ */
+ int low_rssi_thresh;
+ int fast_div_bias;
+};
+
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
+
+/********************/
+/* Main driver core */
+/********************/
+
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_CUS252 0x0008
+#define ATH9K_PCI_WOW 0x0010
+#define ATH9K_PCI_BT_ANT_DIV 0x0020
+#define ATH9K_PCI_D3_L1_WAR 0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
+#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200
+#define ATH9K_PCI_KILLER 0x0400
+#define ATH9K_PCI_LED_ACT_HI 0x0800
+
+/*
+ * Default cache line size, in bytes.
+ * Used when the PCI device is not fully initialized by the bootrom/BIOS.
+ */
+#define DEFAULT_CACHELINE 32
+#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
+#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
+#define MAX_GTT_CNT 5
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_WAIT_FOR_ANI BIT(5)
+
+#define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */
+
+struct ath_softc {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ struct survey_info *cur_survey;
+ struct survey_info survey[ATH9K_NUM_CHANNELS];
+
+ struct tasklet_struct intr_tq;
+ struct tasklet_struct bcon_tasklet;
+ struct ath_hw *sc_ah;
+ void __iomem *mem;
+ int irq;
+ spinlock_t sc_serial_rw;
+ spinlock_t sc_pm_lock;
+ spinlock_t sc_pcu_lock;
+ struct mutex mutex;
+ struct work_struct paprd_work;
+ struct work_struct hw_reset_work;
+ struct completion paprd_complete;
+ wait_queue_head_t tx_wait;
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ struct work_struct chanctx_work;
+ struct ath_gen_timer *p2p_ps_timer;
+ struct ath_vif *p2p_ps_vif;
+ struct ath_chanctx_sched sched;
+ struct ath_offchannel offchannel;
+ struct ath_chanctx *next_chan;
+ struct completion go_beacon;
+#endif
+
+ unsigned long driver_data;
+
+ u8 gtt_cnt;
+ u32 intrstatus;
+ u16 ps_flags; /* PS_* */
+ bool ps_enabled;
+ bool ps_idle;
+ short nbcnvifs;
+ unsigned long ps_usecount;
+
+ struct ath_rx rx;
+ struct ath_tx tx;
+ struct ath_beacon beacon;
+
+ struct cfg80211_chan_def cur_chandef;
+ struct ath_chanctx chanctx[ATH9K_NUM_CHANCTX];
+ struct ath_chanctx *cur_chan;
+ spinlock_t chan_lock;
+
+#ifdef CONFIG_MAC80211_LEDS
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+#endif
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct delayed_work tx_complete_work;
+ struct delayed_work hw_pll_work;
+ struct timer_list sleep_timer;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ struct ath_btcoex btcoex;
+ struct ath_mci_coex mci_coex;
+ struct work_struct mci_work;
+#endif
+
+ struct ath_descdma txsdma;
+
+ struct ath_ant_comb ant_comb;
+ u8 ant_tx, ant_rx;
+ struct dfs_pattern_detector *dfs_detector;
+ u64 dfs_prev_pulse_ts;
+ u32 wow_enabled;
+
+ struct ath_spec_scan_priv spec_priv;
+
+ struct ieee80211_vif *tx99_vif;
+ struct sk_buff *tx99_skb;
+ bool tx99_state;
+ s16 tx99_power;
+
+#ifdef CONFIG_ATH9K_WOW
+ u32 wow_intr_before_sleep;
+ bool force_wow;
+#endif
+};
+
+/********/
+/* TX99 */
+/********/
+
+#ifdef CONFIG_ATH9K_TX99
+void ath9k_tx99_init_debug(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl);
+#else
+static inline void ath9k_tx99_init_debug(struct ath_softc *sc)
+{
+}
+static inline int ath9k_tx99_send(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ return 0;
+}
+#endif /* CONFIG_ATH9K_TX99 */
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_tasklet(unsigned long data);
+int ath_cabq_update(struct ath_softc *);
+u8 ath9k_parse_mpdudensity(u8 mpdudensity);
+irqreturn_t ath_isr(int irq, void *dev);
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan);
+void ath_cancel_work(struct ath_softc *sc);
+void ath_restart_work(struct ath_softc *sc);
+int ath9k_init_device(u16 devid, struct ath_softc *sc,
+ const struct ath_bus_ops *bus_ops);
+void ath9k_deinit_device(struct ath_softc *sc);
+void ath9k_reload_chainmask_settings(struct ath_softc *sc);
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+void ath_start_rfkill_poll(struct ath_softc *sc);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_ps_wakeup(struct ath_softc *sc);
+void ath9k_ps_restore(struct ath_softc *sc);
+
+#ifdef CONFIG_ATH9K_PCI
+int ath_pci_init(void);
+void ath_pci_exit(void);
+#else
+static inline int ath_pci_init(void) { return 0; };
+static inline void ath_pci_exit(void) {};
+#endif
+
+#ifdef CONFIG_ATH9K_AHB
+int ath_ahb_init(void);
+void ath_ahb_exit(void);
+#else
+static inline int ath_ahb_init(void) { return 0; };
+static inline void ath_ahb_exit(void) {};
+#endif
+
+#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
new file mode 100644
index 000000000..f50a6bc5d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "ath9k.h"
+
+#define FUDGE 2
+
+static void ath9k_reset_beacon_status(struct ath_softc *sc)
+{
+ sc->beacon.tx_processed = false;
+ sc->beacon.tx_last = false;
+}
+
+/*
+ * This function modifies certain transmit queue properties depending on
+ * the operating mode of the interface (AP or ad hoc): the AIFS setting
+ * and the contention window (cwmin/cwmax).
+ */
+static void ath9k_beaconq_config(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi, qi_be;
+ struct ath_txq *txq;
+
+ ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
+ sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ /* Always burst out beacon and CAB traffic. */
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ } else {
+ /* Adhoc mode; important thing is to use 2x cwmin. */
+ txq = sc->tx.txq_map[IEEE80211_AC_BE];
+ ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
+ qi.tqi_aifs = qi_be.tqi_aifs;
+ if (ah->slottime == ATH9K_SLOT_TIME_20)
+ qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
+ else
+ qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+ qi.tqi_cwmax = qi_be.tqi_cwmax;
+ }
+
+ if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
+ ath_err(common, "Unable to update h/w beacon queue parameters\n");
+ } else {
+ ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
+ }
+}
+
+/*
+ * Associates the beacon frame buffer with a transmit descriptor. Sets up
+ * the rate code and channel flags. Beacons are always sent out at the
+ * lowest rate and are not retried.
+ */
+static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
+ struct ath_buf *bf, int rateidx)
+{
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_info info;
+ struct ieee80211_supported_band *sband;
+ u8 chainmask = ah->txchainmask;
+ u8 i, rate = 0;
+
+ sband = &common->sbands[sc->cur_chandef.chan->band];
+ rate = sband->bitrates[rateidx].hw_value;
+ if (vif->bss_conf.use_short_preamble)
+ rate |= sband->bitrates[rateidx].hw_value_short;
+
+ memset(&info, 0, sizeof(info));
+ info.pkt_len = skb->len + FCS_LEN;
+ info.type = ATH9K_PKT_TYPE_BEACON;
+ for (i = 0; i < 4; i++)
+ info.txpower[i] = MAX_RATE_POWER;
+ info.keyix = ATH9K_TXKEYIX_INVALID;
+ info.keytype = ATH9K_KEY_TYPE_CLEAR;
+ info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
+
+ info.buf_addr[0] = bf->bf_buf_addr;
+ info.buf_len[0] = roundup(skb->len, 4);
+
+ info.is_first = true;
+ info.is_last = true;
+
+ info.qcu = sc->beacon.beaconq;
+
+ info.rates[0].Tries = 1;
+ info.rates[0].Rate = rate;
+ info.rates[0].ChSel = ath_txchainmask_reduction(sc, chainmask, rate);
+
+ ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
+}
+
+static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_buf *bf;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct sk_buff *skb;
+ struct ath_txq *cabq = sc->beacon.cabq;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_mgmt *mgmt_hdr;
+ int cabq_depth;
+
+ if (avp->av_bcbuf == NULL)
+ return NULL;
+
+ bf = avp->av_bcbuf;
+ skb = bf->bf_mpdu;
+ if (skb) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
+ }
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb == NULL)
+ return NULL;
+
+ bf->bf_mpdu = skb;
+
+ mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
+ mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ ath_assign_seq(common, skb);
+
+ if (vif->p2p)
+ ath9k_beacon_add_noa(sc, avp, skb);
+
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
+ ath_err(common, "dma_mapping_error on beaconing\n");
+ return NULL;
+ }
+
+ skb = ieee80211_get_buffered_bc(hw, vif);
+
+ /*
+	 * If CABQ traffic from the previous DTIM is still pending and the
+	 * current beacon is also a DTIM:
+	 * 1) if there is only one vif, let the CAB traffic continue.
+	 * 2) if there is more than one vif and we are using staggered
+	 *    beacons, drain the CABQ by dropping all the frames in it so
+	 *    that the current vif's CAB traffic can be scheduled.
+ */
+ spin_lock_bh(&cabq->axq_lock);
+ cabq_depth = cabq->axq_depth;
+ spin_unlock_bh(&cabq->axq_lock);
+
+ if (skb && cabq_depth) {
+ if (sc->cur_chan->nvifs > 1) {
+ ath_dbg(common, BEACON,
+ "Flushing previous cabq traffic\n");
+ ath_draintxq(sc, cabq);
+ }
+ }
+
+ ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
+
+ if (skb)
+ ath_tx_cabq(hw, vif, skb);
+
+ return bf;
+}
+
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ int slot;
+
+ avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf, struct ath_buf, list);
+ list_del(&avp->av_bcbuf->list);
+
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot] == NULL) {
+ avp->av_bslot = slot;
+ break;
+ }
+ }
+
+ sc->beacon.bslot[avp->av_bslot] = vif;
+ sc->nbcnvifs++;
+
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->av_bslot);
+}
+
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_buf *bf = avp->av_bcbuf;
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+
+ ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
+ avp->av_bslot);
+
+ tasklet_disable(&sc->bcon_tasklet);
+
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
+ if (bf && bf->bf_mpdu) {
+ struct sk_buff *skb = bf->bf_mpdu;
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
+ }
+
+ avp->av_bcbuf = NULL;
+ sc->beacon.bslot[avp->av_bslot] = NULL;
+ sc->nbcnvifs--;
+ list_add_tail(&bf->list, &sc->beacon.bbuf);
+
+ tasklet_enable(&sc->bcon_tasklet);
+}
+
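+/*
+ * With staggered beacons the SWBA fires every intval / ATH_BCBUF TU, one
+ * slot per firing, so the owning slot can be recovered from the
+ * (response-time adjusted) TSF as
+ * ((tsf_in_tu * ATH_BCBUF) % (intval * ATH_BCBUF)) / intval.
+ * For illustration, with intval = 100 TU and ATH_BCBUF = 8, a TSF that is
+ * 50 TU past the interval start maps to slot (50 * 8) / 100 = 4.
+ */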
+static int ath9k_beacon_choose_slot(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+ u16 intval;
+ u32 tsftu;
+ u64 tsf;
+ int slot;
+
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_AP &&
+ sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) {
+ ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
+ ath9k_hw_gettsf64(sc->sc_ah));
+ return 0;
+ }
+
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf += TU_TO_USEC(sc->sc_ah->config.sw_beacon_response_time);
+	tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >> 32, tsf * ATH_BCBUF);
+ slot = (tsftu % (intval * ATH_BCBUF)) / intval;
+
+ ath_dbg(common, BEACON, "slot: %d tsf: %llu tsftu: %u\n",
+ slot, tsf, tsftu / ATH_BCBUF);
+
+ return slot;
+}
+
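+/*
+ * Slot N's beacons go out N * intval / ATH_BCBUF TU after slot 0's, so the
+ * timestamp field of those beacons is compensated by the same amount to
+ * keep the TSF advertised to that BSS consistent. For illustration, with a
+ * 100 TU interval and ATH_BCBUF = 8, slot 2 gets
+ * tsfadjust = TU_TO_USEC(2 * 100) / 8 = 25600 usec (25 TU).
+ */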
+static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_beacon_config *cur_conf = &avp->chanctx->beacon;
+ u32 tsfadjust;
+
+ if (avp->av_bslot == 0)
+ return;
+
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+ tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(tsfadjust);
+
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
+ (unsigned long long)tsfadjust, avp->av_bslot);
+}
+
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ if (!vif || !vif->csa_active)
+ return false;
+
+ if (!ieee80211_csa_is_complete(vif))
+ return false;
+
+ ieee80211_csa_finish(vif);
+ return true;
+}
+
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif, sc);
+}
+
+void ath9k_beacon_tasklet(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_buf *bf = NULL;
+ struct ieee80211_vif *vif;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int slot;
+
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
+ ath_dbg(common, RESET,
+ "reset work is pending, skip beaconing now\n");
+ return;
+ }
+
+ /*
+ * Check if the previous beacon has gone out. If
+	 * not, don't try to post another; skip this period
+	 * and wait for the next. Missed beacons indicate
+	 * a problem and should not occur. If we miss too
+	 * many consecutive beacons, reset the device.
+ */
+ if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
+ sc->beacon.bmisscnt++;
+
+ ath9k_hw_check_nav(ah);
+
+ /*
+ * If the previous beacon has not been transmitted
+ * and a MAC/BB hang has been identified, return
+ * here because a chip reset would have been
+ * initiated.
+ */
+ if (!ath_hw_check(sc))
+ return;
+
+ if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
+ ath_dbg(common, BSTUCK,
+ "missed %u consecutive beacons\n",
+ sc->beacon.bmisscnt);
+ ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
+ if (sc->beacon.bmisscnt > 3)
+ ath9k_hw_bstuck_nfcal(ah);
+ } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
+ ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
+ sc->beacon.bmisscnt = 0;
+ ath9k_queue_reset(sc, RESET_TYPE_BEACON_STUCK);
+ }
+
+ return;
+ }
+
+ slot = ath9k_beacon_choose_slot(sc);
+ vif = sc->beacon.bslot[slot];
+
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma) {
+ if (ath9k_is_chanctx_enabled()) {
+ ath_chanctx_beacon_sent_ev(sc,
+ ATH_CHANCTX_EVENT_BEACON_SENT);
+ }
+
+ if (ath9k_csa_is_finished(sc, vif))
+ return;
+ }
+
+ if (!vif || !vif->bss_conf.enable_beacon)
+ return;
+
+ if (ath9k_is_chanctx_enabled()) {
+ ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE);
+ }
+
+ bf = ath9k_beacon_generate(sc->hw, vif);
+
+ if (sc->beacon.bmisscnt != 0) {
+ ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
+ sc->beacon.bmisscnt);
+ sc->beacon.bmisscnt = 0;
+ }
+
+ /*
+ * Handle slot time change when a non-ERP station joins/leaves
+ * an 11g network. The 802.11 layer notifies us via callback,
+ * we mark updateslot, then wait one beacon before effecting
+ * the change. This gives associated stations at least one
+ * beacon interval to note the state change.
+ *
+ * NB: The slot time change state machine is clocked according
+ * to whether we are bursting or staggering beacons. We
+ * recognize the request to update and record the current
+ * slot then don't transition until that slot is reached
+ * again. If we miss a beacon for that slot then we'll be
+ * slow to transition but we'll be sure at least one beacon
+	 * interval has passed. When bursting, slot is always left
+ * set to ATH_BCBUF so this check is a noop.
+ */
+ if (sc->beacon.updateslot == UPDATE) {
+ sc->beacon.updateslot = COMMIT;
+ sc->beacon.slotupdate = slot;
+ } else if (sc->beacon.updateslot == COMMIT &&
+ sc->beacon.slotupdate == slot) {
+ ah->slottime = sc->beacon.slottime;
+ ath9k_hw_init_global_settings(ah);
+ sc->beacon.updateslot = OK;
+ }
+
+ if (bf) {
+ ath9k_reset_beacon_status(sc);
+
+ ath_dbg(common, BEACON,
+ "Transmitting beacon for slot: %d\n", slot);
+
+ /* NB: cabq traffic should already be queued and primed */
+ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
+
+ if (!edma)
+ ath9k_hw_txstart(ah, sc->beacon.beaconq);
+ }
+}
+
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
+ u32 intval, bool reset_tsf)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath9k_hw_disable_interrupts(ah);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
+ ath9k_beaconq_config(sc);
+ ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ sc->beacon.bmisscnt = 0;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+}
+
+/*
+ * For multi-BSS AP support, beacons are either staggered evenly over N slots
+ * or burst together. For the former, arrange for the SWBA interrupt to be
+ * delivered for each slot. Slots that are not occupied generate nothing.
+ */
+static void ath9k_beacon_config_ap(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
+}
+
+static void ath9k_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf)
+{
+ struct ath9k_beacon_state bs;
+
+ if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
+ return;
+
+ ath9k_hw_disable_interrupts(ah);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ ah->imask |= ATH9K_INT_BMISS;
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+}
+
+static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath9k_reset_beacon_status(sc);
+
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
+
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
+
+ /*
+ * Set the global 'beacon has been configured' flag for the
+ * joiner case in IBSS mode.
+ */
+ if (!conf->ibss_creator && conf->enable_beacon)
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
+}
+
+static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ if (ath9k_is_chanctx_enabled()) {
+ /*
+ * If the VIF is not present in the current channel context,
+ * then we can't do the usual opmode checks. Allow the
+ * beacon config for the VIF to be updated in this case and
+ * return immediately.
+ */
+ if (sc->cur_chan != avp->chanctx)
+ return true;
+ }
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+ if (vif->type != NL80211_IFTYPE_AP) {
+ ath_dbg(common, CONFIG,
+ "An AP interface is already present !\n");
+ return false;
+ }
+ }
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ if ((vif->type == NL80211_IFTYPE_STATION) &&
+ test_bit(ATH_OP_BEACONS, &common->op_flags) &&
+ vif != sc->cur_chan->primary_sta) {
+ ath_dbg(common, CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath9k_cache_beacon_config(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &ctx->beacon;
+
+ ath_dbg(common, BEACON,
+ "Caching beacon data for BSS: %pM\n", bss_conf->bssid);
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->dtim_count = 1;
+ cur_conf->ibss_creator = bss_conf->ibss_creator;
+
+ /*
+	 * It looks like mac80211 may end up using a beacon interval of zero
+	 * in some cases (at least for mesh point). Avoid getting into an
+	 * infinite loop by using a somewhat safer value instead. To be safe,
+	 * sanity-check the beacon interval for all operating modes.
+ */
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ /*
+	 * We don't parse the DTIM period from mac80211 during driver
+	 * initialization, as doing so breaks association with hidden-SSID
+	 * APs and adds latency when roaming.
+ */
+ if (cur_conf->dtim_period == 0)
+ cur_conf->dtim_period = 1;
+
+}
+
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed)
+{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_chanctx *ctx = avp->chanctx;
+ struct ath_beacon_config *cur_conf;
+ unsigned long flags;
+ bool skip_beacon = false;
+
+ if (!ctx)
+ return;
+
+ cur_conf = &avp->chanctx->beacon;
+ if (vif->type == NL80211_IFTYPE_AP)
+ ath9k_set_tsfadjust(sc, vif);
+
+ if (!ath9k_allow_beacon_config(sc, vif))
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ ath9k_cache_beacon_config(sc, ctx, bss_conf);
+ if (ctx != sc->cur_chan)
+ return;
+
+ ath9k_set_beacon(sc);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
+ return;
+ }
+
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ bool enabled = cur_conf->enable_beacon;
+
+ if (!bss_conf->enable_beacon) {
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+ } else {
+ cur_conf->enable_beacon |= BIT(avp->av_bslot);
+ if (!enabled)
+ ath9k_cache_beacon_config(sc, ctx, bss_conf);
+ }
+ }
+
+ if (ctx != sc->cur_chan)
+ return;
+
+ /*
+ * Configure the HW beacon registers only when we have a valid
+ * beacon interval.
+ */
+ if (cur_conf->beacon_interval) {
+ /*
+ * If we are joining an existing IBSS network, start beaconing
+ * only after a TSF-sync has taken place. Ensure that this
+ * happens by setting the appropriate flags.
+ */
+ if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
+ bss_conf->enable_beacon) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ skip_beacon = true;
+ } else {
+ ath9k_set_beacon(sc);
+ }
+
+ /*
+ * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
+ * here, it is done in ath9k_beacon_config_adhoc().
+ */
+ if (cur_conf->enable_beacon && !skip_beacon)
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
+ else
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
+ }
+}
+
+void ath9k_set_beacon(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+
+ switch (sc->sc_ah->opmode) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ ath9k_beacon_config_ap(sc, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_beacon_config_adhoc(sc, cur_conf);
+ break;
+ case NL80211_IFTYPE_STATION:
+ ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
+ break;
+ default:
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
new file mode 100644
index 000000000..5a084d94e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+
+enum ath_bt_mode {
+ ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
+ ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
+ ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
+ ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
+};
+
+struct ath_btcoex_config {
+ u8 bt_time_extend;
+ bool bt_txstate_extend;
+ bool bt_txframe_extend;
+ enum ath_bt_mode bt_mode; /* coexistence mode */
+ bool bt_quiet_collision;
+ bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
+ u8 bt_priority_time;
+ u8 bt_first_slot_time;
+ bool bt_hold_rx_clear;
+};
+
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+ { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
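+/*
+ * Each row above gives the four WLAN weight register values for one stomp
+ * level: STOMP_ALL lets WLAN pre-empt all Bluetooth traffic, STOMP_LOW only
+ * low-priority Bluetooth traffic, and STOMP_NONE never pre-empts.
+ */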
+
+static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+ { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+ { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+ { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+ { 0xffffff01, 0xffffffff, 0xffffff01, 0xffffffff }, /* STOMP_AUDIO */
+};
+
+void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ const struct ath_btcoex_config ath_bt_config = {
+ .bt_time_extend = 0,
+ .bt_txstate_extend = true,
+ .bt_txframe_extend = true,
+ .bt_mode = ATH_BT_COEX_MODE_SLOTTED,
+ .bt_quiet_collision = true,
+ .bt_rxclear_polarity = true,
+ .bt_priority_time = 2,
+ .bt_first_slot_time = 5,
+ .bt_hold_rx_clear = true,
+ };
+ bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
+
+ btcoex_hw->bt_coex_mode =
+ (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
+ SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
+ SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
+ SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
+ SM(ath_bt_config.bt_mode, AR_BT_MODE) |
+ SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
+ SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
+ SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
+ SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
+ SM(qnum, AR_BT_QCU_THRESH);
+
+ btcoex_hw->bt_coex_mode2 =
+ SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
+ SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
+ AR_BT_DISABLE_BT_ANT;
+}
+EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
+
+void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ /*
+ * Check if BTCOEX is globally disabled.
+ */
+ if (!common->btcoex_enabled) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
+ return;
+ }
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ } else if (AR_SREV_9300_20_OR_LATER(ah)) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
+ btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
+ btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
+ btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
+ btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
+
+ if (AR_SREV_9285(ah)) {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
+ btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
+ } else {
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
+ }
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
+
+void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ /* connect bt_active to baseband */
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
+ AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
+
+ /* Set input mux for bt_active to gpio pin */
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
+
+ /* Configure the desired gpio port for input */
+ ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
+
+void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ /* btcoex 3-wire */
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
+ AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
+
+ /* Set input mux for bt_priority_async and
+ * bt_active_async to GPIO pins */
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
+
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+ btcoex_hw->btpriority_gpio);
+
+ /* Configure the desired GPIO ports for input */
+
+ ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+ ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
+
+void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
+{
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = (AR_SREV_9462(ah)) ? 0x2201 : 0xa4c1;
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
+
+static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ /* Configure the desired GPIO port for TX_FRAME output */
+ ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+ AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
+}
+
+/*
+ * For AR9002, bt_weight/wlan_weight are used.
+ * For AR9003 and above, stomp_type is used.
+ */
+void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
+ u32 bt_weight,
+ u32 wlan_weight,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u8 txprio_shift[] = { 24, 16, 16, 0 }; /* tx priority weight */
+ bool concur_tx = (mci_hw->concur_tx && btcoex_hw->tx_prio[stomp_type]);
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
+ int i;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ btcoex_hw->bt_coex_weights =
+ SM(bt_weight, AR_BTCOEX_BT_WGHT) |
+ SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+ return;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ enum ath_stomp_type stype =
+ ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex_hw->mci.stomp_ftp) ?
+ ATH_BTCOEX_STOMP_LOW_FTP : stomp_type;
+ weight = mci_wlan_weights[stype];
+ }
+
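+ /*
+ * Copy the stomp weights; when concurrent tx is allowed, the WLAN tx
+ * priority for this stomp type is patched into the weight words at
+ * the byte positions given by txprio_shift[].
+ */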
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex_hw->wlan_weight[i] = weight[i];
+ if (concur_tx && i) {
+ btcoex_hw->wlan_weight[i] &=
+ ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i] |=
+ (btcoex_hw->tx_prio[stomp_type] <<
+ txprio_shift[i-1]);
+ }
+ }
+ /* Last WLAN weight has to be adjusted wrt tx priority */
+ if (concur_tx) {
+ btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
+ << txprio_shift[i-1]);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
+
+static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ u32 val;
+ int i;
+
+ /*
+ * Program coex mode and weight registers to
+ * enable coex 3-wire
+ */
+ REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+ btcoex->bt_weight[i]);
+ } else
+ REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
+
+ if (AR_SREV_9271(ah)) {
+ val = REG_READ(ah, 0x50040);
+ val &= 0xFFFFFEFF;
+ REG_WRITE(ah, 0x50040, val);
+ }
+
+ REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
+ AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
+}
+
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex->wlan_weight[i]);
+
+ REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ btcoex->enabled = true;
+}
+
+static void ath9k_hw_btcoex_disable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex_hw->wlan_weight[i]);
+}
+
+void ath9k_hw_btcoex_enable(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ switch (ath9k_hw_get_btcoex_scheme(ah)) {
+ case ATH_BTCOEX_CFG_NONE:
+ return;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_enable_2wire(ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_enable_3wire(ah);
+ break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath9k_hw_btcoex_enable_mci(ah);
+ break;
+ }
+
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+ REG_RMW(ah, AR_GPIO_PDPU,
+ (0x2 << (btcoex_hw->btactive_gpio * 2)),
+ (0x3 << (btcoex_hw->btactive_gpio * 2)));
+ }
+
+ ah->btcoex_hw.enabled = true;
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
+
+void ath9k_hw_btcoex_disable(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ btcoex_hw->enabled = false;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) {
+ ath9k_hw_btcoex_disable_mci(ah);
+ return;
+ }
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
+
+ ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
+ REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
+ REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
+ } else
+ REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
+
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
+ enum ath_stomp_type stomp_type)
+{
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
+ return;
+ }
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT, 0);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT, 0);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT, 0);
+ break;
+ default:
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
+ break;
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
+
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ btcoex->tx_prio[i] = stomp_txprio[i];
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_concur_txprio);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
new file mode 100644
index 000000000..cd2f0a237
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BTCOEX_H
+#define BTCOEX_H
+
+#include "hw.h"
+
+#define ATH_WLANACTIVE_GPIO_9280 5
+#define ATH_BTACTIVE_GPIO_9280 6
+#define ATH_BTPRIORITY_GPIO_9285 7
+
+#define ATH_WLANACTIVE_GPIO_9300 5
+#define ATH_BTACTIVE_GPIO_9300 4
+#define ATH_BTPRIORITY_GPIO_9300 8
+
+#define ATH_BTCOEX_DEF_BT_PERIOD 45
+#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
+#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
+#define ATH_BTCOEX_BMISS_THRESH 50
+
+#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
+#define ATH_BT_CNT_THRESHOLD 3
+#define ATH_BT_CNT_SCAN_THRESHOLD 15
+
+#define ATH_BTCOEX_RX_WAIT_TIME 100
+#define ATH_BTCOEX_STOMP_FTP_THRESH 5
+
+#define ATH_BTCOEX_HT20_MAX_TXPOWER 0x14
+#define ATH_BTCOEX_HT40_MAX_TXPOWER 0x10
+
+#define AR9300_NUM_BT_WEIGHTS 4
+#define AR9300_NUM_WLAN_WEIGHTS 4
+
+#define ATH_AIC_MAX_BT_CHANNEL 79
+
+/* Defines the BT AR_BT_COEX_WGHT used */
+enum ath_stomp_type {
+ ATH_BTCOEX_STOMP_ALL,
+ ATH_BTCOEX_STOMP_LOW,
+ ATH_BTCOEX_STOMP_NONE,
+ ATH_BTCOEX_STOMP_LOW_FTP,
+ ATH_BTCOEX_STOMP_AUDIO,
+ ATH_BTCOEX_STOMP_MAX
+};
+
+enum ath_btcoex_scheme {
+ ATH_BTCOEX_CFG_NONE,
+ ATH_BTCOEX_CFG_2WIRE,
+ ATH_BTCOEX_CFG_3WIRE,
+ ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+ u32 raw_intr;
+ u32 rx_msg_intr;
+ u32 cont_status;
+ u32 gpm_addr;
+ u32 gpm_len;
+ u32 gpm_idx;
+ u32 sched_addr;
+ u32 wlan_channels[4];
+ u32 wlan_cal_seq;
+ u32 wlan_cal_done;
+ u32 config;
+ u8 *gpm_buf;
+ bool ready;
+ bool update_2g5g;
+ bool is_2g;
+ bool query_bt;
+ bool unhalt_bt_gpm; /* need to send UNHALT */
+ bool halted_bt_gpm; /* HALT sent */
+ bool need_flush_btinfo;
+ bool bt_version_known;
+ bool wlan_channels_update;
+ u8 wlan_ver_major;
+ u8 wlan_ver_minor;
+ u8 bt_ver_major;
+ u8 bt_ver_minor;
+ u8 bt_state;
+ u8 stomp_ftp;
+ bool concur_tx;
+ u32 last_recovery;
+};
+
+struct ath9k_hw_aic {
+ bool aic_enabled;
+ u8 aic_cal_state;
+ u8 aic_caled_chan;
+ u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
+ u32 aic_cal_start_time;
+};
+
+struct ath_btcoex_hw {
+ enum ath_btcoex_scheme scheme;
+ struct ath9k_hw_mci mci;
+ struct ath9k_hw_aic aic;
+ bool enabled;
+ u8 wlanactive_gpio;
+ u8 btactive_gpio;
+ u8 btpriority_gpio;
+ u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
+ u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
+ u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+ u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
+ u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
+};
+
+void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
+void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
+void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
+ u32 bt_weight,
+ u32 wlan_weight,
+ enum ath_stomp_type stomp_type);
+void ath9k_hw_btcoex_disable(struct ath_hw *ah);
+void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
+ enum ath_stomp_type stomp_type);
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
new file mode 100644
index 000000000..3e2e24e48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include <linux/export.h>
+
+/* Common calibration code */
+
+
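+/*
+ * Return the median of the NF calibration history buffer: sort a copy
+ * in descending order and pick the middle element.
+ */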
+static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
+{
+ int16_t nfval;
+ int16_t sort[ATH9K_NF_CAL_HIST_MAX];
+ int i, j;
+
+ for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
+ sort[i] = nfCalBuffer[i];
+
+ for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
+ for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
+ if (sort[j] > sort[j - 1]) {
+ nfval = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = nfval;
+ }
+ }
+ }
+ nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
+
+ return nfval;
+}
+
+static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_nf_limits *limit;
+
+ if (!chan || IS_CHAN_2GHZ(chan))
+ limit = &ah->nf_2g;
+ else
+ limit = &ah->nf_5g;
+
+ return limit;
+}
+
+static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_get_nf_limits(ah, chan)->nominal;
+}
+
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf)
+{
+ s8 noise = ATH_DEFAULT_NOISE_FLOOR;
+
+ if (nf) {
+ s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
+ ath9k_hw_get_default_nf(ah, chan);
+ if (delta > 0)
+ noise += delta;
+ }
+ return noise;
+}
+EXPORT_SYMBOL(ath9k_hw_getchan_noise);
+
+static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *cal,
+ int16_t *nfarray)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_nf_limits *limit;
+ struct ath9k_nfcal_hist *h;
+ bool high_nf_mid = false;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ int i;
+
+ h = cal->nfCalHist;
+ limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!(chainmask & (1 << i)) ||
+ ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(ah->curchan)))
+ continue;
+
+ h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
+
+ if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
+ h[i].currIndex = 0;
+
+ if (h[i].invalidNFcount > 0) {
+ h[i].invalidNFcount--;
+ h[i].privNF = nfarray[i];
+ } else {
+ h[i].privNF =
+ ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
+ }
+
+ if (!h[i].privNF)
+ continue;
+
+ if (h[i].privNF > limit->max) {
+ high_nf_mid = true;
+
+ ath_dbg(common, CALIBRATE,
+ "NFmid[%d] (%d) > MAX (%d), %s\n",
+ i, h[i].privNF, limit->max,
+ (test_bit(NFCAL_INTF, &cal->cal_flags) ?
+ "not corrected (due to interference)" :
+ "correcting to MAX"));
+
+ /*
+ * Normally we limit the average noise floor by the
+ * hardware-specific maximum here. However, if we have
+ * encountered stuck beacons because of interference,
+ * we bypass this limit here in order to better deal
+ * with our environment.
+ */
+ if (!test_bit(NFCAL_INTF, &cal->cal_flags))
+ h[i].privNF = limit->max;
+ }
+ }
+
+ /*
+ * If the noise floor seems normal for all chains, assume that
+ * there is no significant interference in the environment anymore.
+ * Re-enable the enforcement of the NF maximum.
+ */
+ if (!high_nf_mid)
+ clear_bit(NFCAL_INTF, &cal->cal_flags);
+}
+
+static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
+ enum ieee80211_band band,
+ int16_t *nft)
+{
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
+ break;
+ case IEEE80211_BAND_2GHZ:
+ *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
+ break;
+ default:
+ BUG_ON(1);
+ return false;
+ }
+
+ return true;
+}
+
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ int i;
+
+ ath9k_hw_setup_calibration(ah, currCal);
+
+ currCal->calState = CAL_RUNNING;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->meas0.sign[i] = 0;
+ ah->meas1.sign[i] = 0;
+ ah->meas2.sign[i] = 0;
+ ah->meas3.sign[i] = 0;
+ }
+
+ ah->cal_samples = 0;
+}
+
+/* This is done for the currently configured channel */
+bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ if (!ah->caldata)
+ return true;
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
+ return true;
+
+ if (currCal == NULL)
+ return true;
+
+ if (currCal->calState != CAL_DONE) {
+ ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
+ currCal->calState);
+ return true;
+ }
+
+ if (!(ah->supp_cals & currCal->calData->calType))
+ return true;
+
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
+ currCal->calData->calType, ah->curchan->chan->center_freq);
+
+ ah->caldata->CalValid &= ~currCal->calData->calType;
+ currCal->calState = CAL_WAITING;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
+
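+/*
+ * Kick off a noise floor calibration. If "update" is set, the baseband
+ * is allowed to update its NF registers with the measured result;
+ * otherwise AR_PHY_AGC_CONTROL_NO_UPDATE_NF keeps them untouched.
+ */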
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
+{
+ if (ah->caldata)
+ set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
+
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+
+ if (update)
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ else
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+}
+
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h = NULL;
+ unsigned i, j;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ struct ath_common *common = ath9k_hw_common(ah);
+ s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+
+ if (ah->caldata)
+ h = ah->caldata->nfCalHist;
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ s16 nfval;
+
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
+ continue;
+
+ if (h)
+ nfval = h[i].privNF;
+ else
+ nfval = default_nf;
+
+ REG_RMW(ah, ah->nf_regs[i],
+ (((u32) nfval << 1) & 0x1ff), 0x1ff);
+ }
+ }
+
+ /*
+ * Load the software-filtered NF value into the baseband's internal
+ * minCCApwr variable.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ /*
+ * Wait for the load to complete; it should be fast, a few tens of us.
+ * The max delay was changed from an original 250us to 10000us,
+ * since 250us often results in an NF load timeout and causes a deaf
+ * condition during stress testing (12/12/2009).
+ */
+ for (j = 0; j < 10000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * We timed out waiting for the noisefloor to load, probably due to an
+ * in-progress rx. Simply return here and allow the load plenty of time
+ * to complete before the next calibration interval. We need to avoid
+ * trying to load -50 (which happens below) while the previous load is
+ * still in progress, as this can cause rx deafness. Instead, by returning
+ * here, the baseband nf cal will just be capped by our present
+ * noisefloor until the next calibration timer.
+ */
+ if (j == 10000) {
+ ath_dbg(common, ANY,
+ "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Restore maxCCAPower register parameter again so that we're not capped
+ * by the median we just loaded. This will be the initial (and max) value
+ * of the next noise floor calibration the baseband does.
+ */
+ ENABLE_REG_RMW_BUFFER(ah);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
+ continue;
+
+ REG_RMW(ah, ah->nf_regs[i],
+ (((u32) (-50) << 1) & 0x1ff), 0x1ff);
+ }
+ }
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ return 0;
+}
+
+static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_nf_limits *limit;
+ int i;
+
+ if (IS_CHAN_2GHZ(ah->curchan))
+ limit = &ah->nf_2g;
+ else
+ limit = &ah->nf_5g;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!nf[i])
+ continue;
+
+ ath_dbg(common, CALIBRATE,
+ "NF calibrated [%s] [chain %d] is %d\n",
+ (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
+
+ if (nf[i] > limit->max) {
+ ath_dbg(common, CALIBRATE,
+ "NF[%d] (%d) > MAX (%d), correcting to MAX\n",
+ i, nf[i], limit->max);
+ nf[i] = limit->max;
+ } else if (nf[i] < limit->min) {
+ ath_dbg(common, CALIBRATE,
+ "NF[%d] (%d) < MIN (%d), correcting to NOM\n",
+ i, nf[i], limit->min);
+ nf[i] = limit->nominal;
+ }
+ }
+}
+
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf, nfThresh;
+ int16_t nfarray[NUM_NF_READINGS] = { 0 };
+ struct ath9k_nfcal_hist *h;
+ struct ieee80211_channel *c = chan->chan;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+ if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
+ ath_dbg(common, CALIBRATE,
+ "NF did not complete in calibration window\n");
+ return false;
+ }
+
+ ath9k_hw_do_getnf(ah, nfarray);
+ ath9k_hw_nf_sanitize(ah, nfarray);
+ nf = nfarray[0];
+ if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
+ && nf > nfThresh) {
+ ath_dbg(common, CALIBRATE,
+ "noise floor failed detected; detected %d, threshold %d\n",
+ nf, nfThresh);
+ }
+
+ if (!caldata) {
+ chan->noisefloor = nf;
+ return false;
+ }
+
+ h = caldata->nfCalHist;
+ clear_bit(NFCAL_PENDING, &caldata->cal_flags);
+ ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
+ chan->noisefloor = h[0].privNF;
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_getnf);
+
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ s16 default_nf;
+ int i, j;
+
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags;
+ h = ah->caldata->nfCalHist;
+ default_nf = ath9k_hw_get_default_nf(ah, chan);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ h[i].currIndex = 0;
+ h[i].privNF = default_nf;
+ h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
+ for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
+ h[i].nfCalBuffer[j] = default_nf;
+ }
+ }
+}
+
+void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+ if (unlikely(!caldata))
+ return;
+
+ /*
+ * If beacons are stuck, the most likely cause is interference.
+ * Triggering a noise floor calibration at this point helps the
+ * hardware adapt to a noisy environment much faster.
+ * To ensure that we recover from stuck beacons quickly, let
+ * the baseband update the internal NF value itself, similar to
+ * what is being done after a full reset.
+ */
+ if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
+ ath9k_hw_start_nfcal(ah, true);
+ else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
+ ath9k_hw_getnf(ah, ah->curchan);
+
+ set_bit(NFCAL_INTF, &caldata->cal_flags);
+}
+EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
+
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
new file mode 100644
index 000000000..87badf4bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CALIB_H
+#define CALIB_H
+
+#include "hw.h"
+
+#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
+/* Internal noise floor can vary by about 6db depending on the frequency */
+#define ATH9K_NF_CAL_NOISE_THRESH 6
+
+#define NUM_NF_READINGS 6
+#define ATH9K_NF_CAL_HIST_MAX 5
+
+struct ar5416IniArray {
+ u32 *ia_array;
+ u32 ia_rows;
+ u32 ia_columns;
+};
+
+#define STATIC_INI_ARRAY(array) { \
+ .ia_array = (u32 *)(array), \
+ .ia_rows = ARRAY_SIZE(array), \
+ .ia_columns = ARRAY_SIZE(array[0]), \
+ }
+
+#define INIT_INI_ARRAY(iniarray, array) do { \
+ (iniarray)->ia_array = (u32 *)(array); \
+ (iniarray)->ia_rows = ARRAY_SIZE(array); \
+ (iniarray)->ia_columns = ARRAY_SIZE(array[0]); \
+ } while (0)
+
+#define INI_RA(iniarray, row, column) \
+ (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
+
+#define INIT_CAL(_perCal) do { \
+ (_perCal)->calState = CAL_WAITING; \
+ (_perCal)->calNext = NULL; \
+ } while (0)
+
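+/*
+ * INSERT_CAL() appends a per-calibration entry to the ath_hw cal list,
+ * which is kept as a circular singly-linked list (the tail's calNext
+ * always points back to the head).
+ */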
+#define INSERT_CAL(_ahp, _perCal) \
+ do { \
+ if ((_ahp)->cal_list_last == NULL) { \
+ (_ahp)->cal_list = \
+ (_ahp)->cal_list_last = (_perCal); \
+ ((_ahp)->cal_list_last)->calNext = (_perCal); \
+ } else { \
+ ((_ahp)->cal_list_last)->calNext = (_perCal); \
+ (_ahp)->cal_list_last = (_perCal); \
+ (_perCal)->calNext = (_ahp)->cal_list; \
+ } \
+ } while (0)
+
+enum ath9k_cal_state {
+ CAL_INACTIVE,
+ CAL_WAITING,
+ CAL_RUNNING,
+ CAL_DONE
+};
+
+#define MIN_CAL_SAMPLES 1
+#define MAX_CAL_SAMPLES 64
+#define INIT_LOG_COUNT 5
+#define PER_MIN_LOG_COUNT 2
+#define PER_MAX_LOG_COUNT 10
+
+struct ath9k_percal_data {
+ u32 calType;
+ u32 calNumSamples;
+ u32 calCountMax;
+ void (*calCollect) (struct ath_hw *);
+ void (*calPostProc) (struct ath_hw *, u8);
+};
+
+struct ath9k_cal_list {
+ const struct ath9k_percal_data *calData;
+ enum ath9k_cal_state calState;
+ struct ath9k_cal_list *calNext;
+};
+
+struct ath9k_nfcal_hist {
+ int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
+ u8 currIndex;
+ int16_t privNF;
+ u8 invalidNFcount;
+};
+
+#define MAX_PACAL_SKIPCOUNT 8
+struct ath9k_pacal_info {
+ int32_t prev_offset; /* Previous PA offset value */
+ int8_t max_skipcount; /* Max. no. of times PACAL can be skipped */
+ int8_t skipcount; /* No. of times PACAL will still be skipped */
+};
+
+bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update);
+int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+ s16 nf);
+
+#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
new file mode 100644
index 000000000..206665059
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -0,0 +1,1606 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/* Set/change channels. If the channel is really being changed, it's done
+ * by resetting the chip. To accomplish this we must first clean up any pending
+ * DMA, then restart stuff.
+ */
+static int ath_set_channel(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath9k_channel *hchan;
+ struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
+ struct ieee80211_channel *chan = chandef->chan;
+ int pos = chan->hw_value;
+ int old_pos = -1;
+ int r;
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
+ return -EIO;
+
+ if (ah->curchan)
+ old_pos = ah->curchan - &ah->channels[0];
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ chan->center_freq, chandef->width);
+
+ /* update survey stats for the old channel before switching */
+ spin_lock_bh(&common->cc_lock);
+ ath_update_survey_stats(sc);
+ spin_unlock_bh(&common->cc_lock);
+
+ ath9k_cmn_get_channel(hw, ah, chandef);
+
+ /* If the operating channel changes, change the survey in-use flags
+ * along with it.
+ * Reset the survey data for the new channel, unless we're switching
+ * back to the operating channel from an off-channel operation.
+ */
+ if (!sc->cur_chan->offchannel && sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+ sc->cur_survey = &sc->survey[pos];
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+ memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+ }
+
+ hchan = &sc->sc_ah->channels[pos];
+ r = ath_reset(sc, hchan);
+ if (r)
+ return r;
+
+ /* The most recent snapshot of channel->noisefloor for the old
+ * channel is only available after the hardware reset. Copy it to
+ * the survey stats now.
+ */
+ if (old_pos >= 0)
+ ath_update_survey_nf(sc, old_pos);
+
+ /* Enable radar pulse detection if on a DFS channel. Spectral
+ * scanning and radar detection cannot be used concurrently.
+ */
+ if (hw->conf.radar_enabled) {
+ u32 rxfilter;
+
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR;
+ ath9k_hw_setrxfilter(ah, rxfilter);
+ ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+ chan->center_freq);
+ } else {
+ /* perform spectral scan if requested. */
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+ sc->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &sc->spec_priv);
+ }
+
+ return 0;
+}
+
+void ath_chanctx_init(struct ath_softc *sc)
+{
+ struct ath_chanctx *ctx;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ int i, j;
+
+ sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ if (!sband->n_channels)
+ sband = &common->sbands[IEEE80211_BAND_5GHZ];
+
+ chan = &sband->channels[0];
+ for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
+ ctx = &sc->chanctx[i];
+ cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
+ INIT_LIST_HEAD(&ctx->vifs);
+ ctx->txpower = ATH_TXPOWER_MAX;
+ ctx->flush_timeout = HZ / 5; /* 200ms */
+ for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
+ INIT_LIST_HEAD(&ctx->acq[j]);
+ }
+}
+
+void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool cur_chan;
+
+ spin_lock_bh(&sc->chan_lock);
+ if (chandef)
+ memcpy(&ctx->chandef, chandef, sizeof(*chandef));
+ cur_chan = sc->cur_chan == ctx;
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (!cur_chan) {
+ ath_dbg(common, CHAN_CTX,
+ "Current context differs from the new context\n");
+ return;
+ }
+
+ ath_set_channel(sc);
+}
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+
+/*************/
+/* Utilities */
+/*************/
+
+struct ath_chanctx *ath_is_go_chanctx_present(struct ath_softc *sc)
+{
+ struct ath_chanctx *ctx;
+ struct ath_vif *avp;
+ struct ieee80211_vif *vif;
+
+ spin_lock_bh(&sc->chan_lock);
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (!ctx->active)
+ continue;
+
+ list_for_each_entry(avp, &ctx->vifs, list) {
+ vif = avp->vif;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
+ spin_unlock_bh(&sc->chan_lock);
+ return ctx;
+ }
+ }
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
+ return NULL;
+}
+
+/**********************************************************/
+/* Functions to handle the channel context state machine. */
+/**********************************************************/
+
+static const char *offchannel_state_string(enum ath_offchannel_state state)
+{
+ switch (state) {
+ case_rtn_string(ATH_OFFCHANNEL_IDLE);
+ case_rtn_string(ATH_OFFCHANNEL_PROBE_SEND);
+ case_rtn_string(ATH_OFFCHANNEL_PROBE_WAIT);
+ case_rtn_string(ATH_OFFCHANNEL_SUSPEND);
+ case_rtn_string(ATH_OFFCHANNEL_ROC_START);
+ case_rtn_string(ATH_OFFCHANNEL_ROC_WAIT);
+ case_rtn_string(ATH_OFFCHANNEL_ROC_DONE);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *chanctx_event_string(enum ath_chanctx_event ev)
+{
+ switch (ev) {
+ case_rtn_string(ATH_CHANCTX_EVENT_BEACON_PREPARE);
+ case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT);
+ case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER);
+ case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED);
+ case_rtn_string(ATH_CHANCTX_EVENT_AUTHORIZED);
+ case_rtn_string(ATH_CHANCTX_EVENT_SWITCH);
+ case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN);
+ case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN);
+ case_rtn_string(ATH_CHANCTX_EVENT_CHANGE);
+ case_rtn_string(ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *chanctx_state_string(enum ath_chanctx_state state)
+{
+ switch (state) {
+ case_rtn_string(ATH_CHANCTX_STATE_IDLE);
+ case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_BEACON);
+ case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_TIMER);
+ case_rtn_string(ATH_CHANCTX_STATE_SWITCH);
+ case_rtn_string(ATH_CHANCTX_STATE_FORCE_ACTIVE);
+ default:
+ return "unknown";
+ }
+}
+
+void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ictx;
+ struct ath_vif *avp;
+ bool active = false;
+ u8 n_active = 0;
+
+ if (!ctx)
+ return;
+
+ if (ctx == &sc->offchannel.chan) {
+ spin_lock_bh(&sc->chan_lock);
+
+ if (likely(sc->sched.channel_switch_time))
+ ctx->flush_timeout =
+ usecs_to_jiffies(sc->sched.channel_switch_time);
+ else
+ ctx->flush_timeout =
+ msecs_to_jiffies(10);
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ /*
+ * There is no need to iterate over the
+ * active/assigned channel contexts if
+ * the current context is offchannel.
+ */
+ return;
+ }
+
+ ictx = ctx;
+
+ list_for_each_entry(avp, &ctx->vifs, list) {
+ struct ieee80211_vif *vif = avp->vif;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ if (avp->assoc)
+ active = true;
+ break;
+ default:
+ active = true;
+ break;
+ }
+ }
+ ctx->active = active;
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (!ctx->assigned || list_empty(&ctx->vifs))
+ continue;
+ n_active++;
+ }
+
+ spin_lock_bh(&sc->chan_lock);
+
+ if (n_active <= 1) {
+ ictx->flush_timeout = HZ / 5;
+ clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags);
+ spin_unlock_bh(&sc->chan_lock);
+ return;
+ }
+
+ ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time);
+
+ if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
+ spin_unlock_bh(&sc->chan_lock);
+ return;
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (ath9k_is_chanctx_enabled()) {
+ ath_chanctx_event(sc, NULL,
+ ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL);
+ }
+}
+
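+/*
+ * Only two scheduled channel contexts exist, so the "next" context is
+ * simply the other one (the index toggles between 0 and 1).
+ */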
+static struct ath_chanctx *
+ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+ int idx = ctx - &sc->chanctx[0];
+
+ return &sc->chanctx[!idx];
+}
+
+static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
+{
+ struct ath_chanctx *prev, *cur;
+ struct timespec ts;
+ u32 cur_tsf, prev_tsf, beacon_int;
+ s32 offset;
+
+ beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
+
+ cur = sc->cur_chan;
+ prev = ath_chanctx_get_next(sc, cur);
+
+ if (!prev->switch_after_beacon)
+ return;
+
+ getrawmonotonic(&ts);
+ cur_tsf = (u32) cur->tsf_val +
+ ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
+
+ prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
+ prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
+
+ /* Adjust the TSF time of the AP chanctx to keep its beacons
+ * at half beacon interval offset relative to the STA chanctx.
+ */
+ offset = cur_tsf - prev_tsf;
+
+ /* Ignore stale data or spurious timestamps */
+ if (offset < 0 || offset > 3 * beacon_int)
+ return;
+
+ offset = beacon_int / 2 - (offset % beacon_int);
+ prev->tsf_val += offset;
+}
+
+/* Configure the TSF-based hardware timer for a channel switch.
+ * Also set up a backup software timer, in case the gen timer fails.
+ * This could be caused by a hardware reset.
+ */
+static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000);
+ tsf_time -= ath9k_hw_gettsf32(ah);
+ tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1;
+ mod_timer(&sc->sched.timer, jiffies + tsf_time);
+
+ ath_dbg(common, CHAN_CTX,
+ "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time));
+}
+
+static void ath_chanctx_handle_bmiss(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp)
+{
+ /*
+ * Clear the extend_absence flag if it had been
+ * set during the previous beacon transmission,
+ * since we need to revert to the normal NoA
+ * schedule.
+ */
+ if (ctx->active && sc->sched.extend_absence) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = false;
+ }
+
+ /* If at least two consecutive beacons were missed on the STA
+ * chanctx, stay on the STA channel for one extra beacon period,
+ * to resync the timer properly.
+ */
+ if (ctx->active && sc->sched.beacon_miss >= 2) {
+ avp->noa_duration = 0;
+ sc->sched.extend_absence = true;
+ }
+}
+
+static void ath_chanctx_offchannel_noa(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath_vif *avp,
+ u32 tsf_time)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->offchannel_start = tsf_time;
+ avp->offchannel_duration = sc->sched.offchannel_duration;
+
+ ath_dbg(common, CHAN_CTX,
+ "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n",
+ avp->offchannel_duration,
+ avp->offchannel_start,
+ avp->noa_index);
+
+ /*
+ * When multiple contexts are active, the NoA
+ * has to be recalculated and advertised after
+ * an offchannel operation.
+ */
+ if (ctx->active && avp->noa_duration)
+ avp->noa_duration = 0;
+}
+
+static void ath_chanctx_set_periodic_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ struct ath_beacon_config *cur_conf,
+ u32 tsf_time,
+ u32 beacon_int)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+
+ if (sc->sched.extend_absence)
+ avp->noa_duration = (3 * beacon_int / 2) +
+ sc->sched.channel_switch_time;
+ else
+ avp->noa_duration =
+ TU_TO_USEC(cur_conf->beacon_interval) / 2 +
+ sc->sched.channel_switch_time;
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) ||
+ sc->sched.extend_absence)
+ avp->periodic_noa = false;
+ else
+ avp->periodic_noa = true;
+
+ ath_dbg(common, CHAN_CTX,
+ "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
+static void ath_chanctx_set_oneshot_noa(struct ath_softc *sc,
+ struct ath_vif *avp,
+ u32 tsf_time,
+ u32 duration)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ avp->noa_index++;
+ avp->noa_start = tsf_time;
+ avp->periodic_noa = false;
+ avp->oneshot_noa = true;
+ avp->noa_duration = duration + sc->sched.channel_switch_time;
+
+ ath_dbg(common, CHAN_CTX,
+ "oneshot noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n",
+ avp->noa_duration,
+ avp->noa_start,
+ avp->noa_index,
+ avp->periodic_noa);
+}
+
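+/*
+ * Central channel-context state machine. Beacon tx/rx, the TSF timer
+ * and mac80211 callbacks feed events in here, which move sc->sched.state
+ * between IDLE, WAIT_FOR_BEACON, WAIT_FOR_TIMER, SWITCH and FORCE_ACTIVE
+ * (see chanctx_state_string()).
+ */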
+void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
+ enum ath_chanctx_event ev)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_beacon_config *cur_conf;
+ struct ath_vif *avp = NULL;
+ struct ath_chanctx *ctx;
+ u32 tsf_time;
+ u32 beacon_int;
+
+ if (vif)
+ avp = (struct ath_vif *) vif->drv_priv;
+
+ spin_lock_bh(&sc->chan_lock);
+
+ ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s\n",
+ sc->cur_chan->chandef.center_freq1,
+ chanctx_event_string(ev),
+ chanctx_state_string(sc->sched.state));
+
+ switch (ev) {
+ case ATH_CHANCTX_EVENT_BEACON_PREPARE:
+ if (avp->offchannel_duration)
+ avp->offchannel_duration = 0;
+
+ if (avp->oneshot_noa) {
+ avp->noa_duration = 0;
+ avp->oneshot_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing oneshot NoA\n");
+ }
+
+ if (avp->chanctx != sc->cur_chan) {
+ ath_dbg(common, CHAN_CTX,
+ "Contexts differ, not preparing beacon\n");
+ break;
+ }
+
+ if (sc->sched.offchannel_pending && !sc->sched.wait_switch) {
+ sc->sched.offchannel_pending = false;
+ sc->next_chan = &sc->offchannel.chan;
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+ ath_dbg(common, CHAN_CTX,
+ "Setting offchannel_pending to false\n");
+ }
+
+ ctx = ath_chanctx_get_next(sc, sc->cur_chan);
+ if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) {
+ sc->next_chan = ctx;
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+ ath_dbg(common, CHAN_CTX,
+ "Set next context, move chanctx state to WAIT_FOR_BEACON\n");
+ }
+
+ /* if the timer missed its window, use the next interval */
+ if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) {
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+ ath_dbg(common, CHAN_CTX,
+ "Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n");
+ }
+
+ if (sc->sched.mgd_prepare_tx)
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+
+ /*
+ * When a context becomes inactive, for example when
+ * a station context disassociates, the NoA
+ * attribute needs to be removed from subsequent
+ * beacons.
+ */
+ if (!ctx->active && avp->noa_duration &&
+ sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) {
+ avp->noa_duration = 0;
+ avp->periodic_noa = false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Clearing NoA schedule\n");
+ }
+
+ if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
+ break;
+
+ ath_dbg(common, CHAN_CTX, "Preparing beacon for vif: %pM\n", vif->addr);
+
+ sc->sched.beacon_pending = true;
+ sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER);
+
+ cur_conf = &sc->cur_chan->beacon;
+ beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
+
+ /* defer channel switch by a quarter beacon interval */
+ tsf_time = sc->sched.next_tbtt + beacon_int / 4;
+ sc->sched.switch_start_time = tsf_time;
+ sc->cur_chan->last_beacon = sc->sched.next_tbtt;
+
+ /*
+ * If an offchannel switch is scheduled to happen after
+ * a beacon transmission, update the NoA with one-shot
+ * values and increment the index.
+ */
+ if (sc->next_chan == &sc->offchannel.chan) {
+ ath_chanctx_offchannel_noa(sc, ctx, avp, tsf_time);
+ break;
+ }
+
+ ath_chanctx_handle_bmiss(sc, ctx, avp);
+
+ /*
+ * If mgd_prepare_tx() has been called by mac80211,
+ * a one-shot NoA needs to be sent. This can happen
+ * with one or more active channel contexts - in both
+ * cases, a new NoA schedule has to be advertised.
+ */
+ if (sc->sched.mgd_prepare_tx) {
+ ath_chanctx_set_oneshot_noa(sc, avp, tsf_time,
+ jiffies_to_usecs(HZ / 5));
+ break;
+ }
+
+ /* Prevent wrap-around issues */
+ if (avp->noa_duration && tsf_time - avp->noa_start > BIT(30))
+ avp->noa_duration = 0;
+
+ /*
+ * If multiple contexts are active, start periodic
+ * NoA and increment the index for the first
+ * announcement.
+ */
+ if (ctx->active &&
+ (!avp->noa_duration || sc->sched.force_noa_update))
+ ath_chanctx_set_periodic_noa(sc, avp, cur_conf,
+ tsf_time, beacon_int);
+
+ if (ctx->active && sc->sched.force_noa_update)
+ sc->sched.force_noa_update = false;
+
+ break;
+ case ATH_CHANCTX_EVENT_BEACON_SENT:
+ if (!sc->sched.beacon_pending) {
+ ath_dbg(common, CHAN_CTX,
+ "No pending beacon\n");
+ break;
+ }
+
+ sc->sched.beacon_pending = false;
+
+ if (sc->sched.mgd_prepare_tx) {
+ sc->sched.mgd_prepare_tx = false;
+ complete(&sc->go_beacon);
+ ath_dbg(common, CHAN_CTX,
+ "Beacon sent, complete go_beacon\n");
+ break;
+ }
+
+ if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
+ break;
+
+ ath_dbg(common, CHAN_CTX,
+ "Move chanctx state to WAIT_FOR_TIMER\n");
+
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
+ ath_chanctx_setup_timer(sc, sc->sched.switch_start_time);
+ break;
+ case ATH_CHANCTX_EVENT_TSF_TIMER:
+ if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
+ break;
+
+ if (!sc->cur_chan->switch_after_beacon &&
+ sc->sched.beacon_pending)
+ sc->sched.beacon_miss++;
+
+ ath_dbg(common, CHAN_CTX,
+ "Move chanctx state to SWITCH\n");
+
+ sc->sched.state = ATH_CHANCTX_STATE_SWITCH;
+ ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+ break;
+ case ATH_CHANCTX_EVENT_BEACON_RECEIVED:
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
+ sc->cur_chan == &sc->offchannel.chan)
+ break;
+
+ sc->sched.beacon_pending = false;
+ sc->sched.beacon_miss = 0;
+
+ if (sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+ !sc->sched.beacon_adjust ||
+ !sc->cur_chan->tsf_val)
+ break;
+
+ ath_chanctx_adjust_tbtt_delta(sc);
+
+ /* TSF time might have been updated by the incoming beacon,
+ * so the channel switch timer needs to be updated to reflect the change.
+ */
+ tsf_time = sc->sched.switch_start_time;
+ tsf_time -= (u32) sc->cur_chan->tsf_val +
+ ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ tsf_time += ath9k_hw_gettsf32(ah);
+
+ sc->sched.beacon_adjust = false;
+ ath_chanctx_setup_timer(sc, tsf_time);
+ break;
+ case ATH_CHANCTX_EVENT_AUTHORIZED:
+ if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+ avp->chanctx != sc->cur_chan)
+ break;
+
+ ath_dbg(common, CHAN_CTX,
+ "Move chanctx state from FORCE_ACTIVE to IDLE\n");
+
+ sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+ /* fall through */
+ case ATH_CHANCTX_EVENT_SWITCH:
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
+ sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+ sc->cur_chan->switch_after_beacon ||
+ sc->cur_chan == &sc->offchannel.chan)
+ break;
+
+ /* If this is a station chanctx, stay active for a half
+ * beacon period (minus channel switch time)
+ */
+ sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
+ cur_conf = &sc->cur_chan->beacon;
+
+ ath_dbg(common, CHAN_CTX,
+ "Move chanctx state to WAIT_FOR_TIMER (event SWITCH)\n");
+
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
+ sc->sched.wait_switch = false;
+
+ tsf_time = TU_TO_USEC(cur_conf->beacon_interval) / 2;
+
+ if (sc->sched.extend_absence) {
+ sc->sched.beacon_miss = 0;
+ tsf_time *= 3;
+ }
+
+ tsf_time -= sc->sched.channel_switch_time;
+ tsf_time += ath9k_hw_gettsf32(sc->sc_ah);
+ sc->sched.switch_start_time = tsf_time;
+
+ ath_chanctx_setup_timer(sc, tsf_time);
+ sc->sched.beacon_pending = true;
+ sc->sched.beacon_adjust = true;
+ break;
+ case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL:
+ if (sc->cur_chan == &sc->offchannel.chan ||
+ sc->cur_chan->switch_after_beacon)
+ break;
+
+ sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
+ ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+ break;
+ case ATH_CHANCTX_EVENT_UNASSIGN:
+ if (sc->cur_chan->assigned) {
+ if (sc->next_chan && !sc->next_chan->assigned &&
+ sc->next_chan != &sc->offchannel.chan)
+ sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+ break;
+ }
+
+ ctx = ath_chanctx_get_next(sc, sc->cur_chan);
+ sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+ if (!ctx->assigned)
+ break;
+
+ sc->next_chan = ctx;
+ ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+ break;
+ case ATH_CHANCTX_EVENT_ASSIGN:
+ break;
+ case ATH_CHANCTX_EVENT_CHANGE:
+ break;
+ }
+
+ spin_unlock_bh(&sc->chan_lock);
+}
+
+void ath_chanctx_beacon_sent_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev)
+{
+ if (sc->sched.beacon_pending)
+ ath_chanctx_event(sc, NULL, ev);
+}
+
+void ath_chanctx_beacon_recv_ev(struct ath_softc *sc,
+ enum ath_chanctx_event ev)
+{
+ ath_chanctx_event(sc, NULL, ev);
+}
+
+static int ath_scan_channel_duration(struct ath_softc *sc,
+ struct ieee80211_channel *chan)
+{
+ struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+
+ if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR))
+ return (HZ / 9); /* ~110 ms */
+
+ return (HZ / 16); /* ~60 ms */
+}
+
+static void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ spin_lock_bh(&sc->chan_lock);
+
+ if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) &&
+ (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) {
+ if (chandef)
+ ctx->chandef = *chandef;
+
+ sc->sched.offchannel_pending = true;
+ sc->sched.wait_switch = true;
+ sc->sched.offchannel_duration =
+ jiffies_to_usecs(sc->offchannel.duration) +
+ sc->sched.channel_switch_time;
+
+ spin_unlock_bh(&sc->chan_lock);
+ ath_dbg(common, CHAN_CTX,
+ "Set offchannel_pending to true\n");
+ return;
+ }
+
+ sc->next_chan = ctx;
+ if (chandef) {
+ ctx->chandef = *chandef;
+ ath_dbg(common, CHAN_CTX,
+ "Assigned next_chan to %d MHz\n", chandef->center_freq1);
+ }
+
+ if (sc->next_chan == &sc->offchannel.chan) {
+ sc->sched.offchannel_duration =
+ jiffies_to_usecs(sc->offchannel.duration) +
+ sc->sched.channel_switch_time;
+
+ if (chandef) {
+ ath_dbg(common, CHAN_CTX,
+ "Offchannel duration for chan %d MHz : %u\n",
+ chandef->center_freq1,
+ sc->sched.offchannel_duration);
+ }
+ }
+ spin_unlock_bh(&sc->chan_lock);
+ ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+}
+
+static void ath_chanctx_offchan_switch(struct ath_softc *sc,
+ struct ieee80211_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct cfg80211_chan_def chandef;
+
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ ath_dbg(common, CHAN_CTX,
+ "Channel definition created: %d MHz\n", chandef.center_freq1);
+
+ ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef);
+}
+
+static struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc,
+ bool active)
+{
+ struct ath_chanctx *ctx;
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (!ctx->assigned || list_empty(&ctx->vifs))
+ continue;
+ if (active && !ctx->active)
+ continue;
+
+ if (ctx->switch_after_beacon)
+ return ctx;
+ }
+
+ return &sc->chanctx[0];
+}
+
+static void
+ath_scan_next_channel(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+ struct ieee80211_channel *chan;
+
+ if (sc->offchannel.scan_idx >= req->n_channels) {
+ ath_dbg(common, CHAN_CTX,
+ "Moving offchannel state to ATH_OFFCHANNEL_IDLE, "
+ "scan_idx: %d, n_channels: %d\n",
+ sc->offchannel.scan_idx,
+ req->n_channels);
+
+ sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+ ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
+ NULL);
+ return;
+ }
+
+ ath_dbg(common, CHAN_CTX,
+ "Moving offchannel state to ATH_OFFCHANNEL_PROBE_SEND, scan_idx: %d\n",
+ sc->offchannel.scan_idx);
+
+ chan = req->channels[sc->offchannel.scan_idx++];
+ sc->offchannel.duration = ath_scan_channel_duration(sc, chan);
+ sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND;
+
+ ath_chanctx_offchan_switch(sc, chan);
+}
+
+void ath_offchannel_next(struct ath_softc *sc)
+{
+ struct ieee80211_vif *vif;
+
+ if (sc->offchannel.scan_req) {
+ vif = sc->offchannel.scan_vif;
+ sc->offchannel.chan.txpower = vif->bss_conf.txpower;
+ ath_scan_next_channel(sc);
+ } else if (sc->offchannel.roc_vif) {
+ vif = sc->offchannel.roc_vif;
+ sc->offchannel.chan.txpower = vif->bss_conf.txpower;
+ sc->offchannel.duration =
+ msecs_to_jiffies(sc->offchannel.roc_duration);
+ sc->offchannel.state = ATH_OFFCHANNEL_ROC_START;
+ ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan);
+ } else {
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.offchannel_pending = false;
+ sc->sched.wait_switch = false;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
+ NULL);
+ sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+ if (sc->ps_idle)
+ ath_cancel_work(sc);
+ }
+}
+
+void ath_roc_complete(struct ath_softc *sc, bool abort)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (abort)
+ ath_dbg(common, CHAN_CTX, "RoC aborted\n");
+ else
+ ath_dbg(common, CHAN_CTX, "RoC expired\n");
+
+ sc->offchannel.roc_vif = NULL;
+ sc->offchannel.roc_chan = NULL;
+ ieee80211_remain_on_channel_expired(sc->hw);
+ ath_offchannel_next(sc);
+ ath9k_ps_restore(sc);
+}
+
+void ath_scan_complete(struct ath_softc *sc, bool abort)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (abort)
+ ath_dbg(common, CHAN_CTX, "HW scan aborted\n");
+ else
+ ath_dbg(common, CHAN_CTX, "HW scan complete\n");
+
+ sc->offchannel.scan_req = NULL;
+ sc->offchannel.scan_vif = NULL;
+ sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+ ieee80211_scan_completed(sc->hw, abort);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
+ spin_lock_bh(&sc->chan_lock);
+ if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+ sc->sched.force_noa_update = true;
+ spin_unlock_bh(&sc->chan_lock);
+ ath_offchannel_next(sc);
+ ath9k_ps_restore(sc);
+}
+
+static void ath_scan_send_probe(struct ath_softc *sc,
+ struct cfg80211_ssid *ssid)
+{
+ struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+ struct ieee80211_vif *vif = sc->offchannel.scan_vif;
+ struct ath_tx_control txctl = {};
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ int band = sc->offchannel.chan.chandef.chan->band;
+
+ skb = ieee80211_probereq_get(sc->hw, vif->addr,
+ ssid->ssid, ssid->ssid_len, req->ie_len);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
+ if (req->ie_len)
+ memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+
+ if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL))
+ goto error;
+
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+ txctl.force_channel = true;
+ if (ath_tx_start(sc->hw, skb, &txctl))
+ goto error;
+
+ return;
+
+error:
+ ieee80211_free_txskb(sc->hw, skb);
+}
+
+static void ath_scan_channel_start(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+ int i;
+
+ if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) &&
+ req->n_ssids) {
+ for (i = 0; i < req->n_ssids; i++)
+ ath_scan_send_probe(sc, &req->ssids[i]);
+
+ }
+
+ ath_dbg(common, CHAN_CTX,
+ "Moving offchannel state to ATH_OFFCHANNEL_PROBE_WAIT\n");
+
+ sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT;
+ mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration);
+}
+
+static void ath_chanctx_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, CHAN_CTX,
+ "Channel context timer invoked\n");
+
+ ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
+}
+
+static void ath_offchannel_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_chanctx *ctx;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n",
+ __func__, offchannel_state_string(sc->offchannel.state));
+
+ switch (sc->offchannel.state) {
+ case ATH_OFFCHANNEL_PROBE_WAIT:
+ if (!sc->offchannel.scan_req)
+ return;
+
+ /* get first active channel context */
+ ctx = ath_chanctx_get_oper_chan(sc, true);
+ if (ctx->active) {
+ ath_dbg(common, CHAN_CTX,
+ "Switch to oper/active context, "
+ "move offchannel state to ATH_OFFCHANNEL_SUSPEND\n");
+
+ sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND;
+ ath_chanctx_switch(sc, ctx, NULL);
+ mod_timer(&sc->offchannel.timer, jiffies + HZ / 10);
+ break;
+ }
+ /* fall through */
+ case ATH_OFFCHANNEL_SUSPEND:
+ if (!sc->offchannel.scan_req)
+ return;
+
+ ath_scan_next_channel(sc);
+ break;
+ case ATH_OFFCHANNEL_ROC_START:
+ case ATH_OFFCHANNEL_ROC_WAIT:
+ sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
+ ath_roc_complete(sc, false);
+ break;
+ default:
+ break;
+ }
+}
+
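+/*
+ * Notify the AP of a powersave transition for this interface by sending a
+ * NullFunc frame (with the PM bit set when entering powersave), so that
+ * frames for us are buffered while we are away on another channel.
+ */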
+static bool
+ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
+ bool powersave)
+{
+ struct ieee80211_vif *vif = avp->vif;
+ struct ieee80211_sta *sta = NULL;
+ struct ieee80211_hdr_3addr *nullfunc;
+ struct ath_tx_control txctl;
+ struct sk_buff *skb;
+ int band = sc->cur_chan->chandef.chan->band;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (!avp->assoc)
+ return false;
+
+ skb = ieee80211_nullfunc_get(sc->hw, vif);
+ if (!skb)
+ return false;
+
+ nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
+ if (powersave)
+ nullfunc->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_PM);
+
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) {
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+ txctl.sta = sta;
+ txctl.force_channel = true;
+ if (ath_tx_start(sc->hw, skb, &txctl)) {
+ ieee80211_free_txskb(sc->hw, skb);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave)
+{
+ struct ath_vif *avp;
+ bool sent = false;
+
+ rcu_read_lock();
+ list_for_each_entry(avp, &sc->cur_chan->vifs, list) {
+ if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave))
+ sent = true;
+ }
+ rcu_read_unlock();
+
+ return sent;
+}
+
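+/*
+ * Check whether the pending context switch should be deferred: while in the
+ * IDLE state, a context that wants to switch only after its beacon has been
+ * sent moves the scheduler to WAIT_FOR_BEACON instead of switching now.
+ */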
+static bool ath_chanctx_defer_switch(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (sc->cur_chan == &sc->offchannel.chan)
+ return false;
+
+ switch (sc->sched.state) {
+ case ATH_CHANCTX_STATE_SWITCH:
+ return false;
+ case ATH_CHANCTX_STATE_IDLE:
+ if (!sc->cur_chan->switch_after_beacon)
+ return false;
+
+ ath_dbg(common, CHAN_CTX,
+ "Defer switch, set chanctx state to WAIT_FOR_BEACON\n");
+
+ sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
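+/*
+ * Called once a channel change has taken effect: advance the off-channel
+ * state machine by starting to probe, signalling remain-on-channel
+ * readiness, or completing the scan, depending on the state we were in.
+ */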
+static void ath_offchannel_channel_change(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n",
+ __func__, offchannel_state_string(sc->offchannel.state));
+
+ switch (sc->offchannel.state) {
+ case ATH_OFFCHANNEL_PROBE_SEND:
+ if (!sc->offchannel.scan_req)
+ return;
+
+ if (sc->cur_chan->chandef.chan !=
+ sc->offchannel.chan.chandef.chan)
+ return;
+
+ ath_scan_channel_start(sc);
+ break;
+ case ATH_OFFCHANNEL_IDLE:
+ if (!sc->offchannel.scan_req)
+ return;
+
+ ath_scan_complete(sc, false);
+ break;
+ case ATH_OFFCHANNEL_ROC_START:
+ if (sc->cur_chan != &sc->offchannel.chan)
+ break;
+
+ sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT;
+ mod_timer(&sc->offchannel.timer,
+ jiffies + sc->offchannel.duration);
+ ieee80211_ready_on_channel(sc->hw);
+ break;
+ case ATH_OFFCHANNEL_ROC_DONE:
+ break;
+ default:
+ break;
+ }
+}
+
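+/*
+ * Perform the actual switch to sc->next_chan: stop the current context's
+ * queues, flush pending frames, announce powersave to the AP, save the old
+ * context's TSF, program the new channel if required, then wake the queues
+ * and announce that powersave is over on the new context.
+ */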
+void ath_chanctx_set_next(struct ath_softc *sc, bool force)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *old_ctx;
+ struct timespec ts;
+ bool measure_time = false;
+ bool send_ps = false;
+ bool queues_stopped = false;
+
+ spin_lock_bh(&sc->chan_lock);
+ if (!sc->next_chan) {
+ spin_unlock_bh(&sc->chan_lock);
+ return;
+ }
+
+ if (!force && ath_chanctx_defer_switch(sc)) {
+ spin_unlock_bh(&sc->chan_lock);
+ return;
+ }
+
+ ath_dbg(common, CHAN_CTX,
+ "%s: current: %d MHz, next: %d MHz\n",
+ __func__,
+ sc->cur_chan->chandef.center_freq1,
+ sc->next_chan->chandef.center_freq1);
+
+ if (sc->cur_chan != sc->next_chan) {
+ ath_dbg(common, CHAN_CTX,
+ "Stopping current chanctx: %d\n",
+ sc->cur_chan->chandef.center_freq1);
+ sc->cur_chan->stopped = true;
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (sc->next_chan == &sc->offchannel.chan) {
+ getrawmonotonic(&ts);
+ measure_time = true;
+ }
+
+ ath9k_chanctx_stop_queues(sc, sc->cur_chan);
+ queues_stopped = true;
+
+ __ath9k_flush(sc->hw, ~0, true, false, false);
+
+ if (ath_chanctx_send_ps_frame(sc, true))
+ __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO),
+ false, false, false);
+
+ send_ps = true;
+ spin_lock_bh(&sc->chan_lock);
+
+ if (sc->cur_chan != &sc->offchannel.chan) {
+ getrawmonotonic(&sc->cur_chan->tsf_ts);
+ sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
+ }
+ }
+ old_ctx = sc->cur_chan;
+ sc->cur_chan = sc->next_chan;
+ sc->cur_chan->stopped = false;
+ sc->next_chan = NULL;
+
+ if (!sc->sched.offchannel_pending)
+ sc->sched.offchannel_duration = 0;
+
+ if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE)
+ sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (sc->sc_ah->chip_fullsleep ||
+ memcmp(&sc->cur_chandef, &sc->cur_chan->chandef,
+ sizeof(sc->cur_chandef))) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Set channel %d MHz\n",
+ __func__, sc->cur_chan->chandef.center_freq1);
+ ath_set_channel(sc);
+ if (measure_time)
+ sc->sched.channel_switch_time =
+ ath9k_hw_get_tsf_offset(&ts, NULL);
+ /*
+ * A reset will ensure that all queues are woken up,
+ * so there is no need to awaken them again.
+ */
+ goto out;
+ }
+
+ if (queues_stopped)
+ ath9k_chanctx_wake_queues(sc, old_ctx);
+out:
+ if (send_ps)
+ ath_chanctx_send_ps_frame(sc, false);
+
+ ath_offchannel_channel_change(sc);
+ ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH);
+}
+
+static void ath_chanctx_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ chanctx_work);
+ mutex_lock(&sc->mutex);
+ ath_chanctx_set_next(sc, false);
+ mutex_unlock(&sc->mutex);
+}
+
+void ath9k_offchannel_init(struct ath_softc *sc)
+{
+ struct ath_chanctx *ctx;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ int i;
+
+ sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ if (!sband->n_channels)
+ sband = &common->sbands[IEEE80211_BAND_5GHZ];
+
+ chan = &sband->channels[0];
+
+ ctx = &sc->offchannel.chan;
+ INIT_LIST_HEAD(&ctx->vifs);
+ ctx->txpower = ATH_TXPOWER_MAX;
+ cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->acq); i++)
+ INIT_LIST_HEAD(&ctx->acq[i]);
+
+ sc->offchannel.chan.offchannel = true;
+}
+
+void ath9k_init_channel_context(struct ath_softc *sc)
+{
+ INIT_WORK(&sc->chanctx_work, ath_chanctx_work);
+
+ setup_timer(&sc->offchannel.timer, ath_offchannel_timer,
+ (unsigned long)sc);
+ setup_timer(&sc->sched.timer, ath_chanctx_timer,
+ (unsigned long)sc);
+
+ init_completion(&sc->go_beacon);
+}
+
+void ath9k_deinit_channel_context(struct ath_softc *sc)
+{
+ cancel_work_sync(&sc->chanctx_work);
+}
+
+bool ath9k_is_chanctx_enabled(void)
+{
+ return (ath9k_use_chanctx == 1);
+}
+
+/********************/
+/* Queue management */
+/********************/
+
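+/*
+ * Stop/wake the mac80211 queues that map to a context's hardware queues
+ * (or the dedicated off-channel queue), so no new frames are handed down
+ * while that context is inactive.
+ */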
+void ath9k_chanctx_stop_queues(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int i;
+
+ if (ctx == &sc->offchannel.chan) {
+ ieee80211_stop_queue(sc->hw,
+ sc->hw->offchannel_tx_hw_queue);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ ieee80211_stop_queue(sc->hw,
+ ctx->hw_queue_base + i);
+ }
+
+ if (ah->opmode == NL80211_IFTYPE_AP)
+ ieee80211_stop_queue(sc->hw, sc->hw->queues - 2);
+}
+
+
+void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int i;
+
+ if (ctx == &sc->offchannel.chan) {
+ ieee80211_wake_queue(sc->hw,
+ sc->hw->offchannel_tx_hw_queue);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ ieee80211_wake_queue(sc->hw,
+ ctx->hw_queue_base + i);
+ }
+
+ if (ah->opmode == NL80211_IFTYPE_AP)
+ ieee80211_wake_queue(sc->hw, sc->hw->queues - 2);
+}
+
+/*****************/
+/* P2P Powersave */
+/*****************/
+
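+/*
+ * (Re)arm the P2P powersave generic timer for the next NoA boundary. When
+ * the GO is currently present, fire ATH_P2P_PS_STOP_TIME early so that TX
+ * can be stopped before the absence period actually starts.
+ */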
+static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ s32 tsf, target_tsf;
+
+ if (!avp || !avp->noa.has_next_tsf)
+ return;
+
+ ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+ target_tsf = avp->noa.next_tsf;
+ if (!avp->noa.absent)
+ target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+ if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+ target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+ ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
+static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u32 tsf;
+
+ if (!sc->p2p_ps_timer)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+ return;
+
+ sc->p2p_ps_vif = avp;
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+ ath9k_update_p2p_ps_timer(sc, avp);
+}
+
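+/*
+ * Select the opportunistic powersave CTWindow to advertise: use the
+ * configured value if it fits within the window during which the GO is
+ * guaranteed to stay on channel (see the comment below), otherwise fall
+ * back to P2P_DEFAULT_CTWIN, or 0 if even that does not fit.
+ */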
+static u8 ath9k_get_ctwin(struct ath_softc *sc, struct ath_vif *avp)
+{
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+ u8 switch_time, ctwin;
+
+ /*
+ * Channel switch in multi-channel mode is deferred
+ * by a quarter beacon interval when handling
+ * ATH_CHANCTX_EVENT_BEACON_PREPARE, so the P2P-GO
+ * interface is guaranteed to be discoverable
+ * for that duration after a TBTT.
+ */
+ switch_time = cur_conf->beacon_interval / 4;
+
+ ctwin = avp->vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ if (ctwin && (ctwin < switch_time))
+ return ctwin;
+
+ if (switch_time < P2P_DEFAULT_CTWIN)
+ return 0;
+
+ return P2P_DEFAULT_CTWIN;
+}
+
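+/*
+ * Append a P2P Notice of Absence IE to the beacon: a vendor-specific
+ * element carrying the WFA P2P NoA attribute, with one descriptor per
+ * pending absence (the periodic or one-shot NoA and/or the off-channel
+ * period).
+ */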
+void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp,
+ struct sk_buff *skb)
+{
+ static const u8 noa_ie_hdr[] = {
+ WLAN_EID_VENDOR_SPECIFIC, /* type */
+ 0, /* length */
+ 0x50, 0x6f, 0x9a, /* WFA OUI */
+ 0x09, /* P2P subtype */
+ 0x0c, /* Notice of Absence */
+ 0x00, /* LSB of little-endian len */
+ 0x00, /* MSB of little-endian len */
+ };
+
+ struct ieee80211_p2p_noa_attr *noa;
+ int noa_len, noa_desc, i = 0;
+ u8 *hdr;
+
+ if (!avp->offchannel_duration && !avp->noa_duration)
+ return;
+
+ noa_desc = !!avp->offchannel_duration + !!avp->noa_duration;
+ noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc;
+
+ hdr = skb_put(skb, sizeof(noa_ie_hdr));
+ memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr));
+ hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2;
+ hdr[7] = noa_len;
+
+ noa = (void *) skb_put(skb, noa_len);
+ memset(noa, 0, noa_len);
+
+ noa->index = avp->noa_index;
+ noa->oppps_ctwindow = ath9k_get_ctwin(sc, avp);
+
+ if (avp->noa_duration) {
+ if (avp->periodic_noa) {
+ u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
+ noa->desc[i].count = 255;
+ noa->desc[i].interval = cpu_to_le32(interval);
+ } else {
+ noa->desc[i].count = 1;
+ }
+
+ noa->desc[i].start_time = cpu_to_le32(avp->noa_start);
+ noa->desc[i].duration = cpu_to_le32(avp->noa_duration);
+ i++;
+ }
+
+ if (avp->offchannel_duration) {
+ noa->desc[i].count = 1;
+ noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start);
+ noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration);
+ }
+}
+
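+/*
+ * Generic hardware timer callback for P2P client powersave: update the NoA
+ * schedule, re-arm the timer for the next absence boundary and put the GO's
+ * station entry to sleep (or wake it up) so TX aggregation pauses while the
+ * GO is absent.
+ */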
+void ath9k_p2p_ps_timer(void *priv)
+{
+ struct ath_softc *sc = priv;
+ struct ath_vif *avp = sc->p2p_ps_vif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ u32 tsf;
+
+ del_timer_sync(&sc->sched.timer);
+ ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer);
+ ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
+
+ if (!avp || avp->chanctx != sc->cur_chan)
+ return;
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ if (!avp->noa.absent)
+ tsf += ATH_P2P_PS_STOP_TIME;
+
+ if (!avp->noa.has_next_tsf ||
+ avp->noa.next_tsf - tsf > BIT(31))
+ ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+ ath9k_update_p2p_ps_timer(sc, avp);
+
+ rcu_read_lock();
+
+ vif = avp->vif;
+ sta = ieee80211_find_sta(vif, avp->bssid);
+ if (!sta)
+ goto out;
+
+ an = (void *) sta->drv_priv;
+ if (an->sleeping == !!avp->noa.absent)
+ goto out;
+
+ an->sleeping = avp->noa.absent;
+ if (an->sleeping)
+ ath_tx_aggr_sleep(sta, sc, an);
+ else
+ ath_tx_aggr_wakeup(sc, an);
+
+out:
+ rcu_read_unlock();
+}
+
+void ath9k_p2p_bss_info_changed(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+ unsigned long flags;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (!(sc->ps_flags & PS_BEACON_SYNC))
+ ath9k_update_p2p_ps(sc, vif);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+}
+
+void ath9k_p2p_beacon_sync(struct ath_softc *sc)
+{
+ if (sc->p2p_ps_vif)
+ ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
+}
+
+void ath9k_p2p_remove_vif(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ if (avp == sc->p2p_ps_vif) {
+ sc->p2p_ps_vif = NULL;
+ ath9k_update_p2p_ps_timer(sc, NULL);
+ }
+ spin_unlock_bh(&sc->sc_pcu_lock);
+}
+
+int ath9k_init_p2p(struct ath_softc *sc)
+{
+ sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+ NULL, sc, AR_FIRST_NDP_TIMER);
+ if (!sc->p2p_ps_timer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ath9k_deinit_p2p(struct ath_softc *sc)
+{
+ if (sc->p2p_ps_timer)
+ ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+}
+
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
new file mode 100644
index 000000000..6ad44470d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+#define FUDGE 2
+
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
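+/*
+ * The TSF is kept in microseconds; the low 10 bits are the sub-TU part
+ * (1 TU == 1024 us). The remaining TU count is reduced modulo div_tu in two
+ * steps: the upper 32 bits of the TSF contribute (mod_hi << 22) TUs (since
+ * 2^32 us == 2^22 TU), then the lower TUs are folded in. Finally the sub-TU
+ * bits are re-attached so the result is in microseconds again.
+ */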
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+ return (mod_lo << 10) | tsf_mod;
+}
+
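+/*
+ * Return the next target beacon transmission time in microseconds: pad the
+ * TSF by a small fudge factor plus the software beacon response time, then
+ * round up to the next multiple of the beacon interval.
+ */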
+static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
+ unsigned int interval)
+{
+ unsigned int offset;
+
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
+/*
+ * This sets up the beacon timers according to the timestamp of the last
+ * received beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware will wake up in
+ * time to receive beacons, and configures the beacon miss handling so
+ * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int dtim_intval;
+ u64 tsf;
+
+ /* No need to configure beacon if we are not associated */
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+ ath_dbg(common, BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return -EPERM;
+ }
+
+ memset(bs, 0, sizeof(*bs));
+ conf->intval = conf->beacon_interval;
+
+ /*
+	 * Set up DTIM parameters according to the
+	 * last beacon we received (which may be none).
+ */
+ dtim_intval = conf->intval * conf->dtim_period;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
+
+ bs->bs_intval = TU_TO_USEC(conf->intval);
+ bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
+ bs->bs_nexttbtt = conf->nexttbtt;
+ bs->bs_nextdtim = conf->nexttbtt;
+ if (conf->dtim_period > 1)
+ bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
+
+ /*
+	 * Calculate the number of consecutive beacons to miss before taking
+	 * a BMISS interrupt. The configuration is specified in TU, so we only
+	 * need to calculate based on the beacon interval. Note that we clamp
+	 * the result to at most 15 beacons.
+ */
+ bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
+ if (bs->bs_bmissthreshold > 15)
+ bs->bs_bmissthreshold = 15;
+ else if (bs->bs_bmissthreshold <= 0)
+ bs->bs_bmissthreshold = 1;
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+	 * We ensure a multiple of the beacon period is used. Also, if the
+	 * sleep duration is greater than the DTIM period then it makes sense
+	 * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ conf->intval));
+ if (bs->bs_sleepduration > bs->bs_dtimperiod)
+ bs->bs_sleepduration = bs->bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs->bs_bmissthreshold, bs->bs_sleepduration);
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
+
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+
+ if (conf->ibss_creator)
+ conf->nexttbtt = conf->intval;
+ else
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
+
+/*
+ * For multi-BSS AP support, beacons are either staggered evenly over N slots
+ * or burst together. For the former, arrange for the SWBA interrupt to be
+ * delivered for each slot. Slots that are not occupied will generate nothing.
+ */
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* NB: the beacon interval is kept internally in TU's */
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+ conf->intval /= bc_buf;
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.h b/drivers/net/wireless/ath/ath9k/common-beacon.h
new file mode 100644
index 000000000..3665d27f0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct ath_beacon_config;
+
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs);
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf);
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf);
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644
index 000000000..3b289f933
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 6000;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_modal_eeprom = {
+ .read = read_file_modal_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_modal_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
+
+static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 1500;
+ ssize_t retval = 0;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_base_eeprom = {
+ .read = read_file_base_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_base_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
+
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs)
+{
+#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
+#define RX_CMN_STAT_INC(c) (rxstats->c++)
+
+ RX_CMN_STAT_INC(rx_pkts_all);
+ rxstats->rx_bytes_all += rs->rs_datalen;
+
+ if (rs->rs_status & ATH9K_RXERR_CRC)
+ RX_CMN_STAT_INC(crc_err);
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
+ RX_CMN_STAT_INC(decrypt_crc_err);
+ if (rs->rs_status & ATH9K_RXERR_MIC)
+ RX_CMN_STAT_INC(mic_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_CMN_STAT_INC(pre_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_CMN_STAT_INC(post_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_CMN_STAT_INC(decrypt_busy_err);
+
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
+ RX_CMN_STAT_INC(phy_err);
+ if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
+ RX_PHY_ERR_INC(rs->rs_phyerr);
+ }
+
+#undef RX_CMN_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define RXS_ERR(s, e) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%18s : %10u\n", s, \
+ rxstats->e); \
+ } while (0)
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ RXS_ERR("PKTS-ALL", rx_pkts_all);
+ RXS_ERR("BYTES-ALL", rx_bytes_all);
+ RXS_ERR("BEACONS", rx_beacons);
+ RXS_ERR("FRAGS", rx_frags);
+ RXS_ERR("SPECTRAL", rx_spectral);
+
+ RXS_ERR("CRC ERR", crc_err);
+ RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
+ RXS_ERR("PHY ERR", phy_err);
+ RXS_ERR("MIC ERR", mic_err);
+ RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
+ RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
+ RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
+ RXS_ERR("LENGTH-ERR", rx_len_err);
+ RXS_ERR("OOM-ERR", rx_oom_err);
+ RXS_ERR("RATE-ERR", rx_rate_err);
+ RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef RXS_ERR
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
+ &fops_recv);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_recv);
+
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ rxstats->phy_err_stats[p]);
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+static const struct file_operations fops_phy_err = {
+ .read = read_file_phy_err,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
+ &fops_phy_err);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644
index 000000000..7c9788490
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @rx_pkts_all: No. of total frames received, including ones that
+ *	may have had errors.
+ * @rx_bytes_all: No. of total bytes received, including ones that
+ *	may have had errors.
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ *	the decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ *	encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err: No. of frames discarded due to bad length.
+ * @rx_oom_err: No. of frames dropped due to OOM issues.
+ * @rx_rate_err: No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
+ * @rx_beacons: No. of beacons received.
+ * @rx_frags: No. of RX fragments received.
+ * @rx_spectral: No. of spectral packets received.
+ */
+struct ath_rx_stats {
+ u32 rx_pkts_all;
+ u32 rx_bytes_all;
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+ u32 rx_len_err;
+ u32 rx_oom_err;
+ u32 rx_rate_err;
+ u32 rx_too_many_frags_err;
+ u32 rx_beacons;
+ u32 rx_frags;
+ u32 rx_spectral;
+};
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs);
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
new file mode 100644
index 000000000..a006c1499
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* We use the hw_value as an index into our private channel structure */
+
+#include "common.h"
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable over 2312-2732 MHz
+ * in 5 MHz steps. We only support the channels for which we
+ * know we have calibration data on all cards, so that this
+ * table can stay static. */
+static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable over XXXX-YYYY MHz
+ * in 5 MHz steps. We only support the channels for which we
+ * know we have calibration data on all cards, so that this
+ * table can stay static. */
+static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+};
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common)
+{
+ struct ath_hw *ah = (struct ath_hw *)common->ah;
+ void *channels;
+
+ BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
+ ARRAY_SIZE(ath9k_5ghz_chantable) !=
+ ATH9K_NUM_CHANNELS);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable));
+ common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable));
+ common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_init_channels_rates);
+
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 tx_streams, rx_streams;
+ int i, max_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ if (AR_SREV_9271(ah) || AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
+ rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
+
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+
+ if (tx_streams != rx_streams) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+EXPORT_SYMBOL(ath9k_cmn_setup_ht_cap);
+
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_HT))
+ return;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+}
+EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.h b/drivers/net/wireless/ath/ath9k/common-init.h
new file mode 100644
index 000000000..ac03fca5f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common);
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info);
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
new file mode 100644
index 000000000..5cee231cc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "ath9k.h"
+
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+}
+
+static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
+ struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+ if (!spec_priv->rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ u8 num_bins, *bins, *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample_20;
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct fft_sample_tlv *tlv;
+ struct ath_radar_info *radar_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ enum nl80211_channel_type chan_type;
+
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ */
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
+ chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_40_NUM_BINS;
+ bins = (u8 *)fft_sample_40.data;
+ } else {
+ fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ num_bins = SPECTRAL_HT20_NUM_BINS;
+ bins = (u8 *)fft_sample_20.data;
+ }
+
+ /* Variation in the data length is possible and will be fixed later */
+ if ((len > fft_len + 2) || (len < fft_len - 1))
+ return 1;
+
+ switch (len - fft_len) {
+ case 0:
+ /* length correct, nothing to do. */
+ memcpy(bins, vdata, num_bins);
+ break;
+ case -1:
+ /* first byte missing, duplicate it. */
+ memcpy(&bins[1], vdata, num_bins - 1);
+ bins[0] = vdata[0];
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+ memcpy(bins, vdata, 30);
+ bins[30] = vdata[31];
+ memcpy(&bins[31], &vdata[33], num_bins - 31);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte is missing. */
+ bins[0] = vdata[0];
+ memcpy(&bins[1], vdata, 30);
+ bins[31] = vdata[31];
+ memcpy(&bins[32], &vdata[33], num_bins - 32);
+ break;
+ default:
+ return 1;
+ }
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and is invalid; interpolate it.
+ */
+ dc_pos = num_bins / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+ if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+ (chan_type == NL80211_CHAN_HT40PLUS)) {
+ s8 lower_rssi, upper_rssi;
+ s16 ext_nf;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w;
+ u16 lower_mag, upper_mag;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+ lower_max_index = spectral_max_index(mag_info->lower_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins);
+ fft_sample_40.lower_max_index = lower_max_index;
+ fft_sample_40.upper_max_index = upper_max_index;
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+ fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+ } else {
+ u8 max_index, bitmap_w;
+ u16 magnitude;
+ struct ath_ht20_mag_info *mag_info;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+ max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample_20.max_index = max_index;
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+ fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ }
+
+ ath_debug_send_fft_sample(spec_priv, tlv);
+
+ return 1;
+}
+EXPORT_SYMBOL(ath_cmn_process_fft);
+
+/*********************/
+/* spectral_scan_ctl */
+/*********************/
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ char *mode = "";
+ unsigned int len;
+
+ switch (spec_priv->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv)
+{
+ struct ath_hw *ah = spec_priv->ah;
+ u32 rxfilter;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ ath9k_hw_setrxfilter(ah, rxfilter |
+ ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR);
+
+	/* TODO: usually this should not be necessary, but for some reason
+ * (or in some mode?) the trigger must be called after the
+ * configuration, otherwise the register will have its values reset
+ * (on my ar9220 to value 0x01002310)
+ */
+ ath9k_cmn_spectral_scan_config(common, spec_priv, spec_priv->spectral_mode);
+ ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
+ ath_ps_ops(common)->restore(common);
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_trigger);
+
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
+ enum spectral_mode spectral_mode)
+{
+ struct ath_hw *ah = spec_priv->ah;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return -1;
+ }
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ spec_priv->spec_config.enabled = 0;
+ break;
+ case SPECTRAL_BACKGROUND:
+ /* send endless samples.
+ * TODO: is this really useful for "background"?
+ */
+ spec_priv->spec_config.endless = 1;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ case SPECTRAL_CHANSCAN:
+ case SPECTRAL_MANUAL:
+ spec_priv->spec_config.endless = 0;
+ spec_priv->spec_config.enabled = 1;
+ break;
+ default:
+ return -1;
+ }
+
+ ath_ps_ops(common)->wakeup(common);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &spec_priv->spec_config);
+ ath_ps_ops(common)->restore(common);
+
+ spec_priv->spectral_mode = spectral_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_scan_config);
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ char buf[32];
+ ssize_t len;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_cmn_spectral_scan_trigger(common, spec_priv);
+ } else if (strncmp("background", buf, 10) == 0) {
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_cmn_spectral_scan_config(common, spec_priv, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*************************/
+/* spectral_short_repeat */
+/*************************/
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 1)
+ return -EINVAL;
+
+ spec_priv->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/******************/
+/* spectral_count */
+/******************/
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 255)
+ return -EINVAL;
+
+ spec_priv->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* spectral_period */
+/*******************/
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 255)
+ return -EINVAL;
+
+ spec_priv->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/***********************/
+/* spectral_fft_period */
+/***********************/
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", spec_priv->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_spec_scan_priv *spec_priv = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 15)
+ return -EINVAL;
+
+ spec_priv->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*******************/
+/* Relay interface */
+/*******************/
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+/*********************/
+/* Debug Init/Deinit */
+/*********************/
+
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
+{
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ relay_close(spec_priv->rfs_chan_spec_scan);
+ spec_priv->rfs_chan_spec_scan = NULL;
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_deinit_debug);
+
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+ struct dentry *debugfs_phy)
+{
+ spec_priv->rfs_chan_spec_scan = relay_open("spectral_scan",
+ debugfs_phy,
+ 1024, 256, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_period",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+ &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+ &fops_spectral_fft_period);
+}
+EXPORT_SYMBOL(ath9k_cmn_spectral_init_debug);
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
new file mode 100644
index 000000000..82d9dd296
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+#include "../spectral_common.h"
+
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+ */
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+};
+
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+} __packed;
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+ u8 max_exp;
+} __packed;
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+struct ath_spec_scan_priv {
+ struct ath_hw *ah;
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+ enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
+};
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+}
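+
+/* Worked example (illustrative): bins = {0x80, 0x34, 0x02} decodes to
+ * (0x80 & 0xc0) >> 6 | (0x34 << 2) | ((0x02 & 0x03) << 10) = 0x8d2 (2258).
+ */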
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+ s8 m = (bins[2] & 0xfc) >> 2;
+
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+ else
+ m &= ~0xe0;
+
+ return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
+
+void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
+void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
+
+void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv);
+int ath9k_cmn_spectral_scan_config(struct ath_common *common,
+ struct ath_spec_scan_priv *spec_priv,
+ enum spectral_mode spectral_mode);
+int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf);
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
new file mode 100644
index 000000000..e8c699446
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Module for common driver code between ath9k and ath9k_htc
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "common.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter)
+{
+ struct ath_hw *ah = common->ah;
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ ieee80211_has_protected(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
+
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+ *decrypt_error = true;
+ mic_error = false;
+ }
+
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
+ }
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
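+	/* For example, a 26-byte QoS data header gives padpos = 26 and
+	 * padsize = 26 & 3 = 2, while a 24-byte header needs no padding
+	 * (24 & 3 = 0). */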
+ padsize = padpos & 3;
+	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+	} else if (ieee80211_has_protected(fc) &&
+		   !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto_rx &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
+
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_hw *ah = common->ah;
+
+ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+
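+	/* Bit 7 of the hardware rate code marks an HT (MCS) rate; the
+	 * low seven bits are the MCS index (e.g. 0x85 -> MCS 5).
+	 * Otherwise the code is matched against the legacy rate table
+	 * below. */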
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ rxs->flag |= rx_stats->flag;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ath_hw *ah = common->ah;
+ int last_rssi;
+ int rssi = rx_stats->rs_rssi;
+ int i, j;
+
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ /*
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
+ */
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
+ /*
+ * Update Beacon RSSI, this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+ last_rssi = common->last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
+
+ ah->stats.avgbrssi = rssi;
+ }
+
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.hw_key) {
+ switch (tx_info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ return ATH9K_KEY_TYPE_WEP;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return ATH9K_KEY_TYPE_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return ATH9K_KEY_TYPE_AES;
+ default:
+ break;
+ }
+ }
+
+ return ATH9K_KEY_TYPE_CLEAR;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
+
+/*
+ * Update internal channel flags.
+ */
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *chan = chandef->chan;
+ u16 flags = 0;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ flags |= CHANNEL_5GHZ;
+
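+	/* For a 40 MHz chandef, center_freq1 above the primary channel's
+	 * center frequency selects HT40PLUS (e.g. primary 2412 MHz with
+	 * center_freq1 = 2422 MHz); center_freq1 below it selects
+	 * HT40MINUS. */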
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ flags |= CHANNEL_QUARTER;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ flags |= CHANNEL_HALF;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ flags |= CHANNEL_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+ else
+ flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ ichan->channelFlags = flags;
+}
+
+/*
+ * Get the internal channel reference.
+ */
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *curchan = chandef->chan;
+ struct ath9k_channel *channel;
+
+ channel = &ah->channels[curchan->hw_value];
+ ath9k_cmn_update_ichannel(channel, chandef);
+
+ return channel;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
+
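+/* Illustrative usage: a chainmask of 0x5 (chains 0 and 2) yields two
+ * streams; the loop below clears one set bit per iteration and stops
+ * early once max is reached, so 0x7 with max = 2 also yields two.
+ */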
+int ath9k_cmn_count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+EXPORT_SYMBOL(ath9k_cmn_count_streams);
+
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower)
+{
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+ if (reg->power_limit != new_txpow)
+ ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+
+ /* read back in case value is clamped */
+ *txpower = reg->max_power_level;
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+
+void ath9k_cmn_init_crypto(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = AR_KEYTABLE_SIZE;
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+	 * to 27, otherwise 59.
+ */
+ if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
+ common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath_hw_keyreset(common, (u16) i);
+}
+EXPORT_SYMBOL(ath9k_cmn_init_crypto);
+
+static int __init ath9k_cmn_init(void)
+{
+ return 0;
+}
+module_init(ath9k_cmn_init);
+
+static void __exit ath9k_cmn_exit(void)
+{
+}
+module_exit(ath9k_cmn_exit);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
new file mode 100644
index 000000000..d23737342
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <net/mac80211.h>
+
+#include "../ath.h"
+
+#include "hw.h"
+#include "hw-ops.h"
+
+#include "common-init.h"
+#include "common-beacon.h"
+#include "common-debug.h"
+#include "common-spectral.h"
+
+/* Common header for Atheros 802.11n base driver cores */
+
+#define WME_BA_BMP_SIZE 64
+#define WME_MAX_BA WME_BA_BMP_SIZE
+#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
+
+#define ATH_RSSI_DUMMY_MARKER 127
+#define ATH_RSSI_LPF_LEN 10
+#define RSSI_LPF_THRESHOLD -20
+#define ATH_RSSI_EP_MULTIPLIER (1<<7)
+#define ATH_EP_MUL(x, mul) ((x) * (mul))
+#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
+#define ATH_LPF_RSSI(x, y, len) \
+ ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define ATH_RSSI_LPF(x, y) do { \
+ if ((y) >= RSSI_LPF_THRESHOLD) \
+ x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
+} while (0)
+#define ATH_EP_RND(x, mul) \
+ (((x) + ((mul)/2)) / (mul))
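+
+/* Illustrative example of the RSSI low-pass filter above: samples are
+ * scaled by ATH_RSSI_EP_MULTIPLIER (128). Starting from the dummy
+ * marker, a first sample of 40 stores 40 * 128 = 5120; a second sample
+ * of 20 updates it to (5120 * 9 + 20 * 128) / 10 = 4864, which
+ * ATH_EP_RND(4864, 128) reads back as (4864 + 64) / 128 = 38.
+ */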
+
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+
+struct ath_beacon_config {
+ int beacon_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+ u8 enable_beacon;
+ bool ibss_creator;
+ u32 nexttbtt;
+ u32 intval;
+};
+
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+ struct ath_hw *ah,
+ struct cfg80211_chan_def *chandef);
+int ath9k_cmn_count_streams(unsigned int chainmask, int max);
+void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
+ enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower);
+void ath9k_cmn_init_crypto(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
new file mode 100644
index 000000000..dbf8f4959
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -0,0 +1,1395 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+#include "ath9k.h"
+
+#define REG_WRITE_D(_ah, _reg, _val) \
+ ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
+#define REG_READ_D(_ah, _reg) \
+ ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+ if (sync_cause)
+ sc->debug.stats.istats.sync_cause_all++;
+ if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+ sc->debug.stats.istats.sync_rtc_irq++;
+ if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+ sc->debug.stats.istats.sync_mac_irq++;
+ if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+ sc->debug.stats.istats.eeprom_illegal_access++;
+ if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+ sc->debug.stats.istats.apb_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+ sc->debug.stats.istats.pci_mode_conflict++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+ sc->debug.stats.istats.host1_fatal++;
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+ sc->debug.stats.istats.host1_perr++;
+ if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+ sc->debug.stats.istats.trcv_fifo_perr++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+ sc->debug.stats.istats.radm_cpl_ep++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_dllp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+ sc->debug.stats.istats.radm_cpl_tlp_abort++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+ sc->debug.stats.istats.radm_cpl_ecrc_err++;
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+ sc->debug.stats.istats.radm_cpl_timeout++;
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ sc->debug.stats.istats.local_timeout++;
+ if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+ sc->debug.stats.istats.pm_access++;
+ if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+ sc->debug.stats.istats.mac_awake++;
+ if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+ sc->debug.stats.istats.mac_asleep++;
+ if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+ sc->debug.stats.istats.mac_sleep_access++;
+}
+
+static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+#ifdef CONFIG_ATH_DEBUG
+
+static ssize_t read_file_debug(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "0x%08x\n", common->debug_mask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->debug_mask = mask;
+ return count;
+}
+
+static const struct file_operations fops_debug = {
+ .read = read_file_debug,
+ .write = write_file_debug,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+#define DMA_BUF_LEN 1024
+
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
+ ssize_t retval = 0;
+ char *buf;
+ int i;
+ struct {
+ const char *name;
+ unsigned int val;
+ } ani_info[] = {
+ { "ANI RESET", ah->stats.ast_ani_reset },
+ { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
+ { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
+ { "SPUR UP", ah->stats.ast_ani_spurup },
+ { "SPUR DOWN", ah->stats.ast_ani_spurup },
+ { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
+ { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
+ { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
+ { "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
+ { "FIR-STEP UP", ah->stats.ast_ani_stepup },
+ { "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
+ { "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
+ { "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
+ { "CCK ERRORS", ah->stats.ast_ani_cckerrs },
+ };
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
+ common->disable_ani ? "DISABLED" : "ENABLED");
+
+ if (common->disable_ani)
+ goto exit;
+
+ for (i = 0; i < ARRAY_SIZE(ani_info); i++)
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ ani_info[i].name, ani_info[i].val);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long ani;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &ani))
+ return -EINVAL;
+
+ if (ani > 1)
+ return -EINVAL;
+
+ common->disable_ani = !ani;
+
+ if (common->disable_ani) {
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
+ ath_stop_ani(sc);
+ } else {
+ ath_check_ani(sc);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static ssize_t read_file_bt_ant_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", common->bt_ant_diversity);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_bt_ant_diversity(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
+ unsigned long bt_ant_diversity;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
+ goto exit;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &bt_ant_diversity))
+ return -EINVAL;
+
+ common->bt_ant_diversity = !!bt_ant_diversity;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
+ ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
+ common->bt_ant_diversity);
+ ath9k_ps_restore(sc);
+exit:
+ return count;
+}
+
+static const struct file_operations fops_bt_ant_diversity = {
+ .read = read_file_bt_ant_diversity,
+ .write = write_file_bt_ant_diversity,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+
+ as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
+ as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
+
+ as_main->rssi_avg = main_rssi_avg;
+ as_alt->rssi_avg = alt_rssi_avg;
+}
+
+static ssize_t read_file_antenna_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+ struct ath_hw_antcomb_conf div_ant_conf;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
+ ssize_t retval = 0;
+ char *buf;
+ static const char *lna_conf_str[4] = {
+ "LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
+ };
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
+ len += scnprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
+ goto exit;
+ }
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+ len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += scnprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += scnprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
+ ath9k_ps_restore(sc);
+
+ len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += scnprintf(buf + len, size - len, "-------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += scnprintf(buf + len, size - len, "--------------------\n");
+
+ len += scnprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_antenna_diversity = {
+ .read = read_file_antenna_diversity,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int read_file_dma(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
+ int i, qcuOffset = 0, dcuOffset = 0;
+ u32 *qcuBase = &val[0], *dcuBase = &val[4];
+
+ ath9k_ps_wakeup(sc);
+
+ REG_WRITE_D(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ seq_puts(file, "Raw DMA Debug values:\n");
+
+ for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
+ if (i % 4 == 0)
+ seq_puts(file, "\n");
+
+ val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
+ seq_printf(file, "%d: %08x ", i, val[i]);
+ }
+
+ seq_puts(file, "\n\n");
+ seq_puts(file, "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+
+ for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
+ if (i == 8) {
+ qcuOffset = 0;
+ qcuBase++;
+ }
+
+ if (i == 6) {
+ dcuOffset = 0;
+ dcuBase++;
+ }
+
+ seq_printf(file, "%2d %2x %1x %2x %2x\n",
+ i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+ (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+ (val[2] & (0x7 << (i * 3))) >> (i * 3),
+ (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+ }
+
+ seq_puts(file, "\n");
+
+ seq_printf(file, "qcu_stitch state: %2x qcu_fetch state: %2x\n",
+ (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
+ seq_printf(file, "qcu_complete state: %2x dcu_complete state: %2x\n",
+ (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
+ seq_printf(file, "dcu_arb state: %2x dcu_fp state: %2x\n",
+ (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
+ seq_printf(file, "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
+ (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
+ seq_printf(file, "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
+ (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
+ seq_printf(file, "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
+ (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
+
+ seq_printf(file, "pcu observe: 0x%x\n", REG_READ_D(ah, AR_OBS_BUS_1));
+ seq_printf(file, "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+
+ ath9k_ps_restore(sc);
+
+ return 0;
+}
+
+void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
+{
+ if (status)
+ sc->debug.stats.istats.total++;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXLP)
+ sc->debug.stats.istats.rxlp++;
+ if (status & ATH9K_INT_RXHP)
+ sc->debug.stats.istats.rxhp++;
+ if (status & ATH9K_INT_BB_WATCHDOG)
+ sc->debug.stats.istats.bb_watchdog++;
+ } else {
+ if (status & ATH9K_INT_RX)
+ sc->debug.stats.istats.rxok++;
+ }
+ if (status & ATH9K_INT_RXEOL)
+ sc->debug.stats.istats.rxeol++;
+ if (status & ATH9K_INT_RXORN)
+ sc->debug.stats.istats.rxorn++;
+ if (status & ATH9K_INT_TX)
+ sc->debug.stats.istats.txok++;
+ if (status & ATH9K_INT_TXURN)
+ sc->debug.stats.istats.txurn++;
+ if (status & ATH9K_INT_RXPHY)
+ sc->debug.stats.istats.rxphyerr++;
+ if (status & ATH9K_INT_RXKCM)
+ sc->debug.stats.istats.rx_keycache_miss++;
+ if (status & ATH9K_INT_SWBA)
+ sc->debug.stats.istats.swba++;
+ if (status & ATH9K_INT_BMISS)
+ sc->debug.stats.istats.bmiss++;
+ if (status & ATH9K_INT_BNR)
+ sc->debug.stats.istats.bnr++;
+ if (status & ATH9K_INT_CST)
+ sc->debug.stats.istats.cst++;
+ if (status & ATH9K_INT_GTT)
+ sc->debug.stats.istats.gtt++;
+ if (status & ATH9K_INT_TIM)
+ sc->debug.stats.istats.tim++;
+ if (status & ATH9K_INT_CABEND)
+ sc->debug.stats.istats.cabend++;
+ if (status & ATH9K_INT_DTIMSYNC)
+ sc->debug.stats.istats.dtimsync++;
+ if (status & ATH9K_INT_DTIM)
+ sc->debug.stats.istats.dtim++;
+ if (status & ATH9K_INT_TSFOOR)
+ sc->debug.stats.istats.tsfoor++;
+ if (status & ATH9K_INT_MCI)
+ sc->debug.stats.istats.mci++;
+ if (status & ATH9K_INT_GENTIMER)
+ sc->debug.stats.istats.gen_timer++;
+}
+
+static int read_file_interrupt(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+
+#define PR_IS(a, s) \
+ do { \
+ seq_printf(file, "%21s: %10u\n", a, \
+ sc->debug.stats.istats.s); \
+ } while (0)
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ PR_IS("RXLP", rxlp);
+ PR_IS("RXHP", rxhp);
+ PR_IS("WATHDOG", bb_watchdog);
+ } else {
+ PR_IS("RX", rxok);
+ }
+ PR_IS("RXEOL", rxeol);
+ PR_IS("RXORN", rxorn);
+ PR_IS("TX", txok);
+ PR_IS("TXURN", txurn);
+ PR_IS("MIB", mib);
+ PR_IS("RXPHY", rxphyerr);
+ PR_IS("RXKCM", rx_keycache_miss);
+ PR_IS("SWBA", swba);
+ PR_IS("BMISS", bmiss);
+ PR_IS("BNR", bnr);
+ PR_IS("CST", cst);
+ PR_IS("GTT", gtt);
+ PR_IS("TIM", tim);
+ PR_IS("CABEND", cabend);
+ PR_IS("DTIMSYNC", dtimsync);
+ PR_IS("DTIM", dtim);
+ PR_IS("TSFOOR", tsfoor);
+ PR_IS("MCI", mci);
+ PR_IS("GENTIMER", gen_timer);
+ PR_IS("TOTAL", total);
+
+ seq_puts(file, "SYNC_CAUSE stats:\n");
+
+ PR_IS("Sync-All", sync_cause_all);
+ PR_IS("RTC-IRQ", sync_rtc_irq);
+ PR_IS("MAC-IRQ", sync_mac_irq);
+ PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access);
+ PR_IS("APB-Timeout", apb_timeout);
+ PR_IS("PCI-Mode-Conflict", pci_mode_conflict);
+ PR_IS("HOST1-Fatal", host1_fatal);
+ PR_IS("HOST1-Perr", host1_perr);
+ PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr);
+ PR_IS("RADM-CPL-EP", radm_cpl_ep);
+ PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort);
+ PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort);
+ PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err);
+ PR_IS("RADM-CPL-Timeout", radm_cpl_timeout);
+ PR_IS("Local-Bus-Timeout", local_timeout);
+ PR_IS("PM-Access", pm_access);
+ PR_IS("MAC-Awake", mac_awake);
+ PR_IS("MAC-Asleep", mac_asleep);
+ PR_IS("MAC-Sleep-Access", mac_sleep_access);
+
+ return 0;
+}
+
+static int read_file_xmit(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+
+ seq_printf(file, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+
+ PR("MPDUs Queued: ", queued);
+ PR("MPDUs Completed: ", completed);
+ PR("MPDUs XRetried: ", xretries);
+ PR("Aggregates: ", a_aggr);
+ PR("AMPDUs Queued HW:", a_queued_hw);
+ PR("AMPDUs Queued SW:", a_queued_sw);
+ PR("AMPDUs Completed:", a_completed);
+ PR("AMPDUs Retried: ", a_retries);
+ PR("AMPDUs XRetried: ", a_xretries);
+ PR("TXERR Filtered: ", txerr_filtered);
+ PR("FIFO Underrun: ", fifo_underrun);
+ PR("TXOP Exceeded: ", xtxop);
+ PR("TXTIMER Expiry: ", timer_exp);
+ PR("DESC CFG Error: ", desc_cfg_err);
+ PR("DATA Underrun: ", data_underrun);
+ PR("DELIM Underrun: ", delim_underrun);
+ PR("TX-Pkts-All: ", tx_pkts_all);
+ PR("TX-Bytes-All: ", tx_bytes_all);
+ PR("HW-put-tx-buf: ", puttxbuf);
+ PR("HW-tx-start: ", txstart);
+ PR("HW-tx-proc-desc: ", txprocdesc);
+ PR("TX-Failed: ", txfailed);
+
+ return 0;
+}
+
+static void print_queue(struct ath_softc *sc, struct ath_txq *txq,
+ struct seq_file *file)
+{
+ ath_txq_lock(sc, txq);
+
+ seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
+ seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
+ seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
+ seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
+ seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
+
+ ath_txq_unlock(sc, txq);
+}
+
+static int read_file_queues(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+ struct ath_txq *txq;
+ int i;
+ static const char *qname[4] = {
+ "VO", "VI", "BE", "BK"
+ };
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+ seq_printf(file, "(%s): ", qname[i]);
+ print_queue(sc, txq, file);
+ }
+
+ seq_puts(file, "(CAB): ");
+ print_queue(sc, sc->beacon.cabq, file);
+
+ return 0;
+}
+
+static int read_file_misc(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath9k_vif_iter_data iter_data;
+ struct ath_chanctx *ctx;
+ unsigned int reg;
+ u32 rxfilter, i;
+
+ seq_printf(file, "BSSID: %pM\n", common->curbssid);
+ seq_printf(file, "BSSID-MASK: %pM\n", common->bssidmask);
+ seq_printf(file, "OPMODE: %s\n",
+ ath_opmode_to_string(sc->sc_ah->opmode));
+
+ ath9k_ps_wakeup(sc);
+ rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
+ ath9k_ps_restore(sc);
+
+ seq_printf(file, "RXFILTER: 0x%x", rxfilter);
+
+ if (rxfilter & ATH9K_RX_FILTER_UCAST)
+ seq_puts(file, " UCAST");
+ if (rxfilter & ATH9K_RX_FILTER_MCAST)
+ seq_puts(file, " MCAST");
+ if (rxfilter & ATH9K_RX_FILTER_BCAST)
+ seq_puts(file, " BCAST");
+ if (rxfilter & ATH9K_RX_FILTER_CONTROL)
+ seq_puts(file, " CONTROL");
+ if (rxfilter & ATH9K_RX_FILTER_BEACON)
+ seq_puts(file, " BEACON");
+ if (rxfilter & ATH9K_RX_FILTER_PROM)
+ seq_puts(file, " PROM");
+ if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
+ seq_puts(file, " PROBEREQ");
+ if (rxfilter & ATH9K_RX_FILTER_PHYERR)
+ seq_puts(file, " PHYERR");
+ if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
+ seq_puts(file, " MYBEACON");
+ if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
+ seq_puts(file, " COMP_BAR");
+ if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
+ seq_puts(file, " PSPOLL");
+ if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
+ seq_puts(file, " PHYRADAR");
+ if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
+ seq_puts(file, " MCAST_BCAST_ALL");
+ if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
+ seq_puts(file, " CONTROL_WRAPPER");
+
+ seq_puts(file, "\n");
+
+ reg = sc->sc_ah->imask;
+
+ seq_printf(file, "INTERRUPT-MASK: 0x%x", reg);
+
+ if (reg & ATH9K_INT_SWBA)
+ seq_puts(file, " SWBA");
+ if (reg & ATH9K_INT_BMISS)
+ seq_puts(file, " BMISS");
+ if (reg & ATH9K_INT_CST)
+ seq_puts(file, " CST");
+ if (reg & ATH9K_INT_RX)
+ seq_puts(file, " RX");
+ if (reg & ATH9K_INT_RXHP)
+ seq_puts(file, " RXHP");
+ if (reg & ATH9K_INT_RXLP)
+ seq_puts(file, " RXLP");
+ if (reg & ATH9K_INT_BB_WATCHDOG)
+ seq_puts(file, " BB_WATCHDOG");
+
+ seq_puts(file, "\n");
+
+ i = 0;
+ ath_for_each_chanctx(sc, ctx) {
+ if (list_empty(&ctx->vifs))
+ continue;
+ ath9k_calculate_iter_data(sc, ctx, &iter_data);
+
+ seq_printf(file,
+ "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i",
+ i++, (int)(ctx->assigned), iter_data.naps,
+ iter_data.nstations,
+ iter_data.nmeshes, iter_data.nwds);
+ seq_printf(file, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+ iter_data.nadhocs, sc->cur_chan->nvifs,
+ sc->nbcnvifs);
+ }
+
+ return 0;
+}
+
+static int read_file_reset(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+ static const char * const reset_cause[__RESET_TYPE_MAX] = {
+ [RESET_TYPE_BB_HANG] = "Baseband Hang",
+ [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
+ [RESET_TYPE_FATAL_INT] = "Fatal HW Error",
+ [RESET_TYPE_TX_ERROR] = "TX HW error",
+ [RESET_TYPE_TX_GTT] = "Transmit timeout",
+ [RESET_TYPE_TX_HANG] = "TX Path Hang",
+ [RESET_TYPE_PLL_HANG] = "PLL RX Hang",
+ [RESET_TYPE_MAC_HANG] = "MAC Hang",
+ [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
+ [RESET_TYPE_MCI] = "MCI Reset",
+ [RESET_TYPE_CALIBRATION] = "Calibration error",
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reset_cause); i++) {
+ if (!reset_cause[i])
+ continue;
+
+ seq_printf(file, "%17s: %2d\n", reset_cause[i],
+ sc->debug.stats.reset[i]);
+ }
+
+ return 0;
+}
+
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, struct ath_txq *txq,
+ unsigned int flags)
+{
+ int qnum = txq->axq_qnum;
+
+ TX_STAT_INC(qnum, tx_pkts_all);
+ sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
+
+ if (bf_isampdu(bf)) {
+ if (flags & ATH_TX_ERROR)
+ TX_STAT_INC(qnum, a_xretries);
+ else
+ TX_STAT_INC(qnum, a_completed);
+ } else {
+ if (ts->ts_status & ATH9K_TXERR_XRETRY)
+ TX_STAT_INC(qnum, xretries);
+ else
+ TX_STAT_INC(qnum, completed);
+ }
+
+ if (ts->ts_status & ATH9K_TXERR_FILT)
+ TX_STAT_INC(qnum, txerr_filtered);
+ if (ts->ts_status & ATH9K_TXERR_FIFO)
+ TX_STAT_INC(qnum, fifo_underrun);
+ if (ts->ts_status & ATH9K_TXERR_XTXOP)
+ TX_STAT_INC(qnum, xtxop);
+ if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+ TX_STAT_INC(qnum, timer_exp);
+ if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
+ TX_STAT_INC(qnum, desc_cfg_err);
+ if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
+ TX_STAT_INC(qnum, data_underrun);
+ if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+ TX_STAT_INC(qnum, delim_underrun);
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
+{
+ ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
+}
+
+static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "0x%08x\n", sc->debug.regidx);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long regidx;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &regidx))
+ return -EINVAL;
+
+ sc->debug.regidx = regidx;
+ return count;
+}
+
+static const struct file_operations fops_regidx = {
+ .read = read_file_regidx,
+ .write = write_file_regidx,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_regval(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+ u32 regval;
+
+ ath9k_ps_wakeup(sc);
+ regval = REG_READ_D(ah, sc->debug.regidx);
+ ath9k_ps_restore(sc);
+ len = sprintf(buf, "0x%08x\n", regval);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long regval;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &regval))
+ return -EINVAL;
+
+ ath9k_ps_wakeup(sc);
+ REG_WRITE_D(ah, sc->debug.regidx, regval);
+ ath9k_ps_restore(sc);
+ return count;
+}
+
+static const struct file_operations fops_regval = {
+ .read = read_file_regval,
+ .write = write_file_regval,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define REGDUMP_LINE_SIZE 20
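+/* Each dumped line below is "0x%06x 0x%08x\n": 8 + 1 + 10 + 1 = 20
+ * characters, which REGDUMP_LINE_SIZE accounts for.
+ */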
+
+static int open_file_regdump(struct inode *inode, struct file *file)
+{
+ struct ath_softc *sc = inode->i_private;
+ unsigned int len = 0;
+ u8 *buf;
+ int i;
+ unsigned long num_regs, regdump_len, max_reg_offset;
+
+ max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+ num_regs = max_reg_offset / 4 + 1;
+ regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
+ buf = vmalloc(regdump_len);
+ if (!buf)
+ return -ENOMEM;
+
+ ath9k_ps_wakeup(sc);
+ for (i = 0; i < num_regs; i++)
+ len += scnprintf(buf + len, regdump_len - len,
+ "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+ ath9k_ps_restore(sc);
+
+ file->private_data = buf;
+
+ return 0;
+}
+
+static const struct file_operations fops_regdump = {
+ .open = open_file_regdump,
+ .read = ath9k_debugfs_read_buf,
+ .release = ath9k_debugfs_release_buf,
+ .owner = THIS_MODULE,
+	.llseek = default_llseek, /* read accesses f_pos */
+};
+
+static int read_file_dump_nfcal(struct seq_file *file, void *data)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(file->private);
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ u32 i, j;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ u8 nread;
+
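+	/* NUM_NF_READINGS covers the control (0-2) and extension (3-5)
+	 * chains, so the rx chainmask is mirrored into the upper bits;
+	 * the loop below skips extension chains unless the channel is
+	 * HT40. */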
+ seq_printf(file, "Channel Noise Floor : %d\n", ah->noise);
+ seq_puts(file, "Chain | privNF | # Readings | NF Readings\n");
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (!(chainmask & (1 << i)) ||
+ ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
+ continue;
+
+ nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
+ seq_printf(file, " %d\t %d\t %d\t\t", i, h[i].privNF, nread);
+ for (j = 0; j < nread; j++)
+ seq_printf(file, " %d", h[i].nfCalBuffer[j]);
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int open_file_dump_nfcal(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_dump_nfcal, inode->i_private);
+}
+
+static const struct file_operations fops_dump_nfcal = {
+ .read = seq_read,
+ .open = open_file_dump_nfcal,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ u32 len = 0, size = 1500;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!sc->sc_ah->common.btcoex_enabled) {
+ len = scnprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
+ goto exit;
+ }
+
+ len = ath9k_dump_btcoex(sc, buf, size);
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = read_file_btcoex,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+#endif
+
+#ifdef CONFIG_ATH9K_DYNACK
+static ssize_t read_file_ackto(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%u %c\n", ah->dynack.ackto,
+ (ah->dynack.enabled) ? 'A' : 'S');
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ackto = {
+ .read = read_file_ackto,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+#endif
+
+#ifdef CONFIG_ATH9K_WOW
+
+static ssize_t read_file_wow(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned int len = 0, size = 32;
+ ssize_t retval;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "WOW: %s\n",
+ sc->force_wow ? "ENABLED" : "DISABLED");
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_wow(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val != 1)
+ return -EINVAL;
+
+ if (!sc->force_wow) {
+ sc->force_wow = true;
+ ath9k_init_wow(sc->hw);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wow = {
+ .read = read_file_wow,
+ .write = write_file_wow,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+static ssize_t read_file_tpc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned int len = 0, size = 32;
+ ssize_t retval;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "%s\n",
+ ah->tpc_enabled ? "ENABLED" : "DISABLED");
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+ bool tpc_enabled;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+	if (val > 1)
+ return -EINVAL;
+
+ tpc_enabled = !!val;
+
+ if (tpc_enabled != ah->tpc_enabled) {
+ ah->tpc_enabled = tpc_enabled;
+
+ mutex_lock(&sc->mutex);
+ ath9k_set_txpower(sc, NULL);
+ mutex_unlock(&sc->mutex);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_tpc = {
+ .read = read_file_tpc,
+ .write = write_file_tpc,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* Ethtool support for get-stats */
+
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
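+/* e.g. AMKSTR(d_tx_pkts) expands to "d_tx_pkts_BE", "d_tx_pkts_BK", "d_tx_pkts_VI", "d_tx_pkts_VO" */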
+static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ AMKSTR(d_tx_pkts),
+ AMKSTR(d_tx_bytes),
+ AMKSTR(d_tx_mpdus_queued),
+ AMKSTR(d_tx_mpdus_completed),
+ AMKSTR(d_tx_mpdu_xretries),
+ AMKSTR(d_tx_aggregates),
+ AMKSTR(d_tx_ampdus_queued_hw),
+ AMKSTR(d_tx_ampdus_queued_sw),
+ AMKSTR(d_tx_ampdus_completed),
+ AMKSTR(d_tx_ampdu_retries),
+ AMKSTR(d_tx_ampdu_xretries),
+ AMKSTR(d_tx_fifo_underrun),
+ AMKSTR(d_tx_op_exceeded),
+ AMKSTR(d_tx_timer_expiry),
+ AMKSTR(d_tx_desc_cfg_err),
+ AMKSTR(d_tx_data_underrun),
+ AMKSTR(d_tx_delim_underrun),
+ "d_rx_crc_err",
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
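+
+/*
+ * Note: the string table above and the order in which ath9k_get_et_stats()
+ * fills the data array must stay in sync; the WARN_ON() at the end of that
+ * function only checks the total count (ATH9K_SSTATS_LEN).
+ */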
+
+void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+}
+
+int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_SSTATS_LEN;
+ return 0;
+}
+
+#define AWDATA(elem) \
+ do { \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].elem; \
+ } while (0)
+
+#define AWDATA_RX(elem) \
+ do { \
+ data[i++] = sc->debug.stats.rxstats.elem; \
+ } while (0)
+
+void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath_softc *sc = hw->priv;
+ int i = 0;
+
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
+ AWDATA_RX(rx_pkts_all);
+ AWDATA_RX(rx_bytes_all);
+
+ AWDATA(tx_pkts_all);
+ AWDATA(tx_bytes_all);
+ AWDATA(queued);
+ AWDATA(completed);
+ AWDATA(xretries);
+ AWDATA(a_aggr);
+ AWDATA(a_queued_hw);
+ AWDATA(a_queued_sw);
+ AWDATA(a_completed);
+ AWDATA(a_retries);
+ AWDATA(a_xretries);
+ AWDATA(fifo_underrun);
+ AWDATA(xtxop);
+ AWDATA(timer_exp);
+ AWDATA(desc_cfg_err);
+ AWDATA(data_underrun);
+ AWDATA(delim_underrun);
+
+ AWDATA_RX(crc_err);
+ AWDATA_RX(decrypt_crc_err);
+ AWDATA_RX(phy_err);
+ AWDATA_RX(mic_err);
+ AWDATA_RX(pre_delim_crc_err);
+ AWDATA_RX(post_delim_crc_err);
+ AWDATA_RX(decrypt_busy_err);
+
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
+
+ WARN_ON(i != ATH9K_SSTATS_LEN);
+}
+
+void ath9k_deinit_debug(struct ath_softc *sc)
+{
+ ath9k_cmn_spectral_deinit_debug(&sc->spec_priv);
+}
+
+int ath9k_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
+ sc->hw->wiphy->debugfsdir);
+ if (!sc->debug.debugfs_phy)
+ return -ENOMEM;
+
+#ifdef CONFIG_ATH_DEBUG
+ debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ sc, &fops_debug);
+#endif
+
+ ath9k_dfs_init_debug(sc);
+ ath9k_tx99_init_debug(sc);
+ ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy);
+
+ debugfs_create_devm_seqfile(sc->dev, "dma", sc->debug.debugfs_phy,
+ read_file_dma);
+ debugfs_create_devm_seqfile(sc->dev, "interrupt", sc->debug.debugfs_phy,
+ read_file_interrupt);
+ debugfs_create_devm_seqfile(sc->dev, "xmit", sc->debug.debugfs_phy,
+ read_file_xmit);
+ debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
+ read_file_queues);
+ debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
+ debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
+ debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
+ debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
+ debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
+ read_file_misc);
+ debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
+ read_file_reset);
+
+ ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+ ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+
+ debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->rxchainmask);
+ debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->txchainmask);
+ debugfs_create_file("ani", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_ani);
+ debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->sc_ah->config.enable_paprd);
+ debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ sc, &fops_regidx);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ sc, &fops_regval);
+ debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy,
+ &ah->config.cwm_ignore_extcca);
+ debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_regdump);
+ debugfs_create_devm_seqfile(sc->dev, "dump_nfcal",
+ sc->debug.debugfs_phy,
+ read_file_dump_nfcal);
+
+ ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+ ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+
+ debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
+ debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
+ debugfs_create_file("antenna_diversity", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
+ debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_btcoex);
+#endif
+
+#ifdef CONFIG_ATH9K_WOW
+ debugfs_create_file("wow", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_wow);
+#endif
+
+#ifdef CONFIG_ATH9K_DYNACK
+ debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ sc, &fops_ackto);
+#endif
+ debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tpc);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
new file mode 100644
index 000000000..a8e931995
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include "hw.h"
+#include "dfs_debug.h"
+
+struct ath_txq;
+struct ath_buf;
+struct fft_sample_tlv;
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
+#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
+#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++
+#else
+#define TX_STAT_INC(q, c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+#define RESET_STAT_INC(sc, type) do { } while (0)
+#define ANT_STAT_INC(i, c) do { } while (0)
+#define ANT_LNA_INC(i, c) do { } while (0)
+#endif
+
+enum ath_reset_type {
+ RESET_TYPE_BB_HANG,
+ RESET_TYPE_BB_WATCHDOG,
+ RESET_TYPE_FATAL_INT,
+ RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_GTT,
+ RESET_TYPE_TX_HANG,
+ RESET_TYPE_PLL_HANG,
+ RESET_TYPE_MAC_HANG,
+ RESET_TYPE_BEACON_STUCK,
+ RESET_TYPE_MCI,
+ RESET_TYPE_CALIBRATION,
+ __RESET_TYPE_MAX
+};
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+
+/**
+ * struct ath_interrupt_stats - Contains statistics about interrupts
+ * @total: Total no. of interrupts generated so far
+ * @rxok: RX with no errors
+ * @rxlp: RX with low priority
+ * @rxhp: RX with high priority, uapsd only
+ * @rxeol: RX with no more RXDESC available
+ * @rxorn: RX FIFO overrun
+ * @txok: TX completed at the requested rate
+ * @txurn: TX FIFO underrun
+ * @mib: MIB registers reaching their threshold
+ * @rxphyerr: RX with phy errors
+ * @rx_keycache_miss: RX with key cache misses
+ * @swba: Software Beacon Alert
+ * @bmiss: Beacon Miss
+ * @bnr: Beacon Not Ready
+ * @cst: Carrier Sense Timeout
+ * @gtt: Global TX Timeout
+ * @tim: RX beacon TIM occurrence
+ * @cabend: RX End of CAB traffic
+ * @dtimsync: DTIM sync lossage
+ * @dtim: RX Beacon with DTIM
+ * @bb_watchdog: Baseband watchdog
+ * @tsfoor: TSF out of range, indicates that the corrected TSF received
+ * from a beacon differs from the PCU's internal TSF by more than a
+ * (programmable) threshold
+ * @local_timeout: Internal bus timeout.
+ * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
+ * @gen_timer: Generic hardware timer interrupt
+ */
+struct ath_interrupt_stats {
+ u32 total;
+ u32 rxok;
+ u32 rxlp;
+ u32 rxhp;
+ u32 rxeol;
+ u32 rxorn;
+ u32 txok;
+ u32 txeol;
+ u32 txurn;
+ u32 mib;
+ u32 rxphyerr;
+ u32 rx_keycache_miss;
+ u32 swba;
+ u32 bmiss;
+ u32 bnr;
+ u32 cst;
+ u32 gtt;
+ u32 tim;
+ u32 cabend;
+ u32 dtimsync;
+ u32 dtim;
+ u32 bb_watchdog;
+ u32 tsfoor;
+ u32 mci;
+ u32 gen_timer;
+
+ /* Sync-cause stats */
+ u32 sync_cause_all;
+ u32 sync_rtc_irq;
+ u32 sync_mac_irq;
+ u32 eeprom_illegal_access;
+ u32 apb_timeout;
+ u32 pci_mode_conflict;
+ u32 host1_fatal;
+ u32 host1_perr;
+ u32 trcv_fifo_perr;
+ u32 radm_cpl_ep;
+ u32 radm_cpl_dllp_abort;
+ u32 radm_cpl_tlp_abort;
+ u32 radm_cpl_ecrc_err;
+ u32 radm_cpl_timeout;
+ u32 local_timeout;
+ u32 pm_access;
+ u32 mac_awake;
+ u32 mac_asleep;
+ u32 mac_sleep_access;
+};
+
+
+/**
+ * struct ath_tx_stats - Statistics about TX
+ * @tx_pkts_all: No. of total frames transmitted, including ones that
+ * may have had errors.
+ * @tx_bytes_all: No. of total bytes transmitted, including ones that
+ * may have had errors.
+ * @queued: Total MPDUs (non-aggr) queued
+ * @completed: Total MPDUs (non-aggr) completed
+ * @a_aggr: Total no. of aggregates queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
+ * @a_completed: Total AMPDUs completed
+ * @a_retries: No. of AMPDUs retried (SW)
+ * @a_xretries: No. of AMPDUs dropped due to xretries
+ * @txerr_filtered: No. of frames with TXERR_FILT flag set.
+ * @fifo_underrun: FIFO underrun occurrences
+ * Valid only for:
+ * - non-aggregate condition.
+ * - first packet of aggregate.
+ * @xtxop: No. of frames filtered because of TXOP limit
+ * @timer_exp: Transmit timer expiry
+ * @desc_cfg_err: Descriptor configuration errors
+ * @data_underrun: TX data underrun errors
+ * @delim_underrun: TX delimiter underrun errors
+ * @puttxbuf: Number of times hardware was given txbuf to write.
+ * @txstart: Number of times hardware was told to start tx.
+ * @txprocdesc: Number of times tx descriptor was processed
+ * @txfailed: Out-of-memory or other errors in xmit path.
+ */
+struct ath_tx_stats {
+ u32 tx_pkts_all;
+ u32 tx_bytes_all;
+ u32 queued;
+ u32 completed;
+ u32 xretries;
+ u32 a_aggr;
+ u32 a_queued_hw;
+ u32 a_queued_sw;
+ u32 a_completed;
+ u32 a_retries;
+ u32 a_xretries;
+ u32 txerr_filtered;
+ u32 fifo_underrun;
+ u32 xtxop;
+ u32 timer_exp;
+ u32 desc_cfg_err;
+ u32 data_underrun;
+ u32 delim_underrun;
+ u32 puttxbuf;
+ u32 txstart;
+ u32 txprocdesc;
+ u32 txfailed;
+};
+
+/*
+ * Various utility macros to print TX/Queue counters.
+ */
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
+#define TXSTATS sc->debug.stats.txstats
+#define PR(str, elem) \
+ do { \
+ seq_printf(file, "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ } while (0)
+
+struct ath_rx_rate_stats {
+ struct {
+ u32 ht20_cnt;
+ u32 ht40_cnt;
+ u32 sgi_cnt;
+ u32 lgi_cnt;
+ } ht_stats[24];
+
+ struct {
+ u32 ofdm_cnt;
+ } ofdm_stats[8];
+
+ struct {
+ u32 cck_lp_cnt;
+ u32 cck_sp_cnt;
+ } cck_stats[4];
+};
+
+#define ANT_MAIN 0
+#define ANT_ALT 1
+
+struct ath_antenna_stats {
+ u32 recv_cnt;
+ u32 rssi_avg;
+ u32 lna_recv_cnt[4];
+ u32 lna_attempt_cnt[4];
+};
+
+struct ath_stats {
+ struct ath_interrupt_stats istats;
+ struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
+ struct ath_dfs_stats dfs_stats;
+ struct ath_antenna_stats ant_stats[2];
+ u32 reset[__RESET_TYPE_MAX];
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ u32 regidx;
+ struct ath_stats stats;
+};
+
+int ath9k_init_debug(struct ath_hw *ah);
+void ath9k_deinit_debug(struct ath_softc *sc);
+
+void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, struct ath_txq *txq,
+ unsigned int flags);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
+int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg);
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause);
+
+#else
+
+static inline int ath9k_init_debug(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static inline void ath9k_deinit_debug(struct ath_softc *sc)
+{
+}
+static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
+ enum ath9k_int status)
+{
+}
+static inline void ath_debug_stat_tx(struct ath_softc *sc,
+ struct ath_buf *bf,
+ struct ath_tx_status *ts,
+ struct ath_txq *txq,
+ unsigned int flags)
+{
+}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+}
+static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+
+}
+
+static inline void
+ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+}
+
+#endif /* CONFIG_ATH9K_DEBUGFS */
+
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb);
+#else
+static inline void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_ATH9K_STATION_STATISTICS */
+
+#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
new file mode 100644
index 000000000..ffca918ff
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*************/
+/* node_aggr */
+/*************/
+
+static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ u32 len = 0, size = 4096;
+ char *buf;
+ ssize_t retval;
+ int tidno, acno;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!an->sta->ht_cap.ht_supported) {
+ len = scnprintf(buf, size, "%s\n",
+ "HT not supported");
+ goto exit;
+ }
+
+ len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
+
+ len += scnprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
+
+ for (acno = 0, ac = &an->ac[acno];
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
+ txq = ac->txq;
+ ath_txq_lock(sc, txq);
+ len += scnprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
+ ath_txq_unlock(sc, txq);
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED");
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ txq = tid->ac->txq;
+ ath_txq_lock(sc, txq);
+ if (tid->active) {
+ len += scnprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d\n",
+ tid->tidno,
+ tid->seq_start,
+ tid->seq_next,
+ tid->baw_size,
+ tid->baw_head,
+ tid->baw_tail,
+ tid->bar_index,
+ tid->sched);
+ }
+ ath_txq_unlock(sc, txq);
+ }
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_node_aggr = {
+ .read = read_file_node_aggr,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/*************/
+/* node_recv */
+/*************/
+
+void ath_debug_rate_stats(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_rx_status *rxs;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+
+ an = (struct ath_node *) sta->drv_priv;
+ rstats = &an->rx_rate_stats;
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ if (IS_HT_RATE(rs->rs_rate)) {
+ if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
+ goto exit;
+
+ if (rxs->flag & RX_FLAG_40MHZ)
+ rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
+
+ if (rxs->flag & RX_FLAG_SHORT_GI)
+ rstats->ht_stats[rxs->rate_idx].sgi_cnt++;
+ else
+ rstats->ht_stats[rxs->rate_idx].lgi_cnt++;
+
+ goto exit;
+ }
+
+ if (IS_CCK_RATE(rs->rs_rate)) {
+ if (rxs->flag & RX_FLAG_SHORTPRE)
+ rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++;
+ else
+ rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++;
+
+ goto exit;
+ }
+
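+ /*
+ * In the 2 GHz band the legacy bitrate table starts with the four
+ * CCK rates, so OFDM rate indices are offset by 4 there.
+ */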
+ if (IS_OFDM_RATE(rs->rs_rate)) {
+ if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+ rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
+ else
+ rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
+ }
+exit:
+ rcu_read_unlock();
+}
+
+#define PRINT_CCK_RATE(str, i, sp) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ (sp) ? rstats->cck_stats[i].cck_sp_cnt : \
+ rstats->cck_stats[i].cck_lp_cnt); \
+ } while (0)
+
+#define PRINT_OFDM_RATE(str, i) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%11s : %10u\n", \
+ str, \
+ rstats->ofdm_stats[i].ofdm_cnt); \
+ } while (0)
+
+static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_rate_stats *rstats;
+ struct ieee80211_sta *sta = an->sta;
+ enum ieee80211_band band;
+ u32 len = 0, size = 4096;
+ char *buf;
+ ssize_t retval;
+ int i;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ band = ah->curchan->chan->band;
+ rstats = &an->rx_rate_stats;
+
+ if (!sta->ht_cap.ht_supported)
+ goto legacy;
+
+ len += scnprintf(buf + len, size - len,
+ "%24s%10s%10s%10s\n",
+ "HT20", "HT40", "SGI", "LGI");
+
+ for (i = 0; i < 24; i++) {
+ len += scnprintf(buf + len, size - len,
+ "%8s%3u : %10u%10u%10u%10u\n",
+ "MCS", i,
+ rstats->ht_stats[i].ht20_cnt,
+ rstats->ht_stats[i].ht40_cnt,
+ rstats->ht_stats[i].sgi_cnt,
+ rstats->ht_stats[i].lgi_cnt);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+legacy:
+ if (band == IEEE80211_BAND_2GHZ) {
+ PRINT_CCK_RATE("CCK-1M/LP", 0, false);
+ PRINT_CCK_RATE("CCK-2M/LP", 1, false);
+ PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
+ PRINT_CCK_RATE("CCK-11M/LP", 3, false);
+
+ PRINT_CCK_RATE("CCK-2M/SP", 1, true);
+ PRINT_CCK_RATE("CCK-5.5M/SP", 2, true);
+ PRINT_CCK_RATE("CCK-11M/SP", 3, true);
+ }
+
+ PRINT_OFDM_RATE("OFDM-6M", 0);
+ PRINT_OFDM_RATE("OFDM-9M", 1);
+ PRINT_OFDM_RATE("OFDM-12M", 2);
+ PRINT_OFDM_RATE("OFDM-18M", 3);
+ PRINT_OFDM_RATE("OFDM-24M", 4);
+ PRINT_OFDM_RATE("OFDM-36M", 5);
+ PRINT_OFDM_RATE("OFDM-48M", 6);
+ PRINT_OFDM_RATE("OFDM-54M", 7);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+#undef PRINT_OFDM_RATE
+#undef PRINT_CCK_RATE
+
+static const struct file_operations fops_node_recv = {
+ .read = read_file_node_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+
+ debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
+ debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 000000000..e98a9eaba
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+ u8 pulse_bw_info;
+ u8 rssi;
+ u8 ext_rssi;
+ u8 pulse_length_ext;
+ u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
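+/* e.g. at 800 ns per duration count: dur = 25 -> (25 * 800 + 500) / 1000 = 20 usecs */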
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+ const u32 AR93X_NSECS_PER_DUR = 800;
+ const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+ u32 nsecs;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+ nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+ else
+ nsecs = dur * AR93X_NSECS_PER_DUR;
+
+ return (nsecs + 500) / 1000;
+}
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+ struct ath_radar_data *ard,
+ struct pulse_event *pe)
+{
+ u8 rssi;
+ u16 dur;
+
+ /*
+ * Only the last 2 bits of the BW info are relevant, they indicate
+ * which channel the radar was detected in.
+ */
+ ard->pulse_bw_info &= 0x03;
+
+ switch (ard->pulse_bw_info) {
+ case PRI_CH_RADAR_FOUND:
+ /* radar in ctrl channel */
+ dur = ard->pulse_length_pri;
+ DFS_STAT_INC(sc, pri_phy_errors);
+ /*
+ * cannot use ctrl channel RSSI
+ * if extension channel is stronger
+ */
+ rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
+ break;
+ case EXT_CH_RADAR_FOUND:
+ /* radar in extension channel */
+ dur = ard->pulse_length_ext;
+ DFS_STAT_INC(sc, ext_phy_errors);
+ /*
+ * cannot use extension channel RSSI
+ * if control channel is stronger
+ */
+ rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
+ break;
+ case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+ /*
+ * Conducted testing, when pulse is on DC, both pri and ext
+ * durations are reported to be same
+ *
+ * Radiated testing, when pulse is on DC, different pri and
+ * ext durations are reported, so take the larger of the two
+ */
+ if (ard->pulse_length_ext >= ard->pulse_length_pri)
+ dur = ard->pulse_length_ext;
+ else
+ dur = ard->pulse_length_pri;
+ DFS_STAT_INC(sc, dc_phy_errors);
+
+ /* when both are present use stronger one */
+ rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
+ break;
+ default:
+ /*
+ * Bogus bandwidth info was received in descriptor,
+ * so ignore this PHY error
+ */
+ DFS_STAT_INC(sc, bwinfo_discards);
+ return false;
+ }
+
+ if (rssi == 0) {
+ DFS_STAT_INC(sc, rssi_discards);
+ return false;
+ }
+
+ /*
+ * TODO: check chirping pulses
+ * checks for chirping are dependent on the DFS regulatory domain
+ * used, which is yet TBD
+ */
+
+ /* convert duration to usecs */
+ pe->width = dur_to_usecs(sc->sc_ah, dur);
+ pe->rssi = rssi;
+
+ DFS_STAT_INC(sc, pulses_detected);
+ return true;
+}
+
+static void
+ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
+{
+ struct dfs_pattern_detector *pd = sc->dfs_detector;
+ DFS_STAT_INC(sc, pulses_processed);
+ if (pd == NULL)
+ return;
+ if (!pd->add_pulse(pd, pe))
+ return;
+ DFS_STAT_INC(sc, radar_detected);
+ ieee80211_radar_detected(sc->hw);
+}
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime)
+{
+ struct ath_radar_data ard;
+ u16 datalen;
+ char *vdata_end;
+ struct pulse_event pe;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ DFS_STAT_INC(sc, pulses_total);
+ if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+ (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+ ath_dbg(common, DFS,
+ "Error: rs_phyer=0x%x not a radar error\n",
+ rs->rs_phyerr);
+ DFS_STAT_INC(sc, pulses_no_dfs);
+ return;
+ }
+
+ datalen = rs->rs_datalen;
+ if (datalen == 0) {
+ DFS_STAT_INC(sc, datalen_discards);
+ return;
+ }
+
+ ard.rssi = rs->rs_rssi_ctl[0];
+ ard.ext_rssi = rs->rs_rssi_ext[0];
+
+ /*
+ * hardware stores this as 8 bit signed value.
+ * we will cap it at 0 if it is a negative number
+ */
+ if (ard.rssi & 0x80)
+ ard.rssi = 0;
+ if (ard.ext_rssi & 0x80)
+ ard.ext_rssi = 0;
+
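+ /*
+ * The pulse summary is appended to the end of the PHY error payload:
+ * the last byte carries the bw info, preceded by the extension and
+ * primary channel pulse durations.
+ */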
+ vdata_end = (char *)data + datalen;
+ ard.pulse_bw_info = vdata_end[-1];
+ ard.pulse_length_ext = vdata_end[-2];
+ ard.pulse_length_pri = vdata_end[-3];
+ pe.freq = ah->curchan->channel;
+ pe.ts = mactime;
+ if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
+ return;
+
+ ath_dbg(common, DFS,
+ "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
+ pe.ts - sc->dfs_prev_pulse_ts);
+ sc->dfs_prev_pulse_ts = pe.ts;
+ if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
+ ath9k_dfs_process_radar_pulse(sc, &pe);
+ if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+ pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
+ ath9k_dfs_process_radar_pulse(sc, &pe);
+ }
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 000000000..c6fa3d5b5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+#include "../dfs_pattern_detector.h"
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the DFS detector for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void
+ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 000000000..8824610c2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
+
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
+
+#define ATH9K_DFS_STAT(s, p) \
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
+#define ATH9K_DFS_POOL_STAT(s, p) \
+ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ dfs_pool_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ "enabled" : "disabled");
+
+ if (!sc->dfs_detector) {
+ len += scnprintf(buf + len, size - len,
+ "DFS detector not enabled\n");
+ goto exit;
+ }
+
+ dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+ ATH9K_DFS_STAT("pulse events reported ", pulses_total);
+ ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
+ ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
+ ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
+ ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
+ ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
+ ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
+ ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+ ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
+ len += scnprintf(buf + len, size - len, "Radar detector statistics "
+ "(current DFS region: %d)\n",
+ sc->dfs_detector->region);
+ ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
+ ATH9K_DFS_STAT("Radars detected ", radar_detected);
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
+ ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
+ ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
+ ATH9K_DFS_POOL_STAT("Pulses in use ", pulse_used);
+ ATH9K_DFS_POOL_STAT("Seqs. allocated ", pseq_allocated);
+ ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error);
+ ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+/* magic number to prevent accidental reset of DFS statistics */
+#define DFS_STATS_RESET_MAGIC 0x80000000
+static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val == DFS_STATS_RESET_MAGIC)
+ memset(&sc->debug.stats.dfs_stats, 0,
+ sizeof(sc->debug.stats.dfs_stats));
+ return count;
+}
+
+static ssize_t write_file_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+
+ ieee80211_radar_detected(sc->hw);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = write_file_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_dfs_stats = {
+ .read = read_file_dfs,
+ .write = write_file_dfs,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+ debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_simulate_radar);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 000000000..7936c9126
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef ATH9K_DFS_DEBUG_H
+#define ATH9K_DFS_DEBUG_H
+
+#include "hw.h"
+
+struct ath_softc;
+
+/**
+ * struct ath_dfs_stats - DFS Statistics per wiphy
+ * @pulses_total: pulses reported by HW
+ * @pulses_no_dfs: pulses wrongly reported as DFS
+ * @pulses_detected: pulses detected so far
+ * @datalen_discards: pulses discarded due to invalid datalen
+ * @rssi_discards: pulses discarded due to invalid RSSI
+ * @bwinfo_discards: pulses discarded due to invalid BW info
+ * @pri_phy_errors: pulses reported for primary channel
+ * @ext_phy_errors: pulses reported for extension channel
+ * @dc_phy_errors: pulses reported for primary + extension channel
+ * @pulses_processed: pulses forwarded to detector
+ * @radar_detected: radars detected
+ */
+struct ath_dfs_stats {
+ /* pulse stats */
+ u32 pulses_total;
+ u32 pulses_no_dfs;
+ u32 pulses_detected;
+ u32 datalen_discards;
+ u32 rssi_discards;
+ u32 bwinfo_discards;
+ u32 pri_phy_errors;
+ u32 ext_phy_errors;
+ u32 dc_phy_errors;
+ /* pattern detection stats */
+ u32 pulses_processed;
+ u32 radar_detected;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
new file mode 100644
index 000000000..22b3cc4c2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+#include "hw.h"
+#include "dynack.h"
+
+#define COMPUTE_TO (5 * HZ)
+#define LATEACK_DELAY (10 * HZ)
+#define LATEACK_TO 256
+#define MAX_DELAY 300
+#define EWMA_LEVEL 96
+#define EWMA_DIV 128
+
+/**
+ * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
+ * @old: previous EWMA value
+ * @new: new sample to fold into the average
+ */
+static inline u32 ath_dynack_ewma(u32 old, u32 new)
+{
+ return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
+}
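+
+/*
+ * With EWMA_LEVEL = 96 and EWMA_DIV = 128 a new sample contributes
+ * (128 - 96) / 128 = 25% and the old average 75%, e.g.
+ * ath_dynack_ewma(40, 80) = (80 * 32 + 40 * 96) / 128 = 50.
+ */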
+
+/**
+ * ath_dynack_get_sifs - get sifs time based on phy used
+ * @ah: ath hw
+ * @phy: phy used
+ *
+ */
+static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy)
+{
+ u32 sifs = CCK_SIFS_TIME;
+
+ if (phy == WLAN_RC_PHY_OFDM) {
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ sifs = OFDM_SIFS_TIME_QUARTER;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ sifs = OFDM_SIFS_TIME_HALF;
+ else
+ sifs = OFDM_SIFS_TIME;
+ }
+ return sifs;
+}
+
+/**
+ * ath_dynack_bssidmask - filter out ACK frames based on BSSID mask
+ * @ah: ath hw
+ * @mac: receiver address
+ */
+static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
+{
+ int i;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ if ((common->macaddr[i] & common->bssidmask[i]) !=
+ (mac[i] & common->bssidmask[i]))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout
+ * @ah: ath hw
+ *
+ * should be called while holding qlock
+ */
+static void ath_dynack_compute_ackto(struct ath_hw *ah)
+{
+ struct ath_node *an;
+ u32 to = 0;
+ struct ath_dynack *da = &ah->dynack;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ list_for_each_entry(an, &da->nodes, list)
+ if (an->ackto > to)
+ to = an->ackto;
+
+ if (to && da->ackto != to) {
+ u32 slottime;
+
+ slottime = (to - 3) / 2;
+ da->ackto = to;
+ ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n",
+ da->ackto, slottime);
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, da->ackto);
+ ath9k_hw_set_cts_timeout(ah, da->ackto);
+ }
+}
+
+/**
+ * ath_dynack_compute_to - compute STA ACK timeout
+ * @ah: ath hw
+ *
+ * should be called while holding qlock
+ */
+static void ath_dynack_compute_to(struct ath_hw *ah)
+{
+ u32 ackto, ack_ts;
+ u8 *dst, *src;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ struct ts_info *st_ts;
+ struct ath_dynack *da = &ah->dynack;
+
+ rcu_read_lock();
+
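+ /*
+ * Walk the tx status and ACK ring buffers in parallel: when an ACK
+ * timestamp lands after the end of a transmitted frame (tstamp + dur),
+ * the difference is taken as that station's ACK turnaround and folded
+ * into its EWMA; samples of MAX_DELAY or more are discarded.
+ */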
+ while (da->st_rbf.h_rb != da->st_rbf.t_rb &&
+ da->ack_rbf.h_rb != da->ack_rbf.t_rb) {
+ ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb];
+ st_ts = &da->st_rbf.ts[da->st_rbf.h_rb];
+ dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest;
+ src = da->st_rbf.addr[da->st_rbf.h_rb].h_src;
+
+ ath_dbg(ath9k_hw_common(ah), DYNACK,
+ "ack_ts %u st_ts %u st_dur %u [%u-%u]\n",
+ ack_ts, st_ts->tstamp, st_ts->dur,
+ da->ack_rbf.h_rb, da->st_rbf.h_rb);
+
+ if (ack_ts > st_ts->tstamp + st_ts->dur) {
+ ackto = ack_ts - st_ts->tstamp - st_ts->dur;
+
+ if (ackto < MAX_DELAY) {
+ sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst,
+ src);
+ if (sta) {
+ an = (struct ath_node *)sta->drv_priv;
+ an->ackto = ath_dynack_ewma(an->ackto,
+ ackto);
+ ath_dbg(ath9k_hw_common(ah), DYNACK,
+ "%pM to %u\n", dst, an->ackto);
+ if (time_is_before_jiffies(da->lto)) {
+ ath_dynack_compute_ackto(ah);
+ da->lto = jiffies + COMPUTE_TO;
+ }
+ }
+ INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
+ }
+ INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
+ } else {
+ INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+/**
+ * ath_dynack_sample_tx_ts - status timestamp sampling method
+ * @ah: ath hw
+ * @skb: socket buffer
+ * @ts: tx status info
+ *
+ */
+void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+ struct ath_tx_status *ts)
+{
+ u8 ridx;
+ struct ieee80211_hdr *hdr;
+ struct ath_dynack *da = &ah->dynack;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
+ return;
+
+ spin_lock_bh(&da->qlock);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* late ACK */
+ if (ts->ts_status & ATH9K_TXERR_XRETRY) {
+ if (ieee80211_is_assoc_req(hdr->frame_control) ||
+ ieee80211_is_assoc_resp(hdr->frame_control)) {
+ ath_dbg(common, DYNACK, "late ack\n");
+ ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
+ ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
+ ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
+ da->lto = jiffies + LATEACK_DELAY;
+ }
+
+ spin_unlock_bh(&da->qlock);
+ return;
+ }
+
+ ridx = ts->ts_rateindex;
+
+ da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
+ da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration;
+ ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
+ ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);
+
+ if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
+ u32 phy, sifs;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_tx_rate *rates = info->status.rates;
+
+ rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
+ if (info->band == IEEE80211_BAND_2GHZ &&
+ !(rate->flags & IEEE80211_RATE_ERP_G))
+ phy = WLAN_RC_PHY_CCK;
+ else
+ phy = WLAN_RC_PHY_OFDM;
+
+ sifs = ath_dynack_get_sifs(ah, phy);
+ da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs;
+ }
+
+ ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
+ hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp,
+ da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb,
+ (da->st_rbf.t_rb + 1) % ATH_DYN_BUF);
+
+ INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
+ if (da->st_rbf.t_rb == da->st_rbf.h_rb)
+ INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
+
+ ath_dynack_compute_to(ah);
+
+ spin_unlock_bh(&da->qlock);
+}
+EXPORT_SYMBOL(ath_dynack_sample_tx_ts);
+
+/**
+ * ath_dynack_sample_ack_ts - ACK timestamp sampling method
+ * @ah: ath hw
+ * @skb: socket buffer
+ * @ts: rx timestamp
+ *
+ */
+void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
+ u32 ts)
+{
+ struct ath_dynack *da = &ah->dynack;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
+ return;
+
+ spin_lock_bh(&da->qlock);
+ da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;
+
+ ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
+ da->ack_rbf.tstamp[da->ack_rbf.t_rb],
+ da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF);
+
+ INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
+ if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
+ INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
+
+ ath_dynack_compute_to(ah);
+
+ spin_unlock_bh(&da->qlock);
+}
+EXPORT_SYMBOL(ath_dynack_sample_ack_ts);
+
+/**
+ * ath_dynack_node_init - init ath_node related info
+ * @ah: ath hw
+ * @an: ath node
+ *
+ */
+void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
+{
+ /* ackto = slottime + sifs + air delay */
+ u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64;
+ struct ath_dynack *da = &ah->dynack;
+
+ an->ackto = ackto;
+
+ spin_lock(&da->qlock);
+ list_add_tail(&an->list, &da->nodes);
+ spin_unlock(&da->qlock);
+}
+EXPORT_SYMBOL(ath_dynack_node_init);
+
+/**
+ * ath_dynack_node_deinit - deinit ath_node related info
+ * @ah: ath hw
+ * @an: ath node
+ *
+ */
+void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
+{
+ struct ath_dynack *da = &ah->dynack;
+
+ spin_lock(&da->qlock);
+ list_del(&an->list);
+ spin_unlock(&da->qlock);
+}
+EXPORT_SYMBOL(ath_dynack_node_deinit);
+
+/**
+ * ath_dynack_reset - reset dynack processing
+ * @ah: ath hw
+ *
+ */
+void ath_dynack_reset(struct ath_hw *ah)
+{
+ /* ackto = slottime + sifs + air delay */
+ u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64;
+ struct ath_dynack *da = &ah->dynack;
+
+ da->lto = jiffies;
+ da->ackto = ackto;
+
+ da->st_rbf.t_rb = 0;
+ da->st_rbf.h_rb = 0;
+ da->ack_rbf.t_rb = 0;
+ da->ack_rbf.h_rb = 0;
+
+ /* init acktimeout */
+ ath9k_hw_setslottime(ah, (ackto - 3) / 2);
+ ath9k_hw_set_ack_timeout(ah, ackto);
+ ath9k_hw_set_cts_timeout(ah, ackto);
+}
+EXPORT_SYMBOL(ath_dynack_reset);
+
+/**
+ * ath_dynack_init - init dynack data structure
+ * @ah: ath hw
+ *
+ */
+void ath_dynack_init(struct ath_hw *ah)
+{
+ struct ath_dynack *da = &ah->dynack;
+
+ memset(da, 0, sizeof(struct ath_dynack));
+
+ spin_lock_init(&da->qlock);
+ INIT_LIST_HEAD(&da->nodes);
+
+ ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION;
+}
diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
new file mode 100644
index 000000000..6d7bef976
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dynack.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DYNACK_H
+#define DYNACK_H
+
+#define ATH_DYN_BUF 64
+
+struct ath_hw;
+struct ath_node;
+
+/**
+ * struct ath_dyn_rxbuf - ACK frame ring buffer
+ * @h_rb: ring buffer head
+ * @t_rb: ring buffer tail
+ * @tstamp: ACK RX timestamp buffer
+ */
+struct ath_dyn_rxbuf {
+ u16 h_rb, t_rb;
+ u32 tstamp[ATH_DYN_BUF];
+};
+
+struct ts_info {
+ u32 tstamp;
+ u32 dur;
+};
+
+struct haddr_pair {
+ u8 h_dest[ETH_ALEN];
+ u8 h_src[ETH_ALEN];
+};
+
+/**
+ * struct ath_dyn_txbuf - tx frame ring buffer
+ * @h_rb: ring buffer head
+ * @t_rb: ring buffer tail
+ * @addr: dest/src address pair for a given TX frame
+ * @ts: TX frame timestamp buffer
+ */
+struct ath_dyn_txbuf {
+ u16 h_rb, t_rb;
+ struct haddr_pair addr[ATH_DYN_BUF];
+ struct ts_info ts[ATH_DYN_BUF];
+};
+
+/**
+ * struct ath_dynack - dynack processing info
+ * @enabled: enable dyn ack processing
+ * @ackto: current ACK timeout
+ * @lto: last ACK timeout computation
+ * @nodes: ath_node linked list
+ * @qlock: ts queue spinlock
+ * @ack_rbf: ACK ts ring buffer
+ * @st_rbf: status ts ring buffer
+ */
+struct ath_dynack {
+ bool enabled;
+ int ackto;
+ unsigned long lto;
+
+ struct list_head nodes;
+
+ /* protect timestamp queue access */
+ spinlock_t qlock;
+ struct ath_dyn_rxbuf ack_rbf;
+ struct ath_dyn_txbuf st_rbf;
+};
+
+#if defined(CONFIG_ATH9K_DYNACK)
+void ath_dynack_reset(struct ath_hw *ah);
+void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an);
+void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
+void ath_dynack_init(struct ath_hw *ah);
+void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
+void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+ struct ath_tx_status *ts);
+#else
+static inline void ath_dynack_init(struct ath_hw *ah) {}
+static inline void ath_dynack_node_init(struct ath_hw *ah,
+ struct ath_node *an) {}
+static inline void ath_dynack_node_deinit(struct ath_hw *ah,
+ struct ath_node *an) {}
+static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
+ struct sk_buff *skb, u32 ts) {}
+static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
+ struct sk_buff *skb,
+ struct ath_tx_status *ts) {}
+#endif
+
+#endif /* DYNACK_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
new file mode 100644
index 000000000..cc81482c9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
+{
+ REG_WRITE(ah, reg, val);
+
+ if (ah->config.analog_shiftreg)
+ udelay(100);
+}
+
+void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
+ u32 shift, u32 val)
+{
+ REG_RMW(ah, reg, ((val << shift) & mask), mask);
+
+ if (ah->config.analog_shiftreg)
+ udelay(100);
+}
+
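+/*
+ * Linear interpolation between (srcLeft, targetLeft) and
+ * (srcRight, targetRight), evaluated at target; e.g.
+ * ath9k_hw_interpolate(50, 40, 60, 10, 20) = (10 * 20 + 10 * 10) / 20 = 15.
+ */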
+int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
+ int16_t targetLeft, int16_t targetRight)
+{
+ int16_t rv;
+
+ if (srcRight == srcLeft) {
+ rv = targetLeft;
+ } else {
+ rv = (int16_t) (((target - srcLeft) * targetRight +
+ (srcRight - target) * targetLeft) /
+ (srcRight - srcLeft));
+ }
+ return rv;
+}
+
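+/*
+ * Returns true when target matches a list entry exactly or lies outside
+ * the list (indexL == indexR then points at the clamped endpoint), and
+ * false when target falls strictly between two entries, with indexL and
+ * indexR bracketing it.
+ */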
+bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
+ u16 *indexL, u16 *indexR)
+{
+ u16 i;
+
+ if (target <= pList[0]) {
+ *indexL = *indexR = 0;
+ return true;
+ }
+ if (target >= pList[listSize - 1]) {
+ *indexL = *indexR = (u16) (listSize - 1);
+ return true;
+ }
+
+ for (i = 0; i < listSize - 1; i++) {
+ if (pList[i] == target) {
+ *indexL = *indexR = i;
+ return true;
+ }
+ if (target < pList[i + 1]) {
+ *indexL = i;
+ *indexR = (u16) (i + 1);
+ return false;
+ }
+ }
+ return false;
+}
+
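+/*
+ * Read EEPROM words in bursts of up to 8: addresses are batched into
+ * addrdata[] and fetched with REG_READ_MULTI(); any remaining tail of
+ * fewer than 8 words is flushed after the loop.
+ */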
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size)
+{
+ int i = 0, j, addr;
+ u32 addrdata[8];
+ u32 data[8];
+
+ for (addr = 0; addr < size; addr++) {
+ addrdata[i] = AR5416_EEPROM_OFFSET +
+ ((addr + eep_start_loc) << AR5416_EEPROM_S);
+ i++;
+ if (i == 8) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ i = 0;
+ }
+ }
+
+ if (i != 0) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ }
+}
+
+static bool ath9k_hw_nvram_read_blob(struct ath_hw *ah, u32 off,
+ u16 *data)
+{
+ u16 *blob_data;
+
+ if (off * sizeof(u16) > ah->eeprom_blob->size)
+ return false;
+
+ blob_data = (u16 *)ah->eeprom_blob->data;
+ *data = blob_data[off];
+ return true;
+}
+
+bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool ret;
+
+ if (ah->eeprom_blob)
+ ret = ath9k_hw_nvram_read_blob(ah, off, data);
+ else
+ ret = common->bus_ops->eeprom_read(common, off, data);
+
+ if (!ret)
+ ath_dbg(common, EEPROM,
+ "unable to read eeprom region at offset %u\n", off);
+
+ return ret;
+}
+
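+/*
+ * Build pRetVpdList by stepping through the power range two units at a
+ * time, linearly interpolating Vpd between the two closest measured
+ * (power, Vpd) points and clamping the indices at the table ends.
+ */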
+void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
+ u8 *pVpdList, u16 numIntercepts,
+ u8 *pRetVpdList)
+{
+ u16 i, k;
+ u8 currPwr = pwrMin;
+ u16 idxL = 0, idxR = 0;
+
+ for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
+ ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
+ numIntercepts, &(idxL),
+ &(idxR));
+ if (idxR < 1)
+ idxR = 1;
+ if (idxL == numIntercepts - 1)
+ idxL = (u16) (numIntercepts - 2);
+ if (pPwrList[idxL] == pPwrList[idxR])
+ k = pVpdList[idxL];
+ else
+ k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
+ (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
+ (pPwrList[idxR] - pPwrList[idxL]));
+ pRetVpdList[i] = (u8) k;
+ currPwr += 2;
+ }
+}
+
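+/*
+ * Look up the calibration entry whose channel matches the current one;
+ * if none matches exactly, interpolate each rate's target power between
+ * the two bracketing calibration channels.
+ */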
+void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_leg *powInfo,
+ u16 numChannels,
+ struct cal_target_power_leg *pNewPower,
+ u16 numRates, bool isExtTarget)
+{
+ struct chan_centers centers;
+ u16 clo, chi;
+ int i;
+ int matchIndex = -1, lowIndex = -1;
+ u16 freq;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
+
+ if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = 0;
+ } else {
+ for (i = 0; (i < numChannels) &&
+ (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = i;
+ break;
+ } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan)) && i > 0 &&
+ freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ lowIndex = i - 1;
+ break;
+ }
+ }
+ if ((matchIndex == -1) && (lowIndex == -1))
+ matchIndex = i - 1;
+ }
+
+ if (matchIndex != -1) {
+ *pNewPower = powInfo[matchIndex];
+ } else {
+ clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
+ IS_CHAN_2GHZ(chan));
+ chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
+ IS_CHAN_2GHZ(chan));
+
+ for (i = 0; i < numRates; i++) {
+ pNewPower->tPow2x[i] =
+ (u8)ath9k_hw_interpolate(freq, clo, chi,
+ powInfo[lowIndex].tPow2x[i],
+ powInfo[lowIndex + 1].tPow2x[i]);
+ }
+ }
+}
+
+void ath9k_hw_get_target_powers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_ht *powInfo,
+ u16 numChannels,
+ struct cal_target_power_ht *pNewPower,
+ u16 numRates, bool isHt40Target)
+{
+ struct chan_centers centers;
+ u16 clo, chi;
+ int i;
+ int matchIndex = -1, lowIndex = -1;
+ u16 freq;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = isHt40Target ? centers.synth_center : centers.ctl_center;
+
+ if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
+ matchIndex = 0;
+ } else {
+ for (i = 0; (i < numChannels) &&
+ (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ matchIndex = i;
+ break;
+ } else
+ if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
+ IS_CHAN_2GHZ(chan)) && i > 0 &&
+ freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
+ IS_CHAN_2GHZ(chan))) {
+ lowIndex = i - 1;
+ break;
+ }
+ }
+ if ((matchIndex == -1) && (lowIndex == -1))
+ matchIndex = i - 1;
+ }
+
+ if (matchIndex != -1) {
+ *pNewPower = powInfo[matchIndex];
+ } else {
+ clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
+ IS_CHAN_2GHZ(chan));
+ chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
+ IS_CHAN_2GHZ(chan));
+
+ for (i = 0; i < numRates; i++) {
+ pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
+ clo, chi,
+ powInfo[lowIndex].tPow2x[i],
+ powInfo[lowIndex + 1].tPow2x[i]);
+ }
+ }
+}
+
+u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
+ bool is2GHz, int num_band_edges)
+{
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ int i;
+
+ for (i = 0; (i < num_band_edges) &&
+ (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
+ if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
+ twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
+ break;
+ } else if ((i > 0) &&
+ (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
+ is2GHz))) {
+ if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
+ is2GHz) < freq &&
+ CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
+ twiceMaxEdgePower =
+ CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
+ }
+ break;
+ }
+ }
+
+ return twiceMaxEdgePower;
+}
+
+u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
+ u8 antenna_reduction)
+{
+ u16 reduction = antenna_reduction;
+
+ /*
+ * Reduce scaled Power by number of chains active
+ * to get the per chain tx power level.
+ */
+ switch (ar5416_get_ntxchains(ah->txchainmask)) {
+ case 1:
+ break;
+ case 2:
+ reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
+ break;
+ case 3:
+ reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
+ break;
+ }
+
+ if (power_limit > reduction)
+ power_limit -= reduction;
+ else
+ power_limit = 0;
+
+ return power_limit;
+}
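
Powers here are in half-dB steps, so POWER_CORRECTION_FOR_TWO_CHAIN (6) and POWER_CORRECTION_FOR_THREE_CHAIN (10) correspond to roughly 10*log10(2) dB and 10*log10(3) dB, keeping the total radiated power across chains within the limit. A standalone sketch of the reduction, with an invented 30 dBm (60 half-dB) limit and no antenna-gain reduction:

/* Standalone illustration only; the limit and chain count are made up. */
#include <stdio.h>

static unsigned int scaled_power(unsigned int limit, unsigned int ant_reduction,
				 int nchains)
{
	unsigned int reduction = ant_reduction;	/* all values in half-dB */

	if (nchains == 2)
		reduction += 6;		/* POWER_CORRECTION_FOR_TWO_CHAIN */
	else if (nchains == 3)
		reduction += 10;	/* POWER_CORRECTION_FOR_THREE_CHAIN */

	return limit > reduction ? limit - reduction : 0;
}

int main(void)
{
	/* 30 dBm limit, two active chains: 60 - 6 = 54 half-dB,
	 * i.e. 27 dBm per chain. */
	printf("%u\n", scaled_power(60, 0, 2));	/* prints 54 */
	return 0;
}
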
+
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+
+ switch (ar5416_get_ntxchains(ah->txchainmask)) {
+ case 1:
+ break;
+ case 2:
+ regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
+ break;
+ case 3:
+ regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
+ break;
+ default:
+ ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
+ break;
+ }
+}
+
+void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ void *pRawDataSet,
+ u8 *bChans, u16 availPiers,
+ u16 tPdGainOverlap,
+ u16 *pPdGainBoundaries, u8 *pPDADCValues,
+ u16 numXpdGains)
+{
+ int i, j, k;
+ int16_t ss;
+ u16 idxL = 0, idxR = 0, numPiers;
+ static u8 vpdTableL[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableR[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableI[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+
+ u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
+ u8 minPwrT4[AR5416_NUM_PD_GAINS];
+ u8 maxPwrT4[AR5416_NUM_PD_GAINS];
+ int16_t vpdStep;
+ int16_t tmpVal;
+ u16 sizeCurrVpdTable, maxIndex, tgtIndex;
+ bool match;
+ int16_t minDelta = 0;
+ struct chan_centers centers;
+ int pdgain_boundary_default;
+ struct cal_data_per_freq *data_def = pRawDataSet;
+ struct cal_data_per_freq_4k *data_4k = pRawDataSet;
+ struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet;
+ bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah);
+ int intercepts;
+
+ if (AR_SREV_9287(ah))
+ intercepts = AR9287_PD_GAIN_ICEPTS;
+ else
+ intercepts = AR5416_PD_GAIN_ICEPTS;
+
+ memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS);
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++) {
+ if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+ }
+
+ match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan)),
+ bChans, numPiers, &idxL, &idxR);
+
+ if (match) {
+ if (AR_SREV_9287(ah)) {
+ /* FIXME: array overrun? */
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_9287[idxL].pwrPdg[i],
+ data_9287[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ } else if (eeprom_4k) {
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_4k[idxL].pwrPdg[i],
+ data_4k[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ } else {
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_def[idxL].pwrPdg[i],
+ data_def[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ }
+ } else {
+ for (i = 0; i < numXpdGains; i++) {
+ if (AR_SREV_9287(ah)) {
+ pVpdL = data_9287[idxL].vpdPdg[i];
+ pPwrL = data_9287[idxL].pwrPdg[i];
+ pVpdR = data_9287[idxR].vpdPdg[i];
+ pPwrR = data_9287[idxR].pwrPdg[i];
+ } else if (eeprom_4k) {
+ pVpdL = data_4k[idxL].vpdPdg[i];
+ pPwrL = data_4k[idxL].pwrPdg[i];
+ pVpdR = data_4k[idxR].vpdPdg[i];
+ pPwrR = data_4k[idxR].pwrPdg[i];
+ } else {
+ pVpdL = data_def[idxL].vpdPdg[i];
+ pPwrL = data_def[idxL].pwrPdg[i];
+ pVpdR = data_def[idxR].vpdPdg[i];
+ pPwrR = data_def[idxR].pwrPdg[i];
+ }
+
+ minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
+
+ maxPwrT4[i] =
+ min(pPwrL[intercepts - 1],
+ pPwrR[intercepts - 1]);
+
+
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrL, pVpdL,
+ intercepts,
+ vpdTableL[i]);
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrR, pVpdR,
+ intercepts,
+ vpdTableR[i]);
+
+ for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
+ vpdTableI[i][j] =
+ (u8)ath9k_hw_interpolate((u16)FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan)),
+ bChans[idxL], bChans[idxR],
+ vpdTableL[i][j], vpdTableR[i][j]);
+ }
+ }
+ }
+
+ k = 0;
+
+ for (i = 0; i < numXpdGains; i++) {
+ if (i == (numXpdGains - 1))
+ pPdGainBoundaries[i] =
+ (u16)(maxPwrT4[i] / 2);
+ else
+ pPdGainBoundaries[i] =
+ (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
+
+ pPdGainBoundaries[i] =
+ min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);
+
+ minDelta = 0;
+
+ if (i == 0) {
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ ss = (int16_t)(0 - (minPwrT4[i] / 2));
+ else
+ ss = 0;
+ } else {
+ ss = (int16_t)((pPdGainBoundaries[i - 1] -
+ (minPwrT4[i] / 2)) -
+ tPdGainOverlap + 1 + minDelta);
+ }
+ vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
+ vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
+ while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
+ pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
+ ss++;
+ }
+
+ sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
+ tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
+ (minPwrT4[i] / 2));
+ maxIndex = (tgtIndex < sizeCurrVpdTable) ?
+ tgtIndex : sizeCurrVpdTable;
+
+ while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ pPDADCValues[k++] = vpdTableI[i][ss++];
+ }
+
+ vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
+ vpdTableI[i][sizeCurrVpdTable - 2]);
+ vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
+ if (tgtIndex >= maxIndex) {
+ while ((ss <= tgtIndex) &&
+ (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
+ (ss - maxIndex + 1) * vpdStep));
+ pPDADCValues[k++] = (u8)((tmpVal > 255) ?
+ 255 : tmpVal);
+ ss++;
+ }
+ }
+ }
+
+ if (eeprom_4k)
+ pdgain_boundary_default = 58;
+ else
+ pdgain_boundary_default = pPdGainBoundaries[i - 1];
+
+ while (i < AR5416_PD_GAINS_IN_MASK) {
+ pPdGainBoundaries[i] = pdgain_boundary_default;
+ i++;
+ }
+
+ while (k < AR5416_NUM_PDADC_VALUES) {
+ pPDADCValues[k] = pPDADCValues[k - 1];
+ k++;
+ }
+}
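
The pd-gain boundaries computed above sit halfway between the top of one gain's power range and the bottom of the next; the pwrT4 calibration values are in quarter-dB, the boundaries in half-dB. A standalone sketch of just that boundary rule, with made-up calibration powers (the PDADC curve stitching is omitted):

/* Standalone illustration only; calibration powers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned char min_pwr_t4[2] = { 20, 48 };	/*  5.0 and 12.0 dBm */
	unsigned char max_pwr_t4[2] = { 60, 88 };	/* 15.0 and 22.0 dBm */
	unsigned short boundaries[2];
	int i, ngains = 2;

	for (i = 0; i < ngains; i++) {
		if (i == ngains - 1)	/* last gain: top of its own range */
			boundaries[i] = max_pwr_t4[i] / 2;
		else			/* midpoint between adjacent gain ranges */
			boundaries[i] = (max_pwr_t4[i] + min_pwr_t4[i + 1]) / 4;
		printf("boundary[%d] = %u half-dB\n", i, boundaries[i]);
	}
	/* prints 27 (13.5 dBm) and 44 (22 dBm) */
	return 0;
}
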
+
+int ath9k_hw_eeprom_init(struct ath_hw *ah)
+{
+ int status;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->eep_ops = &eep_ar9300_ops;
+ else if (AR_SREV_9287(ah)) {
+ ah->eep_ops = &eep_ar9287_ops;
+ } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
+ ah->eep_ops = &eep_4k_ops;
+ } else {
+ ah->eep_ops = &eep_def_ops;
+ }
+
+ if (!ah->eep_ops->fill_eeprom(ah))
+ return -EIO;
+
+ status = ah->eep_ops->check_eeprom(ah);
+
+ return status;
+}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
new file mode 100644
index 000000000..40d4f62d0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef EEPROM_H
+#define EEPROM_H
+
+#define AR_EEPROM_MODAL_SPURS 5
+
+#include "../ath.h"
+#include <net/cfg80211.h>
+#include "ar9003_eeprom.h"
+
+#ifdef __BIG_ENDIAN
+#define AR5416_EEPROM_MAGIC 0x5aa5
+#else
+#define AR5416_EEPROM_MAGIC 0xa55a
+#endif
+
+#define CTRY_DEBUG 0x1ff
+#define CTRY_DEFAULT 0
+
+#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
+#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
+#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
+#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
+#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
+#define AR_EEPROM_EEPCAP_MAXQCU_S 4
+#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
+#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
+#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
+
+#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
+#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
+
+#define AR5416_EEPROM_MAGIC_OFFSET 0x0
+#define AR5416_EEPROM_S 2
+#define AR5416_EEPROM_OFFSET 0x2000
+#define AR5416_EEPROM_MAX 0xae0
+
+#define AR5416_EEPROM_START_ADDR \
+ (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
+
+#define SD_NO_CTL 0xE0
+#define NO_CTL 0xff
+#define CTL_MODE_M 0xf
+#define CTL_11A 0
+#define CTL_11B 1
+#define CTL_11G 2
+#define CTL_2GHT20 5
+#define CTL_5GHT20 6
+#define CTL_2GHT40 7
+#define CTL_5GHT40 8
+
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+
+#define SUB_NUM_CTL_MODES_AT_5G_40 2
+#define SUB_NUM_CTL_MODES_AT_2G_40 3
+
+#define POWER_CORRECTION_FOR_TWO_CHAIN 6 /* 10*log10(2)*2 */
+#define POWER_CORRECTION_FOR_THREE_CHAIN 10 /* 10*log10(3)*2 */
+
+/*
+ * For AR9285 and later chipsets, the following bits are not programmed
+ * in the EEPROM and so always need to be enabled.
+ *
+ * Bit 0: en_fcc_mid
+ * Bit 1: en_jap_mid
+ * Bit 2: en_fcc_dfs_ht40
+ * Bit 3: en_jap_ht40
+ * Bit 4: en_jap_dfs_ht40
+ */
+#define AR9285_RDEXT_DEFAULT 0x1F
+
+#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
+#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
+
+#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
+#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
+ ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
+ ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+
+#define EEP_RFSILENT_ENABLED 0x0001
+#define EEP_RFSILENT_ENABLED_S 0
+#define EEP_RFSILENT_POLARITY 0x0002
+#define EEP_RFSILENT_POLARITY_S 1
+#define EEP_RFSILENT_GPIO_SEL ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
+#define EEP_RFSILENT_GPIO_SEL_S 2
+
+#define AR5416_OPFLAGS_11A 0x01
+#define AR5416_OPFLAGS_11G 0x02
+#define AR5416_OPFLAGS_N_5G_HT40 0x04
+#define AR5416_OPFLAGS_N_2G_HT40 0x08
+#define AR5416_OPFLAGS_N_5G_HT20 0x10
+#define AR5416_OPFLAGS_N_2G_HT20 0x20
+
+#define AR5416_EEP_NO_BACK_VER 0x1
+#define AR5416_EEP_VER 0xE
+#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
+#define AR5416_EEP_MINOR_VER_2 0x2
+#define AR5416_EEP_MINOR_VER_3 0x3
+#define AR5416_EEP_MINOR_VER_7 0x7
+#define AR5416_EEP_MINOR_VER_9 0x9
+#define AR5416_EEP_MINOR_VER_16 0x10
+#define AR5416_EEP_MINOR_VER_17 0x11
+#define AR5416_EEP_MINOR_VER_19 0x13
+#define AR5416_EEP_MINOR_VER_20 0x14
+#define AR5416_EEP_MINOR_VER_21 0x15
+#define AR5416_EEP_MINOR_VER_22 0x16
+
+#define AR5416_NUM_5G_CAL_PIERS 8
+#define AR5416_NUM_2G_CAL_PIERS 4
+#define AR5416_NUM_5G_20_TARGET_POWERS 8
+#define AR5416_NUM_5G_40_TARGET_POWERS 8
+#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
+#define AR5416_NUM_2G_20_TARGET_POWERS 4
+#define AR5416_NUM_2G_40_TARGET_POWERS 4
+#define AR5416_NUM_CTLS 24
+#define AR5416_NUM_BAND_EDGES 8
+#define AR5416_NUM_PD_GAINS 4
+#define AR5416_PD_GAINS_IN_MASK 4
+#define AR5416_PD_GAIN_ICEPTS 5
+#define AR5416_NUM_PDADC_VALUES 128
+#define AR5416_BCHAN_UNUSED 0xFF
+#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR5416_MAX_CHAINS 3
+#define AR9300_MAX_CHAINS 3
+#define AR5416_PWR_TABLE_OFFSET_DB -5
+
+/* Rx gain type values */
+#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
+#define AR5416_EEP_RXGAIN_13DB_BACKOFF 1
+#define AR5416_EEP_RXGAIN_ORIG 2
+
+/* Tx gain type values */
+#define AR5416_EEP_TXGAIN_ORIGINAL 0
+#define AR5416_EEP_TXGAIN_HIGH_POWER 1
+
+#define AR5416_EEP4K_START_LOC 64
+#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3
+#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
+#define AR5416_EEP4K_NUM_2G_20_TARGET_POWERS 3
+#define AR5416_EEP4K_NUM_2G_40_TARGET_POWERS 3
+#define AR5416_EEP4K_NUM_CTLS 12
+#define AR5416_EEP4K_NUM_BAND_EDGES 4
+#define AR5416_EEP4K_NUM_PD_GAINS 2
+#define AR5416_EEP4K_MAX_CHAINS 1
+
+#define AR9280_TX_GAIN_TABLE_SIZE 22
+
+#define AR9287_EEP_VER 0xE
+#define AR9287_EEP_VER_MINOR_MASK 0xFFF
+#define AR9287_EEP_MINOR_VER_1 0x1
+#define AR9287_EEP_MINOR_VER_2 0x2
+#define AR9287_EEP_MINOR_VER_3 0x3
+#define AR9287_EEP_MINOR_VER AR9287_EEP_MINOR_VER_3
+#define AR9287_EEP_MINOR_VER_b AR9287_EEP_MINOR_VER
+#define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1
+
+#define AR9287_EEP_START_LOC 128
+#define AR9287_HTC_EEP_START_LOC 256
+#define AR9287_NUM_2G_CAL_PIERS 3
+#define AR9287_NUM_2G_CCK_TARGET_POWERS 3
+#define AR9287_NUM_2G_20_TARGET_POWERS 3
+#define AR9287_NUM_2G_40_TARGET_POWERS 3
+#define AR9287_NUM_CTLS 12
+#define AR9287_NUM_BAND_EDGES 4
+#define AR9287_PD_GAIN_ICEPTS 1
+#define AR9287_EEPMISC_BIG_ENDIAN 0x01
+#define AR9287_EEPMISC_WOW 0x02
+#define AR9287_MAX_CHAINS 2
+#define AR9287_ANT_16S 32
+
+#define AR9287_DATA_SZ 32
+
+#define AR9287_PWR_TABLE_OFFSET_DB -5
+
+#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
+
+#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+
+#define LNA_CTL_BUF_MODE BIT(0)
+#define LNA_CTL_ISEL_LO BIT(1)
+#define LNA_CTL_ISEL_HI BIT(2)
+#define LNA_CTL_BUF_IN BIT(3)
+#define LNA_CTL_FEM_BAND BIT(4)
+#define LNA_CTL_LOCAL_BIAS BIT(5)
+#define LNA_CTL_FORCE_XPA BIT(6)
+#define LNA_CTL_USE_ANT1 BIT(7)
+
+enum eeprom_param {
+ EEP_NFTHRESH_5,
+ EEP_NFTHRESH_2,
+ EEP_MAC_MSW,
+ EEP_MAC_MID,
+ EEP_MAC_LSW,
+ EEP_REG_0,
+ EEP_OP_CAP,
+ EEP_OP_MODE,
+ EEP_RF_SILENT,
+ EEP_OB_5,
+ EEP_DB_5,
+ EEP_OB_2,
+ EEP_DB_2,
+ EEP_MINOR_REV,
+ EEP_TX_MASK,
+ EEP_RX_MASK,
+ EEP_FSTCLK_5G,
+ EEP_RXGAIN_TYPE,
+ EEP_OL_PWRCTRL,
+ EEP_TXGAIN_TYPE,
+ EEP_RC_CHAIN_MASK,
+ EEP_DAC_HPWR_5G,
+ EEP_FRAC_N_5G,
+ EEP_DEV_TYPE,
+ EEP_TEMPSENSE_SLOPE,
+ EEP_TEMPSENSE_SLOPE_PAL_ON,
+ EEP_PWR_TABLE_OFFSET,
+ EEP_PAPRD,
+ EEP_MODAL_VER,
+ EEP_ANT_DIV_CTL1,
+ EEP_CHAIN_MASK_REDUCE,
+ EEP_ANTENNA_GAIN_2G,
+ EEP_ANTENNA_GAIN_5G,
+};
+
+enum ar5416_rates {
+ rate6mb, rate9mb, rate12mb, rate18mb,
+ rate24mb, rate36mb, rate48mb, rate54mb,
+ rate1l, rate2l, rate2s, rate5_5l,
+ rate5_5s, rate11l, rate11s, rateXr,
+ rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
+ rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
+ rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
+ rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
+ rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
+ Ar5416RateSize
+};
+
+enum ath9k_hal_freq_band {
+ ATH9K_HAL_FREQ_BAND_5GHZ = 0,
+ ATH9K_HAL_FREQ_BAND_2GHZ = 1
+};
+
+struct base_eep_header {
+ u16 length;
+ u16 checksum;
+ u16 version;
+ u8 opCapFlags;
+ u8 eepMisc;
+ u16 regDmn[2];
+ u8 macAddr[6];
+ u8 rxMask;
+ u8 txMask;
+ u16 rfSilent;
+ u16 blueToothOptions;
+ u16 deviceCap;
+ u32 binBuildNumber;
+ u8 deviceType;
+ u8 pwdclkind;
+ u8 fastClk5g;
+ u8 divChain;
+ u8 rxGainType;
+ u8 dacHiPwrMode_5G;
+ u8 openLoopPwrCntl;
+ u8 dacLpMode;
+ u8 txGainType;
+ u8 rcChainMask;
+ u8 desiredScaleCCK;
+ u8 pwr_table_offset;
+ u8 frac_n_5g;
+ u8 futureBase_3[21];
+} __packed;
+
+struct base_eep_header_4k {
+ u16 length;
+ u16 checksum;
+ u16 version;
+ u8 opCapFlags;
+ u8 eepMisc;
+ u16 regDmn[2];
+ u8 macAddr[6];
+ u8 rxMask;
+ u8 txMask;
+ u16 rfSilent;
+ u16 blueToothOptions;
+ u16 deviceCap;
+ u32 binBuildNumber;
+ u8 deviceType;
+ u8 txGainType;
+} __packed;
+
+
+struct spur_chan {
+ u16 spurChan;
+ u8 spurRangeLow;
+ u8 spurRangeHigh;
+} __packed;
+
+struct modal_eep_header {
+ u32 antCtrlChain[AR5416_MAX_CHAINS];
+ u32 antCtrlCommon;
+ u8 antennaGainCh[AR5416_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR5416_MAX_CHAINS];
+ u8 rxTxMarginCh[AR5416_MAX_CHAINS];
+ u8 adcDesiredSize;
+ u8 pgaDesiredSize;
+ u8 xlnaGainCh[AR5416_MAX_CHAINS];
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ u8 iqCalICh[AR5416_MAX_CHAINS];
+ u8 iqCalQCh[AR5416_MAX_CHAINS];
+ u8 pdGainOverlap;
+ u8 ob;
+ u8 db;
+ u8 xpaBiasLvl;
+ u8 pwrDecreaseFor2Chain;
+ u8 pwrDecreaseFor3Chain;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR5416_MAX_CHAINS];
+ u8 bswMargin[AR5416_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 xatten2Db[AR5416_MAX_CHAINS];
+ u8 xatten2Margin[AR5416_MAX_CHAINS];
+ u8 ob_ch1;
+ u8 db_ch1;
+ u8 lna_ctl;
+ u8 miscBits;
+ u16 xpaBiasLvlFreq[3];
+ u8 futureModal[6];
+
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
+} __packed;
+
+struct calDataPerFreqOpLoop {
+ u8 pwrPdg[2][5];
+ u8 vpdPdg[2][5];
+ u8 pcdac[2][5];
+ u8 empty[2][5];
+} __packed;
+
+struct modal_eep_4k_header {
+ u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
+ u32 antCtrlCommon;
+ u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 rxTxMarginCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 adcDesiredSize;
+ u8 pgaDesiredSize;
+ u8 xlnaGainCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 noiseFloorThreshCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ u8 iqCalICh[AR5416_EEP4K_MAX_CHAINS];
+ u8 iqCalQCh[AR5416_EEP4K_MAX_CHAINS];
+ u8 pdGainOverlap;
+#ifdef __BIG_ENDIAN_BITFIELD
+ u8 ob_1:4, ob_0:4;
+ u8 db1_1:4, db1_0:4;
+#else
+ u8 ob_0:4, ob_1:4;
+ u8 db1_0:4, db1_1:4;
+#endif
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR5416_EEP4K_MAX_CHAINS];
+ u8 bswMargin[AR5416_EEP4K_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 xatten2Db[AR5416_EEP4K_MAX_CHAINS];
+ u8 xatten2Margin[AR5416_EEP4K_MAX_CHAINS];
+#ifdef __BIG_ENDIAN_BITFIELD
+ u8 db2_1:4, db2_0:4;
+#else
+ u8 db2_0:4, db2_1:4;
+#endif
+ u8 version;
+#ifdef __BIG_ENDIAN_BITFIELD
+ u8 ob_3:4, ob_2:4;
+ u8 antdiv_ctl1:4, ob_4:4;
+ u8 db1_3:4, db1_2:4;
+ u8 antdiv_ctl2:4, db1_4:4;
+ u8 db2_2:4, db2_3:4;
+ u8 reserved:4, db2_4:4;
+#else
+ u8 ob_2:4, ob_3:4;
+ u8 ob_4:4, antdiv_ctl1:4;
+ u8 db1_2:4, db1_3:4;
+ u8 db1_4:4, antdiv_ctl2:4;
+ u8 db2_2:4, db2_3:4;
+ u8 db2_4:4, reserved:4;
+#endif
+ u8 tx_diversity;
+ u8 flc_pwr_thresh;
+ u8 bb_scale_smrt_antenna;
+#define EEP_4K_BB_DESIRED_SCALE_MASK 0x1f
+ u8 futureModal[1];
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
+} __packed;
+
+struct base_eep_ar9287_header {
+ u16 length;
+ u16 checksum;
+ u16 version;
+ u8 opCapFlags;
+ u8 eepMisc;
+ u16 regDmn[2];
+ u8 macAddr[6];
+ u8 rxMask;
+ u8 txMask;
+ u16 rfSilent;
+ u16 blueToothOptions;
+ u16 deviceCap;
+ u32 binBuildNumber;
+ u8 deviceType;
+ u8 openLoopPwrCntl;
+ int8_t pwrTableOffset;
+ int8_t tempSensSlope;
+ int8_t tempSensSlopePalOn;
+ u8 futureBase[29];
+} __packed;
+
+struct modal_eep_ar9287_header {
+ u32 antCtrlChain[AR9287_MAX_CHAINS];
+ u32 antCtrlCommon;
+ int8_t antennaGainCh[AR9287_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR9287_MAX_CHAINS];
+ u8 rxTxMarginCh[AR9287_MAX_CHAINS];
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ int8_t noiseFloorThreshCh[AR9287_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ int8_t iqCalICh[AR9287_MAX_CHAINS];
+ int8_t iqCalQCh[AR9287_MAX_CHAINS];
+ u8 pdGainOverlap;
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR9287_MAX_CHAINS];
+ u8 bswMargin[AR9287_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 version;
+ u8 db1;
+ u8 db2;
+ u8 ob_cck;
+ u8 ob_psk;
+ u8 ob_qam;
+ u8 ob_pal_off;
+ u8 futureModal[30];
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
+} __packed;
+
+struct cal_data_per_freq {
+ u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+} __packed;
+
+struct cal_data_per_freq_4k {
+ u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+} __packed;
+
+struct cal_target_power_leg {
+ u8 bChannel;
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_target_power_ht {
+ u8 bChannel;
+ u8 tPow2x[8];
+} __packed;
+
+struct cal_ctl_edges {
+ u8 bChannel;
+ u8 ctl;
+} __packed;
+
+struct cal_data_op_loop_ar9287 {
+ u8 pwrPdg[2][5];
+ u8 vpdPdg[2][5];
+ u8 pcdac[2][5];
+ u8 empty[2][5];
+} __packed;
+
+struct cal_data_per_freq_ar9287 {
+ u8 pwrPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
+} __packed;
+
+union cal_data_per_freq_ar9287_u {
+ struct cal_data_op_loop_ar9287 calDataOpen;
+ struct cal_data_per_freq_ar9287 calDataClose;
+} __packed;
+
+struct cal_ctl_data_ar9287 {
+ struct cal_ctl_edges
+ ctlEdges[AR9287_MAX_CHAINS][AR9287_NUM_BAND_EDGES];
+} __packed;
+
+struct cal_ctl_data {
+ struct cal_ctl_edges
+ ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
+} __packed;
+
+struct cal_ctl_data_4k {
+ struct cal_ctl_edges
+ ctlEdges[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_BAND_EDGES];
+} __packed;
+
+struct ar5416_eeprom_def {
+ struct base_eep_header baseEepHeader;
+ u8 custData[64];
+ struct modal_eep_header modalHeader[2];
+ u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
+ u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
+ struct cal_data_per_freq
+ calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
+ struct cal_data_per_freq
+ calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
+ struct cal_target_power_leg
+ calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex[AR5416_NUM_CTLS];
+ struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
+ u8 padding;
+} __packed;
+
+struct ar5416_eeprom_4k {
+ struct base_eep_header_4k baseEepHeader;
+ u8 custData[20];
+ struct modal_eep_4k_header modalHeader;
+ u8 calFreqPier2G[AR5416_EEP4K_NUM_2G_CAL_PIERS];
+ struct cal_data_per_freq_4k
+ calPierData2G[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_2G_CAL_PIERS];
+ struct cal_target_power_leg
+ calTargetPowerCck[AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPower2G[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT20[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT40[AR5416_EEP4K_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex[AR5416_EEP4K_NUM_CTLS];
+ struct cal_ctl_data_4k ctlData[AR5416_EEP4K_NUM_CTLS];
+ u8 padding;
+} __packed;
+
+struct ar9287_eeprom {
+ struct base_eep_ar9287_header baseEepHeader;
+ u8 custData[AR9287_DATA_SZ];
+ struct modal_eep_ar9287_header modalHeader;
+ u8 calFreqPier2G[AR9287_NUM_2G_CAL_PIERS];
+ union cal_data_per_freq_ar9287_u
+ calPierData2G[AR9287_MAX_CHAINS][AR9287_NUM_2G_CAL_PIERS];
+ struct cal_target_power_leg
+ calTargetPowerCck[AR9287_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_target_power_leg
+ calTargetPower2G[AR9287_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT20[AR9287_NUM_2G_20_TARGET_POWERS];
+ struct cal_target_power_ht
+ calTargetPower2GHT40[AR9287_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex[AR9287_NUM_CTLS];
+ struct cal_ctl_data_ar9287 ctlData[AR9287_NUM_CTLS];
+ u8 padding;
+} __packed;
+
+enum reg_ext_bitmap {
+ REG_EXT_FCC_MIDBAND = 0,
+ REG_EXT_JAPAN_MIDBAND = 1,
+ REG_EXT_FCC_DFS_HT40 = 2,
+ REG_EXT_JAPAN_NONDFS_HT40 = 3,
+ REG_EXT_JAPAN_DFS_HT40 = 4
+};
+
+struct ath9k_country_entry {
+ u16 countryCode;
+ u16 regDmnEnum;
+ u16 regDmn5G;
+ u16 regDmn2G;
+ u8 isMultidomain;
+ u8 iso[3];
+};
+
+struct eeprom_ops {
+ int (*check_eeprom)(struct ath_hw *hw);
+ u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
+ bool (*fill_eeprom)(struct ath_hw *hw);
+ u32 (*dump_eeprom)(struct ath_hw *hw, bool dump_base_hdr, u8 *buf,
+ u32 len, u32 size);
+ int (*get_eeprom_ver)(struct ath_hw *hw);
+ int (*get_eeprom_rev)(struct ath_hw *hw);
+ void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
+ void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
+ void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
+ u16 cfgCtl, u8 twiceAntennaReduction,
+ u8 powerLimit, bool test);
+ u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
+};
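
struct eeprom_ops is a per-chip operations table: ath9k_hw_eeprom_init() in eeprom.c selects one of the eep_*_ops instances and every later EEPROM query goes through these function pointers. A standalone sketch of that dispatch pattern (the names and values below are illustrative only, not the driver's):

/* Standalone illustration only. */
#include <stdio.h>

struct demo_eeprom_ops {
	int (*check)(void);
	int (*get_ver)(void);
};

static int demo_check(void)   { return 0; }	/* 0 == OK, like check_eeprom */
static int demo_get_ver(void) { return 0xE; }	/* cf. AR5416_EEP_VER */

static const struct demo_eeprom_ops demo_def_ops = {
	.check   = demo_check,
	.get_ver = demo_get_ver,
};

int main(void)
{
	/* Selection happens once, as in ath9k_hw_eeprom_init(). */
	const struct demo_eeprom_ops *ops = &demo_def_ops;

	if (ops->check() == 0)
		printf("eeprom version 0x%x\n", ops->get_ver());
	return 0;
}
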
+
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
+void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
+ u32 shift, u32 val);
+int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
+ int16_t targetLeft,
+ int16_t targetRight);
+bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
+ u16 *indexL, u16 *indexR);
+bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size);
+void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
+ u8 *pVpdList, u16 numIntercepts,
+ u8 *pRetVpdList);
+void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_leg *powInfo,
+ u16 numChannels,
+ struct cal_target_power_leg *pNewPower,
+ u16 numRates, bool isExtTarget);
+void ath9k_hw_get_target_powers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_target_power_ht *powInfo,
+ u16 numChannels,
+ struct cal_target_power_ht *pNewPower,
+ u16 numRates, bool isHt40Target);
+u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
+ bool is2GHz, int num_band_edges);
+u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
+ u8 antenna_reduction);
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
+int ath9k_hw_eeprom_init(struct ath_hw *ah);
+
+void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ void *pRawDataSet,
+ u8 *bChans, u16 availPiers,
+ u16 tPdGainOverlap,
+ u16 *pPdGainBoundaries, u8 *pPDADCValues,
+ u16 numXpdGains);
+
+static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
+{
+ if (fbin == AR5416_BCHAN_UNUSED)
+ return fbin;
+
+ return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
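
The fbin encoding stores 2 GHz channels as an offset from 2300 MHz and 5 GHz channels in 5 MHz steps from 4800 MHz, matching FREQ2FBIN()/FBIN2FREQ() and the inline above. A standalone sketch with two example channels:

/* Standalone illustration only. */
#include <stdio.h>

static unsigned int fbin2freq(unsigned char fbin, int is_2ghz)
{
	return is_2ghz ? 2300 + fbin : 4800 + 5 * fbin;
}

static unsigned char freq2fbin(unsigned int freq, int is_2ghz)
{
	return is_2ghz ? freq - 2300 : (freq - 4800) / 5;
}

int main(void)
{
	printf("%u\n", fbin2freq(112, 1));	/* 2412 MHz, 2 GHz channel 1  */
	printf("%u\n", fbin2freq(76, 0));	/* 5180 MHz, 5 GHz channel 36 */
	printf("%u\n", freq2fbin(2437, 1));	/* 137                        */
	return 0;
}
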
+
+#define ar5416_get_ntxchains(_txchainmask) \
+ (((_txchainmask >> 2) & 1) + \
+ ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
+
+extern const struct eeprom_ops eep_def_ops;
+extern const struct eeprom_ops eep_4k_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9300_ops;
+
+#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
new file mode 100644
index 000000000..4773da6dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "hw.h"
+#include "ar9002_phy.h"
+
+static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF);
+}
+
+static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
+{
+ return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
+}
+
+#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+ int addr, eep_start_loc = 64;
+
+ for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
+ if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
+ return false;
+ eep_data++;
+ }
+
+ return true;
+}
+
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+ return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_4k_fill_eeprom(ah);
+ else
+ return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_4k_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize);
+ PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("O/D Bias Version", modal_hdr->version);
+ PR_EEP("CCK OutputBias", modal_hdr->ob_0);
+ PR_EEP("BPSK OutputBias", modal_hdr->ob_1);
+ PR_EEP("QPSK OutputBias", modal_hdr->ob_2);
+ PR_EEP("16QAM OutputBias", modal_hdr->ob_3);
+ PR_EEP("64QAM OutputBias", modal_hdr->ob_4);
+ PR_EEP("CCK Driver1_Bias", modal_hdr->db1_0);
+ PR_EEP("BPSK Driver1_Bias", modal_hdr->db1_1);
+ PR_EEP("QPSK Driver1_Bias", modal_hdr->db1_2);
+ PR_EEP("16QAM Driver1_Bias", modal_hdr->db1_3);
+ PR_EEP("64QAM Driver1_Bias", modal_hdr->db1_4);
+ PR_EEP("CCK Driver2_Bias", modal_hdr->db2_0);
+ PR_EEP("BPSK Driver2_Bias", modal_hdr->db2_1);
+ PR_EEP("QPSK Driver2_Bias", modal_hdr->db2_2);
+ PR_EEP("16QAM Driver2_Bias", modal_hdr->db2_3);
+ PR_EEP("64QAM Driver2_Bias", modal_hdr->db2_4);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]);
+ PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]);
+ PR_EEP("Ant. Diversity ctl1", modal_hdr->antdiv_ctl1);
+ PR_EEP("Ant. Diversity ctl2", modal_hdr->antdiv_ctl2);
+ PR_EEP("TX Diversity", modal_hdr->tx_diversity);
+
+ return len;
+}
+
+static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len = ath9k_dump_4k_modal_eeprom(buf, len, size,
+ &eep->modalHeader);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("TX Gain type", pBase->txGainType);
+
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
+
+#undef SIZE_EEPROM_4K
+
+static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
+{
+#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ u16 *eepdata, temp, magic, magic2;
+ u32 sum = 0, el;
+ bool need_swap = false;
+ int i, addr;
+
+
+ if (!ath9k_hw_use_flash(ah)) {
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
+ ath_err(common, "Reading Magic # failed\n");
+ return -EIO;
+ }
+
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
+
+ if (magic != AR5416_EEPROM_MAGIC) {
+ magic2 = swab16(magic);
+
+ if (magic2 == AR5416_EEPROM_MAGIC) {
+ need_swap = true;
+ eepdata = (u16 *) (&ah->eeprom);
+
+ for (addr = 0; addr < EEPROM_4K_SIZE; addr++) {
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
+ }
+ } else {
+ ath_err(common,
+ "Invalid EEPROM Magic. Endianness mismatch.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
+ need_swap ? "True" : "False");
+
+ if (need_swap)
+ el = swab16(ah->eeprom.map4k.baseEepHeader.length);
+ else
+ el = ah->eeprom.map4k.baseEepHeader.length;
+
+ if (el > sizeof(struct ar5416_eeprom_4k))
+ el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
+ else
+ el = el / sizeof(u16);
+
+ eepdata = (u16 *)(&ah->eeprom);
+
+ for (i = 0; i < el; i++)
+ sum ^= *eepdata++;
+
+ if (need_swap) {
+ u32 integer;
+ u16 word;
+
+ ath_dbg(common, EEPROM,
+ "EEPROM Endianness is not native.. Changing\n");
+
+ word = swab16(eep->baseEepHeader.length);
+ eep->baseEepHeader.length = word;
+
+ word = swab16(eep->baseEepHeader.checksum);
+ eep->baseEepHeader.checksum = word;
+
+ word = swab16(eep->baseEepHeader.version);
+ eep->baseEepHeader.version = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ word = swab16(eep->baseEepHeader.rfSilent);
+ eep->baseEepHeader.rfSilent = word;
+
+ word = swab16(eep->baseEepHeader.blueToothOptions);
+ eep->baseEepHeader.blueToothOptions = word;
+
+ word = swab16(eep->baseEepHeader.deviceCap);
+ eep->baseEepHeader.deviceCap = word;
+
+ integer = swab32(eep->modalHeader.antCtrlCommon);
+ eep->modalHeader.antCtrlCommon = integer;
+
+ for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
+ integer = swab32(eep->modalHeader.antCtrlChain[i]);
+ eep->modalHeader.antCtrlChain[i] = integer;
+ }
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ word = swab16(eep->modalHeader.spurChans[i].spurChan);
+ eep->modalHeader.spurChans[i].spurChan = word;
+ }
+ }
+
+ if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
+ ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ah->eep_ops->get_eeprom_ver(ah));
+ return -EINVAL;
+ }
+
+ return 0;
+#undef EEPROM_4K_SIZE
+}
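
The checksum rule enforced above is that all 16-bit words of the EEPROM image, including the stored checksum word itself, XOR to 0xffff. A standalone sketch with an invented four-word image:

/* Standalone illustration only; the words are made up. */
#include <stdio.h>

int main(void)
{
	unsigned short words[4] = { 0x0010, 0x0000, 0x1234, 0 };
	unsigned short sum = 0;
	int i;

	/* Pick the checksum word so that the full image XORs to 0xffff. */
	for (i = 0; i < 3; i++)
		sum ^= words[i];
	words[3] = sum ^ 0xffff;

	/* Verification, as done by check_eeprom(): must yield 0xffff. */
	for (sum = 0, i = 0; i < 4; i++)
		sum ^= words[i];
	printf("0x%04x\n", sum);	/* prints 0xffff */
	return 0;
}
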
+
+static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ struct modal_eep_4k_header *pModal = &eep->modalHeader;
+ struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+ u16 ver_minor;
+
+ ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK;
+
+ switch (param) {
+ case EEP_NFTHRESH_2:
+ return pModal->noiseFloorThreshCh[0];
+ case EEP_MAC_LSW:
+ return get_unaligned_be16(pBase->macAddr);
+ case EEP_MAC_MID:
+ return get_unaligned_be16(pBase->macAddr + 2);
+ case EEP_MAC_MSW:
+ return get_unaligned_be16(pBase->macAddr + 4);
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_OB_2:
+ return pModal->ob_0;
+ case EEP_DB_2:
+ return pModal->db1_1;
+ case EEP_MINOR_REV:
+ return ver_minor;
+ case EEP_TX_MASK:
+ return pBase->txMask;
+ case EEP_RX_MASK:
+ return pBase->rxMask;
+ case EEP_FRAC_N_5G:
+ return 0;
+ case EEP_PWR_TABLE_OFFSET:
+ return AR5416_PWR_TABLE_OFFSET_DB;
+ case EEP_MODAL_VER:
+ return pModal->version;
+ case EEP_ANT_DIV_CTL1:
+ return pModal->antdiv_ctl1;
+ case EEP_TXGAIN_TYPE:
+ return pBase->txGainType;
+ case EEP_ANTENNA_GAIN_2G:
+ return pModal->antennaGainCh[0];
+ default:
+ return 0;
+ }
+}
+
+static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
+ struct cal_data_per_freq_4k *pRawDataset;
+ u8 *pCalBChans = NULL;
+ u16 pdGainOverlap_t2;
+ static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
+ u16 numPiers, i, j;
+ u16 numXpdGain, xpdMask;
+ u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 };
+ u32 reg32, regOffset, regChainOffset;
+
+ xpdMask = pEepData->modalHeader.xpdGain;
+
+ if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ pdGainOverlap_t2 =
+ pEepData->modalHeader.pdGainOverlap;
+ } else {
+ pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
+ }
+
+ pCalBChans = pEepData->calFreqPier2G;
+ numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS;
+
+ numXpdGain = 0;
+
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS)
+ break;
+ xpdGainValues[numXpdGain] =
+ (u16)(AR5416_PD_GAINS_IN_MASK - i);
+ numXpdGain++;
+ }
+ }
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
+ (numXpdGain - 1) & 0x3);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
+ xpdGainValues[0]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
+ xpdGainValues[1]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
+ regChainOffset = i * 0x1000;
+
+ if (pEepData->baseEepHeader.txMask & (1 << i)) {
+ pRawDataset = pEepData->calPierData2G[i];
+
+ ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
+ pRawDataset, pCalBChans,
+ numPiers, pdGainOverlap_t2,
+ gainBoundaries,
+ pdadcValues, numXpdGain);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
+ SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+ | SM(gainBoundaries[0],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+ | SM(gainBoundaries[1],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+ | SM(gainBoundaries[2],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+ | SM(gainBoundaries[3],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
+
+ regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
+ for (j = 0; j < 32; j++) {
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
+ REG_WRITE(ah, regOffset, reg32);
+
+ ath_dbg(common, EEPROM,
+ "PDADC (%d,%4x): %4.4x %8.8x\n",
+ i, regChainOffset, regOffset,
+ reg32);
+ ath_dbg(common, EEPROM,
+ "PDADC: Chain %d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d |\n",
+ i, 4 * j, pdadcValues[4 * j],
+ 4 * j + 1, pdadcValues[4 * j + 1],
+ 4 * j + 2, pdadcValues[4 * j + 2],
+ 4 * j + 3, pdadcValues[4 * j + 3]);
+
+ regOffset += 4;
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ }
+ }
+}
+
+static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int16_t *ratesArray,
+ u16 cfgCtl,
+ u16 antenna_reduction,
+ u16 powerLimit)
+{
+#define CMP_TEST_GRP \
+ (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ pEepData->ctlIndex[i]) \
+ || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
+
+ int i;
+ u16 twiceMinEdgePower;
+ u16 twiceMaxEdgePower;
+ u16 scaledPower = 0, minCtlPower;
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
+ struct chan_centers centers;
+ struct cal_ctl_data_4k *rep;
+ struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
+ struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
+ 0, { 0, 0, 0, 0}
+ };
+ struct cal_target_power_leg targetPowerOfdmExt = {
+ 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
+ 0, { 0, 0, 0, 0 }
+ };
+ struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
+ 0, {0, 0, 0, 0}
+ };
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ scaledPower = powerLimit - antenna_reduction;
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
+ pCtlMode = ctlModesFor11g;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCck, 4, false);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4, false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT20,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT40,
+ AR5416_NUM_2G_40_TARGET_POWERS,
+ &targetPowerHt40, 8, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCckExt, 4, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdmExt, 4, true);
+ }
+
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+ (pCtlMode[ctlMode] == CTL_2GHT40);
+
+ if (isHt40CtlMode)
+ freq = centers.synth_center;
+ else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+ freq = centers.ext_center;
+ else
+ freq = centers.ctl_center;
+
+ twiceMaxEdgePower = MAX_RATE_POWER;
+
+ for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
+ pEepData->ctlIndex[i]; i++) {
+
+ if (CMP_TEST_GRP) {
+ rep = &(pEepData->ctlData[i]);
+
+ twiceMinEdgePower = ath9k_hw_get_max_edge_power(
+ freq,
+ rep->ctlEdges[
+ ar5416_get_ntxchains(ah->txchainmask) - 1],
+ IS_CHAN_2GHZ(chan),
+ AR5416_EEP4K_NUM_BAND_EDGES);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ twiceMaxEdgePower =
+ min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ } else {
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+
+ minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
+ targetPowerCck.tPow2x[i] =
+ min((u16)targetPowerCck.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11G:
+ for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
+ targetPowerOfdm.tPow2x[i] =
+ min((u16)targetPowerOfdm.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_2GHT20:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
+ targetPowerHt20.tPow2x[i] =
+ min((u16)targetPowerHt20.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11B_EXT:
+ targetPowerCckExt.tPow2x[0] =
+ min((u16)targetPowerCckExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_11G_EXT:
+ targetPowerOfdmExt.tPow2x[0] =
+ min((u16)targetPowerOfdmExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_2GHT40:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ targetPowerHt40.tPow2x[i] =
+ min((u16)targetPowerHt40.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ ratesArray[rate6mb] =
+ ratesArray[rate9mb] =
+ ratesArray[rate12mb] =
+ ratesArray[rate18mb] =
+ ratesArray[rate24mb] =
+ targetPowerOfdm.tPow2x[0];
+
+ ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
+ ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
+ ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
+ ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
+
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
+ ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
+
+ ratesArray[rate1l] = targetPowerCck.tPow2x[0];
+ ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1];
+ ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
+ ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3];
+
+ if (IS_CHAN_HT40(chan)) {
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ ratesArray[rateHt40_0 + i] =
+ targetPowerHt40.tPow2x[i];
+ }
+ ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+ ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
+ }
+
+#undef CMP_TEST_GRP
+}
+
+static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 powerLimit, bool test)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
+ struct modal_eep_4k_header *pModal = &pEepData->modalHeader;
+ int16_t ratesArray[Ar5416RateSize];
+ u8 ht40PowerIncForPdadc = 2;
+ int i;
+
+ memset(ratesArray, 0, sizeof(ratesArray));
+
+ if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
+ }
+
+ ath9k_hw_set_4k_power_per_rate_table(ah, chan,
+ &ratesArray[0], cfgCtl,
+ twiceAntennaReduction,
+ powerLimit);
+
+ ath9k_hw_set_4k_power_cal_table(ah, chan);
+
+ regulatory->max_power_level = 0;
+ for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
+ }
+
+ if (test)
+ return;
+
+ for (i = 0; i < Ar5416RateSize; i++)
+ ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* OFDM power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
+ ATH9K_POW_SM(ratesArray[rate18mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate12mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate9mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate6mb], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
+ ATH9K_POW_SM(ratesArray[rate54mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate48mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate36mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate24mb], 0));
+
+ /* CCK power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
+ ATH9K_POW_SM(ratesArray[rate2s], 24)
+ | ATH9K_POW_SM(ratesArray[rate2l], 16)
+ | ATH9K_POW_SM(ratesArray[rateXr], 8)
+ | ATH9K_POW_SM(ratesArray[rate1l], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
+ ATH9K_POW_SM(ratesArray[rate11s], 24)
+ | ATH9K_POW_SM(ratesArray[rate11l], 16)
+ | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
+ | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
+
+ /* HT20 power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
+ ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
+ ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
+
+ /* HT40 power per rate */
+ if (IS_CHAN_HT40(chan)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
+ ATH9K_POW_SM(ratesArray[rateHt40_3] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_2] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_1] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_0] +
+ ht40PowerIncForPdadc, 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
+ ATH9K_POW_SM(ratesArray[rateHt40_7] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_6] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_5] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_4] +
+ ht40PowerIncForPdadc, 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
+ ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
+ | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
+ | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
+ | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
+ }
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
+ struct modal_eep_4k_header *pModal,
+ struct ar5416_eeprom_4k *eep,
+ u8 txRxAttenLocal)
+{
+ ENABLE_REG_RMW_BUFFER(ah);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
+ pModal->antCtrlChain[0], 0);
+
+ REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
+ SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
+
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_3) {
+ txRxAttenLocal = pModal->txRxAttenCh[0];
+
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->xatten2Margin[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
+
+ /* Set the block 1 value to block 0 value */
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->bswMargin[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->xatten2Margin[0]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB,
+ pModal->xatten2Db[0]);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
+ REG_RMW_BUFFER_FLUSH(ah);
+}
+
+/*
+ * Read EEPROM header info and program the device for correct operation
+ * given the channel value.
+ */
+static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct modal_eep_4k_header *pModal;
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
+ struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+ u8 txRxAttenLocal;
+ u8 ob[5], db1[5], db2[5];
+ u8 ant_div_control1, ant_div_control2;
+ u8 bb_desired_scale;
+ u32 regVal;
+
+ pModal = &eep->modalHeader;
+ txRxAttenLocal = 23;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+
+ /* Single chain for 4K EEPROM*/
+ ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);
+
+ /* Initialize Ant Diversity settings from EEPROM */
+ if (pModal->version >= 3) {
+ ant_div_control1 = pModal->antdiv_ctl1;
+ ant_div_control2 = pModal->antdiv_ctl2;
+
+ regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
+
+ regVal |= SM(ant_div_control1,
+ AR_PHY_9285_ANT_DIV_CTL);
+ regVal |= SM(ant_div_control2,
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+ regVal |= SM((ant_div_control2 >> 2),
+ AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+ regVal |= SM((ant_div_control1 >> 1),
+ AR_PHY_9285_ANT_DIV_ALT_GAINTB);
+ regVal |= SM((ant_div_control1 >> 2),
+ AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
+
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
+ regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ regVal |= SM((ant_div_control1 >> 3),
+ AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
+ regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ /*
+ * If diversity combining is enabled,
+ * set MAIN to LNA1 and ALT to LNA2 initially.
+ */
+ regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF));
+
+ regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
+ regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
+ regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
+ }
+ }
+
+ if (pModal->version >= 2) {
+ ob[0] = pModal->ob_0;
+ ob[1] = pModal->ob_1;
+ ob[2] = pModal->ob_2;
+ ob[3] = pModal->ob_3;
+ ob[4] = pModal->ob_4;
+
+ db1[0] = pModal->db1_0;
+ db1[1] = pModal->db1_1;
+ db1[2] = pModal->db1_2;
+ db1[3] = pModal->db1_3;
+ db1[4] = pModal->db1_4;
+
+ db2[0] = pModal->db2_0;
+ db2[1] = pModal->db2_1;
+ db2[2] = pModal->db2_2;
+ db2[3] = pModal->db2_3;
+ db2[4] = pModal->db2_4;
+ } else if (pModal->version == 1) {
+ ob[0] = pModal->ob_0;
+ ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1;
+ db1[0] = pModal->db1_0;
+ db1[1] = db1[2] = db1[3] = db1[4] = pModal->db1_1;
+ db2[0] = pModal->db2_0;
+ db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1;
+ } else {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ ob[i] = pModal->ob_0;
+ db1[i] = pModal->db1_0;
+ db2[i] = pModal->db2_0;
+ }
+ }
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ if (AR_SREV_9271(ah)) {
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9271_AN_RF2G3_OB_cck,
+ AR9271_AN_RF2G3_OB_cck_S,
+ ob[0]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9271_AN_RF2G3_OB_psk,
+ AR9271_AN_RF2G3_OB_psk_S,
+ ob[1]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9271_AN_RF2G3_OB_qam,
+ AR9271_AN_RF2G3_OB_qam_S,
+ ob[2]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9271_AN_RF2G3_DB_1,
+ AR9271_AN_RF2G3_DB_1_S,
+ db1[0]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9271_AN_RF2G4_DB_2,
+ AR9271_AN_RF2G4_DB_2_S,
+ db2[0]);
+ } else {
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_OB_0,
+ AR9285_AN_RF2G3_OB_0_S,
+ ob[0]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_OB_1,
+ AR9285_AN_RF2G3_OB_1_S,
+ ob[1]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_OB_2,
+ AR9285_AN_RF2G3_OB_2_S,
+ ob[2]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_OB_3,
+ AR9285_AN_RF2G3_OB_3_S,
+ ob[3]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_OB_4,
+ AR9285_AN_RF2G3_OB_4_S,
+ ob[4]);
+
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_DB1_0,
+ AR9285_AN_RF2G3_DB1_0_S,
+ db1[0]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_DB1_1,
+ AR9285_AN_RF2G3_DB1_1_S,
+ db1[1]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G3,
+ AR9285_AN_RF2G3_DB1_2,
+ AR9285_AN_RF2G3_DB1_2_S,
+ db1[2]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB1_3,
+ AR9285_AN_RF2G4_DB1_3_S,
+ db1[3]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB1_4,
+ AR9285_AN_RF2G4_DB1_4_S, db1[4]);
+
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB2_0,
+ AR9285_AN_RF2G4_DB2_0_S,
+ db2[0]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB2_1,
+ AR9285_AN_RF2G4_DB2_1_S,
+ db2[1]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB2_2,
+ AR9285_AN_RF2G4_DB2_2_S,
+ db2[2]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB2_3,
+ AR9285_AN_RF2G4_DB2_3_S,
+ db2[3]);
+ ath9k_hw_analog_shift_rmw(ah,
+ AR9285_AN_RF2G4,
+ AR9285_AN_RF2G4_DB2_4,
+ AR9285_AN_RF2G4_DB2_4_S,
+ db2[4]);
+ }
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
+ pModal->switchSettling);
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
+ pModal->adcDesiredSize);
+
+ REG_RMW(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
+ SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
+ SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+ pModal->txEndToRxOn);
+
+ if (AR_SREV_9271_10(ah))
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+ pModal->txEndToRxOn);
+ REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
+ pModal->thresh62);
+
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
+ pModal->txFrameToDataStart);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
+ pModal->txFrameToPaOn);
+ }
+
+ if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_3) {
+ if (IS_CHAN_HT40(chan))
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH,
+ pModal->swSettleHt40);
+ }
+
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ bb_desired_scale = (pModal->bb_scale_smrt_antenna &
+ EEP_4K_BB_DESIRED_SCALE_MASK);
+ if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
+ u32 pwrctrl, mask, clr;
+
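+ /*
+ * The mask marks the start bit of each 5-bit scale field in the
+ * register, so multiplying it by the desired scale replicates the
+ * value into every field, and mask * 0x1f clears the full fields.
+ */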
+ mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ ENABLE_REG_RMW_BUFFER(ah);
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
+
+ mask = BIT(0)|BIT(5)|BIT(15);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);
+
+ mask = BIT(0)|BIT(5);
+ pwrctrl = mask * bb_desired_scale;
+ clr = mask * 0x1f;
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
+ REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
+ REG_RMW_BUFFER_FLUSH(ah);
+ }
+}
+
+static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
+{
+ return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
+}
+
+const struct eeprom_ops eep_4k_ops = {
+ .check_eeprom = ath9k_hw_4k_check_eeprom,
+ .get_eeprom = ath9k_hw_4k_get_eeprom,
+ .fill_eeprom = ath9k_hw_4k_fill_eeprom,
+ .dump_eeprom = ath9k_hw_4k_dump_eeprom,
+ .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
+ .set_board_values = ath9k_hw_4k_set_board_values,
+ .set_txpower = ath9k_hw_4k_set_txpower,
+ .get_spur_channel = ath9k_hw_4k_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
new file mode 100644
index 000000000..6ca33dfde
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "hw.h"
+#include "ar9002_phy.h"
+
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
+
+static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
+{
+ return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
+}
+
+static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
+{
+ return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
+}
+
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ u16 *eep_data = (u16 *)eep;
+ int addr, eep_start_loc = AR9287_EEP_START_LOC;
+
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
+ if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
+ return false;
+ eep_data++;
+ }
+
+ return true;
+}
+
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ AR9287_HTC_EEP_START_LOC,
+ SIZE_EEPROM_AR9287);
+ return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah))
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+ else
+ return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_ar9287_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain1 TxRxAtten", modal_hdr->txRxAttenCh[1]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("Chain1 RxTxMargin", modal_hdr->rxTxMarginCh[1]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain1 I Coefficient", modal_hdr->iqCalICh[1]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("Chain1 Q Coefficient", modal_hdr->iqCalQCh[1]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain1 bswAtten", modal_hdr->bswAtten[1]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("Chain1 bswMargin", modal_hdr->bswMargin[1]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("AR92x7 Version", modal_hdr->version);
+ PR_EEP("DriverBias1", modal_hdr->db1);
+ PR_EEP("DriverBias2", modal_hdr->db1);
+ PR_EEP("CCK OutputBias", modal_hdr->ob_cck);
+ PR_EEP("PSK OutputBias", modal_hdr->ob_psk);
+ PR_EEP("QAM OutputBias", modal_hdr->ob_qam);
+ PR_EEP("PAL_OFF OutputBias", modal_hdr->ob_pal_off);
+
+ return len;
+}
+
+static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len = ar9287_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("Power Table Offset", pBase->pwrTableOffset);
+ PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
+
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
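+/*
+ * Validate the AR9287 EEPROM image: check the magic value, byte-swap
+ * the image if it was written with the opposite endianness, then verify
+ * the XOR checksum and the version/revision fields.
+ */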
+static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
+{
+ u32 sum = 0, el, integer;
+ u16 temp, word, magic, magic2, *eepdata;
+ int i, addr;
+ bool need_swap = false;
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
+ ath_err(common, "Reading Magic # failed\n");
+ return -EIO;
+ }
+
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
+
+ if (magic != AR5416_EEPROM_MAGIC) {
+ magic2 = swab16(magic);
+
+ if (magic2 == AR5416_EEPROM_MAGIC) {
+ need_swap = true;
+ eepdata = (u16 *)(&ah->eeprom);
+
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
+ }
+ } else {
+ ath_err(common,
+ "Invalid EEPROM Magic. Endianness mismatch.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
+ need_swap ? "True" : "False");
+
+ if (need_swap)
+ el = swab16(ah->eeprom.map9287.baseEepHeader.length);
+ else
+ el = ah->eeprom.map9287.baseEepHeader.length;
+
+ if (el > sizeof(struct ar9287_eeprom))
+ el = sizeof(struct ar9287_eeprom) / sizeof(u16);
+ else
+ el = el / sizeof(u16);
+
+ eepdata = (u16 *)(&ah->eeprom);
+
+ for (i = 0; i < el; i++)
+ sum ^= *eepdata++;
+
+ if (need_swap) {
+ word = swab16(eep->baseEepHeader.length);
+ eep->baseEepHeader.length = word;
+
+ word = swab16(eep->baseEepHeader.checksum);
+ eep->baseEepHeader.checksum = word;
+
+ word = swab16(eep->baseEepHeader.version);
+ eep->baseEepHeader.version = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ word = swab16(eep->baseEepHeader.rfSilent);
+ eep->baseEepHeader.rfSilent = word;
+
+ word = swab16(eep->baseEepHeader.blueToothOptions);
+ eep->baseEepHeader.blueToothOptions = word;
+
+ word = swab16(eep->baseEepHeader.deviceCap);
+ eep->baseEepHeader.deviceCap = word;
+
+ integer = swab32(eep->modalHeader.antCtrlCommon);
+ eep->modalHeader.antCtrlCommon = integer;
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++) {
+ integer = swab32(eep->modalHeader.antCtrlChain[i]);
+ eep->modalHeader.antCtrlChain[i] = integer;
+ }
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ word = swab16(eep->modalHeader.spurChans[i].spurChan);
+ eep->modalHeader.spurChans[i].spurChan = word;
+ }
+ }
+
+ if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER ||
+ ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ah->eep_ops->get_eeprom_ver(ah));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
+ struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
+ u16 ver_minor;
+
+ ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
+
+ switch (param) {
+ case EEP_NFTHRESH_2:
+ return pModal->noiseFloorThreshCh[0];
+ case EEP_MAC_LSW:
+ return get_unaligned_be16(pBase->macAddr);
+ case EEP_MAC_MID:
+ return get_unaligned_be16(pBase->macAddr + 2);
+ case EEP_MAC_MSW:
+ return get_unaligned_be16(pBase->macAddr + 4);
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_MINOR_REV:
+ return ver_minor;
+ case EEP_TX_MASK:
+ return pBase->txMask;
+ case EEP_RX_MASK:
+ return pBase->rxMask;
+ case EEP_DEV_TYPE:
+ return pBase->deviceType;
+ case EEP_OL_PWRCTRL:
+ return pBase->openLoopPwrCntl;
+ case EEP_TEMPSENSE_SLOPE:
+ if (ver_minor >= AR9287_EEP_MINOR_VER_2)
+ return pBase->tempSensSlope;
+ else
+ return 0;
+ case EEP_TEMPSENSE_SLOPE_PAL_ON:
+ if (ver_minor >= AR9287_EEP_MINOR_VER_3)
+ return pBase->tempSensSlopePalOn;
+ else
+ return 0;
+ case EEP_ANTENNA_GAIN_2G:
+ return max_t(u8, pModal->antennaGainCh[0],
+ pModal->antennaGainCh[1]);
+ default:
+ return 0;
+ }
+}
+
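+/*
+ * Look up the open-loop target power for the current channel: find the
+ * calibration piers bracketing the synth centre frequency and, when
+ * there is no exact match, average the power of the two neighbours.
+ */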
+static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
+ u8 *pCalChans, u16 availPiers, int8_t *pPwr)
+{
+ u16 idxL = 0, idxR = 0, numPiers;
+ bool match;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++) {
+ if (pCalChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+ }
+
+ match = ath9k_hw_get_lower_upper_index(
+ (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+ pCalChans, numPiers, &idxL, &idxR);
+
+ if (match) {
+ *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0];
+ } else {
+ *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] +
+ (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
+ }
+}
+
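+/*
+ * Enable open-loop power control on both chains and program the OLPC
+ * reference power for the requested chain. The PHY registers are
+ * addressed directly (0xa270/0xb270, 0xa398/0xb398) rather than through
+ * symbolic names.
+ */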
+static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
+ int32_t txPower, u16 chain)
+{
+ u32 tmpVal;
+ u32 a;
+
+ /* Enable OLPC for chain 0 */
+
+ tmpVal = REG_READ(ah, 0xa270);
+ tmpVal = tmpVal & 0xFCFFFFFF;
+ tmpVal = tmpVal | (0x3 << 24);
+ REG_WRITE(ah, 0xa270, tmpVal);
+
+ /* Enable OLPC for chain 1 */
+
+ tmpVal = REG_READ(ah, 0xb270);
+ tmpVal = tmpVal & 0xFCFFFFFF;
+ tmpVal = tmpVal | (0x3 << 24);
+ REG_WRITE(ah, 0xb270, tmpVal);
+
+ /* Write the OLPC ref power for chain 0 */
+
+ if (chain == 0) {
+ tmpVal = REG_READ(ah, 0xa398);
+ tmpVal = tmpVal & 0xff00ffff;
+ a = (txPower)&0xff;
+ tmpVal = tmpVal | (a << 16);
+ REG_WRITE(ah, 0xa398, tmpVal);
+ }
+
+ /* Write the OLPC ref power for chain 1 */
+
+ if (chain == 1) {
+ tmpVal = REG_READ(ah, 0xb398);
+ tmpVal = tmpVal & 0xff00ffff;
+ a = (txPower)&0xff;
+ tmpVal = tmpVal | (a << 16);
+ REG_WRITE(ah, 0xb398, tmpVal);
+ }
+}
+
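+/*
+ * Program the per-chain power calibration data for the current channel:
+ * either the OLPC reference power (open-loop power control) or the PD
+ * gain boundaries and PDADC table derived from the 2 GHz calibration
+ * piers.
+ */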
+static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct cal_data_per_freq_ar9287 *pRawDataset;
+ struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
+ u8 *pCalBChans = NULL;
+ u16 pdGainOverlap_t2;
+ u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
+ u16 numPiers = 0, i, j;
+ u16 numXpdGain, xpdMask;
+ u16 xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0};
+ u32 reg32, regOffset, regChainOffset, regval;
+ int16_t diff = 0;
+ struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
+
+ xpdMask = pEepData->modalHeader.xpdGain;
+
+ if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
+ AR9287_EEP_MINOR_VER_2)
+ pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
+ else
+ pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
+
+ if (IS_CHAN_2GHZ(chan)) {
+ pCalBChans = pEepData->calFreqPier2G;
+ numPiers = AR9287_NUM_2G_CAL_PIERS;
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ pRawDatasetOpenLoop =
+ (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0];
+ ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0];
+ }
+ }
+
+ numXpdGain = 0;
+
+ /* Calculate the value of xpdgains from the xpdGain Mask */
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_NUM_PD_GAINS)
+ break;
+ xpdGainValues[numXpdGain] =
+ (u16)(AR5416_PD_GAINS_IN_MASK-i);
+ numXpdGain++;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
+ (numXpdGain - 1) & 0x3);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
+ xpdGainValues[0]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
+ xpdGainValues[1]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
+ xpdGainValues[2]);
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++) {
+ regChainOffset = i * 0x1000;
+
+ if (pEepData->baseEepHeader.txMask & (1 << i)) {
+ pRawDatasetOpenLoop =
+ (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i];
+
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ int8_t txPower;
+ ar9287_eeprom_get_tx_gain_index(ah, chan,
+ pRawDatasetOpenLoop,
+ pCalBChans, numPiers,
+ &txPower);
+ ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i);
+ } else {
+ pRawDataset =
+ (struct cal_data_per_freq_ar9287 *)
+ pEepData->calPierData2G[i];
+
+ ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
+ pRawDataset,
+ pCalBChans, numPiers,
+ pdGainOverlap_t2,
+ gainBoundaries,
+ pdadcValues,
+ numXpdGain);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ if (i == 0) {
+ if (!ath9k_hw_ar9287_get_eeprom(ah,
+ EEP_OL_PWRCTRL)) {
+
+ regval = SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+ | SM(gainBoundaries[0],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+ | SM(gainBoundaries[1],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+ | SM(gainBoundaries[2],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+ | SM(gainBoundaries[3],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4);
+
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ regval);
+ }
+ }
+
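+ /*
+ * If the EEPROM power table offset differs from the expected
+ * AR9287 offset, shift the PDADC curve by the difference (two
+ * entries per dB) and flatten the remaining tail.
+ */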
+ if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB !=
+ pEepData->baseEepHeader.pwrTableOffset) {
+ diff = (u16)(pEepData->baseEepHeader.pwrTableOffset -
+ (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
+ diff *= 2;
+
+ for (j = 0; j < ((u16)AR5416_NUM_PDADC_VALUES-diff); j++)
+ pdadcValues[j] = pdadcValues[j+diff];
+
+ for (j = (u16)(AR5416_NUM_PDADC_VALUES-diff);
+ j < AR5416_NUM_PDADC_VALUES; j++)
+ pdadcValues[j] =
+ pdadcValues[AR5416_NUM_PDADC_VALUES-diff];
+ }
+
+ if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ regOffset = AR_PHY_BASE +
+ (672 << 2) + regChainOffset;
+
+ for (j = 0; j < 32; j++) {
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
+
+ REG_WRITE(ah, regOffset, reg32);
+ regOffset += 4;
+ }
+ }
+ REGWRITE_BUFFER_FLUSH(ah);
+ }
+ }
+}
+
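+/*
+ * Build the per-rate target power array for the current channel: start
+ * from the EEPROM target powers (CCK, OFDM, HT20 and, on HT40 channels,
+ * HT40/dup/extension rates) and clamp each group to the lower of the
+ * regulatory CTL edge power and the scaled power limit.
+ */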
+static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int16_t *ratesArray,
+ u16 cfgCtl,
+ u16 antenna_reduction,
+ u16 powerLimit)
+{
+#define CMP_CTL \
+ (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ pEepData->ctlIndex[i])
+
+#define CMP_NO_CTL \
+ (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
+
+ u16 twiceMaxEdgePower;
+ int i;
+ struct cal_ctl_data_ar9287 *rep;
+ struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
+ targetPowerCck = {0, {0, 0, 0, 0} };
+ struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} },
+ targetPowerCckExt = {0, {0, 0, 0, 0} };
+ struct cal_target_power_ht targetPowerHt20,
+ targetPowerHt40 = {0, {0, 0, 0, 0} };
+ u16 scaledPower = 0, minCtlPower;
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+ u16 numCtlModes = 0;
+ const u16 *pCtlMode = NULL;
+ u16 ctlMode, freq;
+ struct chan_centers centers;
+ int tx_chainmask;
+ u16 twiceMinEdgePower;
+ struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
+
+ tx_chainmask = ah->txchainmask;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+ antenna_reduction);
+
+ /*
+ * Get TX power from EEPROM.
+ */
+ if (IS_CHAN_2GHZ(chan)) {
+ /* CTL_11B, CTL_11G, CTL_2GHT20 */
+ numCtlModes =
+ ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
+
+ pCtlMode = ctlModesFor11g;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR9287_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCck, 4, false);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR9287_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4, false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT20,
+ AR9287_NUM_2G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ /* All 2G CTLs */
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT40,
+ AR9287_NUM_2G_40_TARGET_POWERS,
+ &targetPowerHt40, 8, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR9287_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCckExt, 4, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR9287_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdmExt, 4, true);
+ }
+ }
+
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ bool isHt40CtlMode =
+ (pCtlMode[ctlMode] == CTL_2GHT40) ? true : false;
+
+ if (isHt40CtlMode)
+ freq = centers.synth_center;
+ else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+ freq = centers.ext_center;
+ else
+ freq = centers.ctl_center;
+
+ twiceMaxEdgePower = MAX_RATE_POWER;
+ /* Walk through the CTL indices stored in EEPROM */
+ for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
+ struct cal_ctl_edges *pRdEdgesPower;
+
+ /*
+ * Compare test group from regulatory channel list
+ * with test mode from pCtlMode list
+ */
+ if (CMP_CTL || CMP_NO_CTL) {
+ rep = &(pEepData->ctlData[i]);
+ pRdEdgesPower =
+ rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1];
+
+ twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
+ pRdEdgesPower,
+ IS_CHAN_2GHZ(chan),
+ AR5416_NUM_BAND_EDGES);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ twiceMaxEdgePower = min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ } else {
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+
+ minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+
+ /* Apply ctl mode to correct target power set */
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
+ targetPowerCck.tPow2x[i] =
+ (u8)min((u16)targetPowerCck.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
+ targetPowerOfdm.tPow2x[i] =
+ (u8)min((u16)targetPowerOfdm.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
+ targetPowerHt20.tPow2x[i] =
+ (u8)min((u16)targetPowerHt20.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11B_EXT:
+ targetPowerCckExt.tPow2x[0] =
+ (u8)min((u16)targetPowerCckExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_11A_EXT:
+ case CTL_11G_EXT:
+ targetPowerOfdmExt.tPow2x[0] =
+ (u8)min((u16)targetPowerOfdmExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ targetPowerHt40.tPow2x[i] =
+ (u8)min((u16)targetPowerHt40.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Now set the rates array */
+
+ ratesArray[rate6mb] =
+ ratesArray[rate9mb] =
+ ratesArray[rate12mb] =
+ ratesArray[rate18mb] =
+ ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0];
+
+ ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
+ ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
+ ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
+ ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
+
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
+ ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
+
+ if (IS_CHAN_2GHZ(chan)) {
+ ratesArray[rate1l] = targetPowerCck.tPow2x[0];
+ ratesArray[rate2s] =
+ ratesArray[rate2l] = targetPowerCck.tPow2x[1];
+ ratesArray[rate5_5s] =
+ ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
+ ratesArray[rate11s] =
+ ratesArray[rate11l] = targetPowerCck.tPow2x[3];
+ }
+ if (IS_CHAN_HT40(chan)) {
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++)
+ ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i];
+
+ ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+
+ if (IS_CHAN_2GHZ(chan))
+ ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
+ }
+
+#undef CMP_CTL
+#undef CMP_NO_CTL
+}
+
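+/*
+ * set_txpower hook for the AR9287 EEPROM map: compute the per-rate
+ * power array, program the calibration table, update the regulatory
+ * maximum power level and, unless called in test mode, write the
+ * per-rate power registers after subtracting the AR9287 power table
+ * offset (the array holds half-dB units).
+ */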
+static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 powerLimit, bool test)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
+ struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
+ int16_t ratesArray[Ar5416RateSize];
+ u8 ht40PowerIncForPdadc = 2;
+ int i;
+
+ memset(ratesArray, 0, sizeof(ratesArray));
+
+ if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
+ AR9287_EEP_MINOR_VER_2)
+ ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
+
+ ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
+ &ratesArray[0], cfgCtl,
+ twiceAntennaReduction,
+ powerLimit);
+
+ ath9k_hw_set_ar9287_power_cal_table(ah, chan);
+
+ regulatory->max_power_level = 0;
+ for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
+ }
+
+ ath9k_hw_update_regulatory_maxpower(ah);
+
+ if (test)
+ return;
+
+ for (i = 0; i < Ar5416RateSize; i++)
+ ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* OFDM power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
+ ATH9K_POW_SM(ratesArray[rate18mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate12mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate9mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate6mb], 0));
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
+ ATH9K_POW_SM(ratesArray[rate54mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate48mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate36mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate24mb], 0));
+
+ /* CCK power per rate */
+ if (IS_CHAN_2GHZ(chan)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
+ ATH9K_POW_SM(ratesArray[rate2s], 24)
+ | ATH9K_POW_SM(ratesArray[rate2l], 16)
+ | ATH9K_POW_SM(ratesArray[rateXr], 8)
+ | ATH9K_POW_SM(ratesArray[rate1l], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
+ ATH9K_POW_SM(ratesArray[rate11s], 24)
+ | ATH9K_POW_SM(ratesArray[rate11l], 16)
+ | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
+ | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
+ }
+
+ /* HT20 power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
+ ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
+ ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
+
+ /* HT40 power per rate */
+ if (IS_CHAN_HT40(chan)) {
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
+ ATH9K_POW_SM(ratesArray[rateHt40_3], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_2], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_1], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_0], 0));
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
+ ATH9K_POW_SM(ratesArray[rateHt40_7], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_6], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_5], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_4], 0));
+ } else {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
+ ATH9K_POW_SM(ratesArray[rateHt40_3] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_2] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_1] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_0] +
+ ht40PowerIncForPdadc, 0));
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
+ ATH9K_POW_SM(ratesArray[rateHt40_7] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_6] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_5] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_4] +
+ ht40PowerIncForPdadc, 0));
+ }
+
+ /* Dup/Ext power per rate */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
+ ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
+ | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
+ | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
+ | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
+ }
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
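+/*
+ * Program the AR9287 board parameters from the modal header: per-chain
+ * antenna control, IQ correction and attenuation, switch settling, XPA
+ * timing, CCA thresholds and the analog output/driver bias values for
+ * both RF chains.
+ */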
+static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ar9287_eeprom *eep = &ah->eeprom.map9287;
+ struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
+ u32 regChainOffset, regval;
+ u8 txRxAttenLocal;
+ int i;
+
+ pModal = &eep->modalHeader;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++) {
+ regChainOffset = i * 0x1000;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
+ pModal->antCtrlChain[i]);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
+ (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset)
+ & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->bswMargin[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->bswAtten[i]);
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN,
+ txRxAttenLocal);
+ REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN,
+ pModal->rxTxMarginCh[i]);
+ }
+
+ if (IS_CHAN_HT40(chan))
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH, pModal->switchSettling);
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize);
+
+ REG_WRITE(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
+ | SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
+ | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)
+ | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3,
+ AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn);
+
+ REG_RMW_FIELD(ah, AR_PHY_CCA,
+ AR9280_PHY_CCA_THRESH62, pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
+ AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);
+
+ regval = REG_READ(ah, AR9287_AN_RF2G3_CH0);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH0, regval);
+
+ regval = REG_READ(ah, AR9287_AN_RF2G3_CH1);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH1, regval);
+
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn);
+
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TOP2,
+ AR9287_AN_TOP2_XPABIAS_LVL,
+ AR9287_AN_TOP2_XPABIAS_LVL_S,
+ pModal->xpaBiasLvl);
+}
+
+static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
+}
+
+const struct eeprom_ops eep_ar9287_ops = {
+ .check_eeprom = ath9k_hw_ar9287_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9287_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
+ .dump_eeprom = ath9k_hw_ar9287_dump_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
+ .set_board_values = ath9k_hw_ar9287_set_board_values,
+ .set_txpower = ath9k_hw_ar9287_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9287_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
new file mode 100644
index 000000000..056f516bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "hw.h"
+#include "ar9002_phy.h"
+
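+/*
+ * For open-loop power control, find the calibration piers bracketing
+ * the current channel, return the calibrated power (averaged when there
+ * is no exact pier match) and convert the pier's PCDAC value into an
+ * index into ah->originalGain[].
+ */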
+static void ath9k_get_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct calDataPerFreqOpLoop *rawDatasetOpLoop,
+ u8 *calChans, u16 availPiers, u8 *pwr, u8 *pcdacIdx)
+{
+ u8 pcdac, i = 0;
+ u16 idxL = 0, idxR = 0, numPiers;
+ bool match;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++)
+ if (calChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+
+ match = ath9k_hw_get_lower_upper_index(
+ (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+ calChans, numPiers, &idxL, &idxR);
+ if (match) {
+ pcdac = rawDatasetOpLoop[idxL].pcdac[0][0];
+ *pwr = rawDatasetOpLoop[idxL].pwrPdg[0][0];
+ } else {
+ pcdac = rawDatasetOpLoop[idxR].pcdac[0][0];
+ *pwr = (rawDatasetOpLoop[idxL].pwrPdg[0][0] +
+ rawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
+ }
+
+ while (pcdac > ah->originalGain[i] &&
+ i < (AR9280_TX_GAIN_TABLE_SIZE - 1))
+ i++;
+
+ *pcdacIdx = i;
+}
+
+static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
+ u32 initTxGain,
+ int txPower,
+ u8 *pPDADCValues)
+{
+ u32 i;
+ u32 offset;
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL6_0,
+ AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3);
+ REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL6_1,
+ AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3);
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL7,
+ AR_PHY_TX_PWRCTRL_INIT_TX_GAIN, initTxGain);
+
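+ /*
+ * Build a step-function PDADC table: entries below the requested
+ * power index stay at zero, the rest saturate at 0xFF.
+ */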
+ offset = txPower;
+ for (i = 0; i < AR5416_NUM_PDADC_VALUES; i++)
+ if (i < offset)
+ pPDADCValues[i] = 0x0;
+ else
+ pPDADCValues[i] = 0xFF;
+}
+
+static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF);
+}
+
+static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
+{
+ return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
+}
+
+#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
+ int addr, ar5416_eep_start_loc = 0x100;
+
+ for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
+ if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
+ eep_data))
+ return false;
+ eep_data++;
+ }
+ return true;
+}
+
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ 0x100, SIZE_EEPROM_DEF);
+ return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah))
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_def_fill_eeprom(ah);
+ else
+ return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
+#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
+ struct modal_eep_header *modal_hdr)
+{
+ PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
+ PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
+ PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]);
+ PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+ PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
+ PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
+ PR_EEP("Chain2 Ant. Gain", modal_hdr->antennaGainCh[2]);
+ PR_EEP("Switch Settle", modal_hdr->switchSettling);
+ PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
+ PR_EEP("Chain1 TxRxAtten", modal_hdr->txRxAttenCh[1]);
+ PR_EEP("Chain2 TxRxAtten", modal_hdr->txRxAttenCh[2]);
+ PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]);
+ PR_EEP("Chain1 RxTxMargin", modal_hdr->rxTxMarginCh[1]);
+ PR_EEP("Chain2 RxTxMargin", modal_hdr->rxTxMarginCh[2]);
+ PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
+ PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize);
+ PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]);
+ PR_EEP("Chain1 xlna Gain", modal_hdr->xlnaGainCh[1]);
+ PR_EEP("Chain2 xlna Gain", modal_hdr->xlnaGainCh[2]);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
+ PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn);
+ PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
+ PR_EEP("CCA Threshold)", modal_hdr->thresh62);
+ PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
+ PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
+ PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("xpdGain", modal_hdr->xpdGain);
+ PR_EEP("External PD", modal_hdr->xpd);
+ PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]);
+ PR_EEP("Chain1 I Coefficient", modal_hdr->iqCalICh[1]);
+ PR_EEP("Chain2 I Coefficient", modal_hdr->iqCalICh[2]);
+ PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]);
+ PR_EEP("Chain1 Q Coefficient", modal_hdr->iqCalQCh[1]);
+ PR_EEP("Chain2 Q Coefficient", modal_hdr->iqCalQCh[2]);
+ PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap);
+ PR_EEP("Chain0 OutputBias", modal_hdr->ob);
+ PR_EEP("Chain0 DriverBias", modal_hdr->db);
+ PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
+ PR_EEP("2chain pwr decrease", modal_hdr->pwrDecreaseFor2Chain);
+ PR_EEP("3chain pwr decrease", modal_hdr->pwrDecreaseFor3Chain);
+ PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
+ PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
+ PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc);
+ PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]);
+ PR_EEP("Chain1 bswAtten", modal_hdr->bswAtten[1]);
+ PR_EEP("Chain2 bswAtten", modal_hdr->bswAtten[2]);
+ PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]);
+ PR_EEP("Chain1 bswMargin", modal_hdr->bswMargin[1]);
+ PR_EEP("Chain2 bswMargin", modal_hdr->bswMargin[2]);
+ PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40);
+ PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]);
+ PR_EEP("Chain1 xatten2Db", modal_hdr->xatten2Db[1]);
+ PR_EEP("Chain2 xatten2Db", modal_hdr->xatten2Db[2]);
+ PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]);
+ PR_EEP("Chain1 xatten2Margin", modal_hdr->xatten2Margin[1]);
+ PR_EEP("Chain2 xatten2Margin", modal_hdr->xatten2Margin[2]);
+ PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1);
+ PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1);
+ PR_EEP("LNA Control", modal_hdr->lna_ctl);
+ PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]);
+ PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]);
+ PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]);
+
+ return len;
+}
+
+static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+
+ if (!dump_base_hdr) {
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "2GHz modal Header");
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader[0]);
+ len += scnprintf(buf + len, size - len,
+ "%20s :\n", "5GHz modal Header");
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
+ &eep->modalHeader[1]);
+ goto out;
+ }
+
+ PR_EEP("Major Version", pBase->version >> 12);
+ PR_EEP("Minor Version", pBase->version & 0xFFF);
+ PR_EEP("Checksum", pBase->checksum);
+ PR_EEP("Length", pBase->length);
+ PR_EEP("RegDomain1", pBase->regDmn[0]);
+ PR_EEP("RegDomain2", pBase->regDmn[1]);
+ PR_EEP("TX Mask", pBase->txMask);
+ PR_EEP("RX Mask", pBase->rxMask);
+ PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+ PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+ PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT20));
+ PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_2G_HT40));
+ PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT20));
+ PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
+ AR5416_OPFLAGS_N_5G_HT40));
+ PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
+ PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
+ PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
+ PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+ PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
+
+ len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
+
+out:
+ if (len > size)
+ len = size;
+
+ return len;
+}
+#else
+static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
+ u8 *buf, u32 len, u32 size)
+{
+ return 0;
+}
+#endif
+
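+/*
+ * Validate the default (AR5416) EEPROM image: byte-swap it when the
+ * stored magic indicates the opposite endianness, verify the XOR
+ * checksum and version, and flag the AR_AN_TOP2 / xpaBiasLvl fixups
+ * where they apply.
+ */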
+static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
+{
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 *eepdata, temp, magic;
+ u32 sum = 0, el;
+ bool need_swap = false;
+ int i, addr, size;
+
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+ ath_err(common, "Reading Magic # failed\n");
+ return -EIO;
+ }
+
+ if (swab16(magic) == AR5416_EEPROM_MAGIC &&
+ !(ah->ah_flags & AH_NO_EEP_SWAP)) {
+ size = sizeof(struct ar5416_eeprom_def);
+ need_swap = true;
+ eepdata = (u16 *) (&ah->eeprom);
+
+ for (addr = 0; addr < size / sizeof(u16); addr++) {
+ temp = swab16(*eepdata);
+ *eepdata = temp;
+ eepdata++;
+ }
+ }
+
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
+ need_swap ? "True" : "False");
+
+ if (need_swap)
+ el = swab16(ah->eeprom.def.baseEepHeader.length);
+ else
+ el = ah->eeprom.def.baseEepHeader.length;
+
+ if (el > sizeof(struct ar5416_eeprom_def))
+ el = sizeof(struct ar5416_eeprom_def) / sizeof(u16);
+ else
+ el = el / sizeof(u16);
+
+ eepdata = (u16 *)(&ah->eeprom);
+
+ for (i = 0; i < el; i++)
+ sum ^= *eepdata++;
+
+ if (need_swap) {
+ u32 integer, j;
+ u16 word;
+
+ ath_dbg(common, EEPROM,
+ "EEPROM endianness is not native, byte-swapping it\n");
+
+ word = swab16(eep->baseEepHeader.length);
+ eep->baseEepHeader.length = word;
+
+ word = swab16(eep->baseEepHeader.checksum);
+ eep->baseEepHeader.checksum = word;
+
+ word = swab16(eep->baseEepHeader.version);
+ eep->baseEepHeader.version = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[0]);
+ eep->baseEepHeader.regDmn[0] = word;
+
+ word = swab16(eep->baseEepHeader.regDmn[1]);
+ eep->baseEepHeader.regDmn[1] = word;
+
+ word = swab16(eep->baseEepHeader.rfSilent);
+ eep->baseEepHeader.rfSilent = word;
+
+ word = swab16(eep->baseEepHeader.blueToothOptions);
+ eep->baseEepHeader.blueToothOptions = word;
+
+ word = swab16(eep->baseEepHeader.deviceCap);
+ eep->baseEepHeader.deviceCap = word;
+
+ for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
+ struct modal_eep_header *pModal =
+ &eep->modalHeader[j];
+ integer = swab32(pModal->antCtrlCommon);
+ pModal->antCtrlCommon = integer;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ integer = swab32(pModal->antCtrlChain[i]);
+ pModal->antCtrlChain[i] = integer;
+ }
+ for (i = 0; i < 3; i++) {
+ word = swab16(pModal->xpaBiasLvlFreq[i]);
+ pModal->xpaBiasLvlFreq[i] = word;
+ }
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ word = swab16(pModal->spurChans[i].spurChan);
+ pModal->spurChans[i].spurChan = word;
+ }
+ }
+ }
+
+ if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
+ ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ah->eep_ops->get_eeprom_ver(ah));
+ return -EINVAL;
+ }
+
+ /* Enable fixup for AR_AN_TOP2 if necessary */
+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+ ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+ (eep->baseEepHeader.pwdclkind == 0))
+ ah->need_an_top2_fixup = true;
+
+ if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+ (AR_SREV_9280(ah)))
+ eep->modalHeader[0].xpaBiasLvl = 0;
+
+ return 0;
+}
+
+static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ struct modal_eep_header *pModal = eep->modalHeader;
+ struct base_eep_header *pBase = &eep->baseEepHeader;
+ int band = 0;
+
+ switch (param) {
+ case EEP_NFTHRESH_5:
+ return pModal[0].noiseFloorThreshCh[0];
+ case EEP_NFTHRESH_2:
+ return pModal[1].noiseFloorThreshCh[0];
+ case EEP_MAC_LSW:
+ return get_unaligned_be16(pBase->macAddr);
+ case EEP_MAC_MID:
+ return get_unaligned_be16(pBase->macAddr + 2);
+ case EEP_MAC_MSW:
+ return get_unaligned_be16(pBase->macAddr + 4);
+ case EEP_REG_0:
+ return pBase->regDmn[0];
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_OB_5:
+ return pModal[0].ob;
+ case EEP_DB_5:
+ return pModal[0].db;
+ case EEP_OB_2:
+ return pModal[1].ob;
+ case EEP_DB_2:
+ return pModal[1].db;
+ case EEP_MINOR_REV:
+ return AR5416_VER_MASK;
+ case EEP_TX_MASK:
+ return pBase->txMask;
+ case EEP_RX_MASK:
+ return pBase->rxMask;
+ case EEP_FSTCLK_5G:
+ return pBase->fastClk5g;
+ case EEP_RXGAIN_TYPE:
+ return pBase->rxGainType;
+ case EEP_TXGAIN_TYPE:
+ return pBase->txGainType;
+ case EEP_OL_PWRCTRL:
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ return pBase->openLoopPwrCntl ? true : false;
+ else
+ return false;
+ case EEP_RC_CHAIN_MASK:
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ return pBase->rcChainMask;
+ else
+ return 0;
+ case EEP_DAC_HPWR_5G:
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20)
+ return pBase->dacHiPwrMode_5G;
+ else
+ return 0;
+ case EEP_FRAC_N_5G:
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22)
+ return pBase->frac_n_5g;
+ else
+ return 0;
+ case EEP_PWR_TABLE_OFFSET:
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
+ return pBase->pwr_table_offset;
+ else
+ return AR5416_PWR_TABLE_OFFSET_DB;
+ case EEP_ANTENNA_GAIN_2G:
+ band = 1;
+ /* fall through */
+ case EEP_ANTENNA_GAIN_5G:
+ return max_t(u8, max_t(u8,
+ pModal[band].antennaGainCh[0],
+ pModal[band].antennaGainCh[1]),
+ pModal[band].antennaGainCh[2]);
+ default:
+ return 0;
+ }
+}
+
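+/*
+ * Per-chain gain programming for the default EEPROM map: write the
+ * attenuation/margin values from the modal header, using the AR9280+
+ * register layout where available and the legacy fields otherwise.
+ */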
+static void ath9k_hw_def_set_gain(struct ath_hw *ah,
+ struct modal_eep_header *pModal,
+ struct ar5416_eeprom_def *eep,
+ u8 txRxAttenLocal, int regChainOffset, int i)
+{
+ ENABLE_REG_RMW_BUFFER(ah);
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->bswMargin[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->bswAtten[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
+ pModal->xatten2Margin[i]);
+ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN2_DB,
+ pModal->xatten2Db[i]);
+ } else {
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal->bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
+ AR_PHY_GAIN_2GHZ_BSW_MARGIN);
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
+ AR_PHY_GAIN_2GHZ_BSW_ATTEN);
+ }
+ }
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
+ REG_RMW_FIELD(ah,
+ AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
+ } else {
+ REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
+ SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
+ AR_PHY_RXGAIN_TXRX_ATTEN);
+ REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
+ AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
+ }
+ REG_RMW_BUFFER_FLUSH(ah);
+}
+
+static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct modal_eep_header *pModal;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ int i, regChainOffset;
+ u8 txRxAttenLocal;
+
+ pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+ txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff);
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (AR_SREV_9280(ah)) {
+ if (i >= 2)
+ break;
+ }
+
+ if ((ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0))
+ regChainOffset = (i == 1) ? 0x2000 : 0x1000;
+ else
+ regChainOffset = i * 0x1000;
+
+ REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
+ pModal->antCtrlChain[i]);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
+ (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
+ ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+
+ ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal,
+ regChainOffset, i);
+ }
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_OB,
+ AR_AN_RF2G1_CH0_OB_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
+ AR_AN_RF2G1_CH0_DB,
+ AR_AN_RF2G1_CH0_DB_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_OB,
+ AR_AN_RF2G1_CH1_OB_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
+ AR_AN_RF2G1_CH1_DB,
+ AR_AN_RF2G1_CH1_DB_S,
+ pModal->db_ch1);
+ } else {
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_OB5,
+ AR_AN_RF5G1_CH0_OB5_S,
+ pModal->ob);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
+ AR_AN_RF5G1_CH0_DB5,
+ AR_AN_RF5G1_CH0_DB5_S,
+ pModal->db);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_OB5,
+ AR_AN_RF5G1_CH1_OB5_S,
+ pModal->ob_ch1);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
+ AR_AN_RF5G1_CH1_DB5,
+ AR_AN_RF5G1_CH1_DB5_S,
+ pModal->db_ch1);
+ }
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_XPABIAS_LVL,
+ AR_AN_TOP2_XPABIAS_LVL_S,
+ pModal->xpaBiasLvl);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
+ AR_AN_TOP2_LOCALBIAS,
+ AR_AN_TOP2_LOCALBIAS_S,
+ !!(pModal->lna_ctl &
+ LNA_CTL_LOCAL_BIAS));
+ REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
+ !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
+ pModal->switchSettling);
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
+ pModal->adcDesiredSize);
+
+ if (!AR_SREV_9280_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_PGA,
+ pModal->pgaDesiredSize);
+
+ REG_WRITE(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
+ | SM(pModal->txEndToXpaOff,
+ AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAA_ON)
+ | SM(pModal->txFrameToXpaOn,
+ AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
+ pModal->txEndToRxOn);
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
+ AR_PHY_EXT_CCA0_THRESH62,
+ pModal->thresh62);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
+ pModal->thresh62);
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_THRESH62,
+ pModal->thresh62);
+ }
+
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_END_DATA_START,
+ pModal->txFrameToDataStart);
+ REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
+ pModal->txFrameToPaOn);
+ }
+
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+ if (IS_CHAN_HT40(chan))
+ REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH,
+ pModal->swSettleHt40);
+ }
+
+ if (AR_SREV_9280_20_OR_LATER(ah) &&
+ AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+ REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
+ AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
+ pModal->miscBits);
+
+ if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
+ if (IS_CHAN_2GHZ(chan))
+ REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
+ eep->baseEepHeader.dacLpMode);
+ else if (eep->baseEepHeader.dacHiPwrMode_5G)
+ REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0);
+ else
+ REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
+ eep->baseEepHeader.dacLpMode);
+
+ udelay(100);
+
+ REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
+ pModal->miscBits >> 2);
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_DESIRED_SCALE_CCK,
+ eep->baseEepHeader.desiredScaleCCK);
+ }
+}
+
+static void ath9k_hw_def_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt])
+ struct modal_eep_header *pModal;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+ u8 biaslevel;
+
+ if (ah->hw_version.macVersion != AR_SREV_VERSION_9160)
+ return;
+
+ if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7)
+ return;
+
+ pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
+
+ if (pModal->xpaBiasLvl != 0xff) {
+ biaslevel = pModal->xpaBiasLvl;
+ } else {
+ u16 resetFreqBin, freqBin, freqCount = 0;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ resetFreqBin = FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan));
+ freqBin = XPA_LVL_FREQ(0) & 0xff;
+ biaslevel = (u8) (XPA_LVL_FREQ(0) >> 14);
+
+ freqCount++;
+
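+ /* Use the bias level of the last calibrated frequency bin at or below the channel */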
+ while (freqCount < 3) {
+ if (XPA_LVL_FREQ(freqCount) == 0x0)
+ break;
+
+ freqBin = XPA_LVL_FREQ(freqCount) & 0xff;
+ if (resetFreqBin >= freqBin)
+ biaslevel = (u8)(XPA_LVL_FREQ(freqCount) >> 14);
+ else
+ break;
+ freqCount++;
+ }
+ }
+
+ if (IS_CHAN_2GHZ(chan)) {
+ INI_RA(&ah->iniAddac, 7, 1) = (INI_RA(&ah->iniAddac,
+ 7, 1) & (~0x18)) | biaslevel << 3;
+ } else {
+ INI_RA(&ah->iniAddac, 6, 1) = (INI_RA(&ah->iniAddac,
+ 6, 1) & (~0xc0)) | biaslevel << 6;
+ }
+#undef XPA_LVL_FREQ
+}
+
+static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
+ u16 *gb,
+ u16 numXpdGain,
+ u16 pdGainOverlap_t2,
+ int8_t pwr_table_offset,
+ int16_t *diff)
+
+{
+ u16 k;
+
+ /* Before writing the boundaries or the pdadc vs. power table
+ * into the chip registers, check the default starting point on
+ * the pdadc vs. power table and adjust the curve boundaries
+ * accordingly.
+ */
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ u16 gb_limit;
+
+ if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
+ /* get the difference in dB */
+ *diff = (u16)(pwr_table_offset - AR5416_PWR_TABLE_OFFSET_DB);
+ /* get the number of half dB steps */
+ *diff *= 2;
+ /* change the original gain boundary settings
+ * by the number of half dB steps
+ */
+ for (k = 0; k < numXpdGain; k++)
+ gb[k] = (u16)(gb[k] - *diff);
+ }
+ /* Because of a hardware limitation, ensure the gain boundary
+ * is not larger than (63 - overlap)
+ */
+ gb_limit = (u16)(MAX_RATE_POWER - pdGainOverlap_t2);
+
+ for (k = 0; k < numXpdGain; k++)
+ gb[k] = (u16)min(gb_limit, gb[k]);
+ }
+
+ return *diff;
+}
+
+static void ath9k_adjust_pdadc_values(struct ath_hw *ah,
+ int8_t pwr_table_offset,
+ int16_t diff,
+ u8 *pdadcValues)
+{
+#define NUM_PDADC(diff) (AR5416_NUM_PDADC_VALUES - diff)
+ u16 k;
+
+ /* If this is a board that has a pwrTableOffset that differs from
+ * the default AR5416_PWR_TABLE_OFFSET_DB then the start of the
+ * pdadc vs pwr table needs to be adjusted prior to writing to the
+ * chip.
+ */
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
+ /* shift the table to start at the new offset */
+ for (k = 0; k < (u16)NUM_PDADC(diff); k++) {
+ pdadcValues[k] = pdadcValues[k + diff];
+ }
+
+ /* fill the back of the table */
+ for (k = (u16)NUM_PDADC(diff); k < NUM_PDADC(0); k++) {
+ pdadcValues[k] = pdadcValues[NUM_PDADC(diff)];
+ }
+ }
+ }
+#undef NUM_PDADC
+}
+
+static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x)
+#define SM_PDGAIN_B(x, y) \
+ SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y)
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
+ struct cal_data_per_freq *pRawDataset;
+ u8 *pCalBChans = NULL;
+ u16 pdGainOverlap_t2;
+ static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
+ u16 numPiers, i, j;
+ int16_t diff = 0;
+ u16 numXpdGain, xpdMask;
+ u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
+ u32 reg32, regOffset, regChainOffset;
+ int16_t modalIdx;
+ int8_t pwr_table_offset;
+
+ modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
+ xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
+
+ pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
+
+ if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ pdGainOverlap_t2 =
+ pEepData->modalHeader[modalIdx].pdGainOverlap;
+ } else {
+ pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
+ }
+
+ if (IS_CHAN_2GHZ(chan)) {
+ pCalBChans = pEepData->calFreqPier2G;
+ numPiers = AR5416_NUM_2G_CAL_PIERS;
+ } else {
+ pCalBChans = pEepData->calFreqPier5G;
+ numPiers = AR5416_NUM_5G_CAL_PIERS;
+ }
+
+ if (OLC_FOR_AR9280_20_LATER && IS_CHAN_2GHZ(chan)) {
+ pRawDataset = pEepData->calPierData2G[0];
+ ah->initPDADC = ((struct calDataPerFreqOpLoop *)
+ pRawDataset)->vpdPdg[0][0];
+ }
+
+ numXpdGain = 0;
+
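+ /* Collect the PD gain values enabled in the xpdGain mask, highest gain first */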
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_NUM_PD_GAINS)
+ break;
+ xpdGainValues[numXpdGain] =
+ (u16)(AR5416_PD_GAINS_IN_MASK - i);
+ numXpdGain++;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
+ (numXpdGain - 1) & 0x3);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
+ xpdGainValues[0]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
+ xpdGainValues[1]);
+ REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
+ xpdGainValues[2]);
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if ((ah->rxchainmask == 5 || ah->txchainmask == 5) &&
+ (i != 0)) {
+ regChainOffset = (i == 1) ? 0x2000 : 0x1000;
+ } else
+ regChainOffset = i * 0x1000;
+
+ if (pEepData->baseEepHeader.txMask & (1 << i)) {
+ if (IS_CHAN_2GHZ(chan))
+ pRawDataset = pEepData->calPierData2G[i];
+ else
+ pRawDataset = pEepData->calPierData5G[i];
+
+ if (OLC_FOR_AR9280_20_LATER) {
+ u8 pcdacIdx;
+ u8 txPower;
+
+ ath9k_get_txgain_index(ah, chan,
+ (struct calDataPerFreqOpLoop *)pRawDataset,
+ pCalBChans, numPiers, &txPower, &pcdacIdx);
+ ath9k_olc_get_pdadcs(ah, pcdacIdx,
+ txPower/2, pdadcValues);
+ } else {
+ ath9k_hw_get_gain_boundaries_pdadcs(ah,
+ chan, pRawDataset,
+ pCalBChans, numPiers,
+ pdGainOverlap_t2,
+ gainBoundaries,
+ pdadcValues,
+ numXpdGain);
+ }
+
+ diff = ath9k_change_gain_boundary_setting(ah,
+ gainBoundaries,
+ numXpdGain,
+ pdGainOverlap_t2,
+ pwr_table_offset,
+ &diff);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ if (OLC_FOR_AR9280_20_LATER) {
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ SM(0x6,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP) |
+ SM_PD_GAIN(1) | SM_PD_GAIN(2) |
+ SM_PD_GAIN(3) | SM_PD_GAIN(4));
+ } else {
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)|
+ SM_PDGAIN_B(0, 1) |
+ SM_PDGAIN_B(1, 2) |
+ SM_PDGAIN_B(2, 3) |
+ SM_PDGAIN_B(3, 4));
+ }
+
+ ath9k_adjust_pdadc_values(ah, pwr_table_offset,
+ diff, pdadcValues);
+
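+ /* Program the PDADC values for this chain, packed four per 32-bit register */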
+ regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
+ for (j = 0; j < 32; j++) {
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
+ REG_WRITE(ah, regOffset, reg32);
+
+ ath_dbg(common, EEPROM,
+ "PDADC (%d,%4x): %4.4x %8.8x\n",
+ i, regChainOffset, regOffset,
+ reg32);
+ ath_dbg(common, EEPROM,
+ "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n",
+ i, 4 * j, pdadcValues[4 * j],
+ 4 * j + 1, pdadcValues[4 * j + 1],
+ 4 * j + 2, pdadcValues[4 * j + 2],
+ 4 * j + 3, pdadcValues[4 * j + 3]);
+
+ regOffset += 4;
+ }
+ REGWRITE_BUFFER_FLUSH(ah);
+ }
+ }
+
+#undef SM_PD_GAIN
+#undef SM_PDGAIN_B
+}
+
+static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int16_t *ratesArray,
+ u16 cfgCtl,
+ u16 antenna_reduction,
+ u16 powerLimit)
+{
+ struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
+ u16 twiceMaxEdgePower;
+ int i;
+ struct cal_ctl_data *rep;
+ struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
+ 0, { 0, 0, 0, 0}
+ };
+ struct cal_target_power_leg targetPowerOfdmExt = {
+ 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
+ 0, { 0, 0, 0, 0 }
+ };
+ struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
+ 0, {0, 0, 0, 0}
+ };
+ u16 scaledPower = 0, minCtlPower;
+ static const u16 ctlModesFor11a[] = {
+ CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
+ };
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
+ struct chan_centers centers;
+ int tx_chainmask;
+ u16 twiceMinEdgePower;
+
+ tx_chainmask = ah->txchainmask;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+ antenna_reduction);
+
+ if (IS_CHAN_2GHZ(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
+ SUB_NUM_CTL_MODES_AT_2G_40;
+ pCtlMode = ctlModesFor11g;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCck, 4, false);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4, false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT20,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower2GHT40,
+ AR5416_NUM_2G_40_TARGET_POWERS,
+ &targetPowerHt40, 8, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPowerCck,
+ AR5416_NUM_2G_CCK_TARGET_POWERS,
+ &targetPowerCckExt, 4, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower2G,
+ AR5416_NUM_2G_20_TARGET_POWERS,
+ &targetPowerOfdmExt, 4, true);
+ }
+ } else {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
+ SUB_NUM_CTL_MODES_AT_5G_40;
+ pCtlMode = ctlModesFor11a;
+
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower5G,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerOfdm, 4, false);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower5GHT20,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerHt20, 8, false);
+
+ if (IS_CHAN_HT40(chan)) {
+ numCtlModes = ARRAY_SIZE(ctlModesFor11a);
+ ath9k_hw_get_target_powers(ah, chan,
+ pEepData->calTargetPower5GHT40,
+ AR5416_NUM_5G_40_TARGET_POWERS,
+ &targetPowerHt40, 8, true);
+ ath9k_hw_get_legacy_target_powers(ah, chan,
+ pEepData->calTargetPower5G,
+ AR5416_NUM_5G_20_TARGET_POWERS,
+ &targetPowerOfdmExt, 4, true);
+ }
+ }
+
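+ /* For each conformance test limit (CTL) mode, find the regulatory
+ * edge power and clamp the matching target powers to it.
+ */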
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+ (pCtlMode[ctlMode] == CTL_2GHT40);
+ if (isHt40CtlMode)
+ freq = centers.synth_center;
+ else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+ freq = centers.ext_center;
+ else
+ freq = centers.ctl_center;
+
+ twiceMaxEdgePower = MAX_RATE_POWER;
+
+ for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
+ if ((((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ pEepData->ctlIndex[i]) ||
+ (((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
+ rep = &(pEepData->ctlData[i]);
+
+ twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
+ rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
+ IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ twiceMaxEdgePower = min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ } else {
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+
+ minCtlPower = min(twiceMaxEdgePower, scaledPower);
+
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
+ targetPowerCck.tPow2x[i] =
+ min((u16)targetPowerCck.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
+ targetPowerOfdm.tPow2x[i] =
+ min((u16)targetPowerOfdm.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
+ targetPowerHt20.tPow2x[i] =
+ min((u16)targetPowerHt20.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ case CTL_11B_EXT:
+ targetPowerCckExt.tPow2x[0] = min((u16)
+ targetPowerCckExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_11A_EXT:
+ case CTL_11G_EXT:
+ targetPowerOfdmExt.tPow2x[0] = min((u16)
+ targetPowerOfdmExt.tPow2x[0],
+ minCtlPower);
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ targetPowerHt40.tPow2x[i] =
+ min((u16)targetPowerHt40.tPow2x[i],
+ minCtlPower);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
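+ /* Fill the per-rate power array (half-dB units) from the clamped target powers */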
+ ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
+ ratesArray[rate18mb] = ratesArray[rate24mb] =
+ targetPowerOfdm.tPow2x[0];
+ ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
+ ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
+ ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
+ ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
+
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
+ ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
+
+ if (IS_CHAN_2GHZ(chan)) {
+ ratesArray[rate1l] = targetPowerCck.tPow2x[0];
+ ratesArray[rate2s] = ratesArray[rate2l] =
+ targetPowerCck.tPow2x[1];
+ ratesArray[rate5_5s] = ratesArray[rate5_5l] =
+ targetPowerCck.tPow2x[2];
+ ratesArray[rate11s] = ratesArray[rate11l] =
+ targetPowerCck.tPow2x[3];
+ }
+ if (IS_CHAN_HT40(chan)) {
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ ratesArray[rateHt40_0 + i] =
+ targetPowerHt40.tPow2x[i];
+ }
+ ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
+ ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+ if (IS_CHAN_2GHZ(chan)) {
+ ratesArray[rateExtCck] =
+ targetPowerCckExt.tPow2x[0];
+ }
+ }
+}
+
+static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 powerLimit, bool test)
+{
+#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
+ struct modal_eep_header *pModal =
+ &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
+ int16_t ratesArray[Ar5416RateSize];
+ u8 ht40PowerIncForPdadc = 2;
+ int i, cck_ofdm_delta = 0;
+
+ memset(ratesArray, 0, sizeof(ratesArray));
+
+ if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
+ AR5416_EEP_MINOR_VER_2) {
+ ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
+ }
+
+ ath9k_hw_set_def_power_per_rate_table(ah, chan,
+ &ratesArray[0], cfgCtl,
+ twiceAntennaReduction,
+ powerLimit);
+
+ ath9k_hw_set_def_power_cal_table(ah, chan);
+
+ regulatory->max_power_level = 0;
+ for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
+ }
+
+ ath9k_hw_update_regulatory_maxpower(ah);
+
+ if (test)
+ return;
+
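+ /* AR9280 and later: remove the EEPROM power table offset
+ * (converted to half-dB steps) before programming the registers.
+ */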
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ for (i = 0; i < Ar5416RateSize; i++) {
+ int8_t pwr_table_offset;
+
+ pwr_table_offset = ah->eep_ops->get_eeprom(ah,
+ EEP_PWR_TABLE_OFFSET);
+ ratesArray[i] -= pwr_table_offset * 2;
+ }
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
+ ATH9K_POW_SM(ratesArray[rate18mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate12mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate9mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate6mb], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
+ ATH9K_POW_SM(ratesArray[rate54mb], 24)
+ | ATH9K_POW_SM(ratesArray[rate48mb], 16)
+ | ATH9K_POW_SM(ratesArray[rate36mb], 8)
+ | ATH9K_POW_SM(ratesArray[rate24mb], 0));
+
+ if (IS_CHAN_2GHZ(chan)) {
+ if (OLC_FOR_AR9280_20_LATER) {
+ cck_ofdm_delta = 2;
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
+ ATH9K_POW_SM(RT_AR_DELTA(rate2s), 24)
+ | ATH9K_POW_SM(RT_AR_DELTA(rate2l), 16)
+ | ATH9K_POW_SM(ratesArray[rateXr], 8)
+ | ATH9K_POW_SM(RT_AR_DELTA(rate1l), 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
+ ATH9K_POW_SM(RT_AR_DELTA(rate11s), 24)
+ | ATH9K_POW_SM(RT_AR_DELTA(rate11l), 16)
+ | ATH9K_POW_SM(RT_AR_DELTA(rate5_5s), 8)
+ | ATH9K_POW_SM(RT_AR_DELTA(rate5_5l), 0));
+ } else {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
+ ATH9K_POW_SM(ratesArray[rate2s], 24)
+ | ATH9K_POW_SM(ratesArray[rate2l], 16)
+ | ATH9K_POW_SM(ratesArray[rateXr], 8)
+ | ATH9K_POW_SM(ratesArray[rate1l], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
+ ATH9K_POW_SM(ratesArray[rate11s], 24)
+ | ATH9K_POW_SM(ratesArray[rate11l], 16)
+ | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
+ | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
+ ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
+ ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
+ | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
+ | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
+ | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
+
+ if (IS_CHAN_HT40(chan)) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
+ ATH9K_POW_SM(ratesArray[rateHt40_3] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_2] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_1] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_0] +
+ ht40PowerIncForPdadc, 0));
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
+ ATH9K_POW_SM(ratesArray[rateHt40_7] +
+ ht40PowerIncForPdadc, 24)
+ | ATH9K_POW_SM(ratesArray[rateHt40_6] +
+ ht40PowerIncForPdadc, 16)
+ | ATH9K_POW_SM(ratesArray[rateHt40_5] +
+ ht40PowerIncForPdadc, 8)
+ | ATH9K_POW_SM(ratesArray[rateHt40_4] +
+ ht40PowerIncForPdadc, 0));
+ if (OLC_FOR_AR9280_20_LATER) {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
+ ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
+ | ATH9K_POW_SM(RT_AR_DELTA(rateExtCck), 16)
+ | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
+ | ATH9K_POW_SM(RT_AR_DELTA(rateDupCck), 0));
+ } else {
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
+ ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
+ | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
+ | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
+ | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
+ ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
+ | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
+
+ /* TPC initializations */
+ if (ah->tpc_enabled) {
+ int ht40_delta;
+
+ ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
+ ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
+ /* Enable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
+ MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
+ } else {
+ /* Disable TPC */
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
+{
+ return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
+}
+
+const struct eeprom_ops eep_def_ops = {
+ .check_eeprom = ath9k_hw_def_check_eeprom,
+ .get_eeprom = ath9k_hw_def_get_eeprom,
+ .fill_eeprom = ath9k_hw_def_fill_eeprom,
+ .dump_eeprom = ath9k_hw_def_dump_eeprom,
+ .get_eeprom_ver = ath9k_hw_def_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_def_get_eeprom_rev,
+ .set_board_values = ath9k_hw_def_set_board_values,
+ .set_addac = ath9k_hw_def_set_addac,
+ .set_txpower = ath9k_hw_def_set_txpower,
+ .get_spur_channel = ath9k_hw_def_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000..284706798
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+#ifdef CONFIG_MAC80211_LEDS
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
+ u32 val = (brightness == LED_OFF);
+
+ if (sc->sc_ah->config.led_active_high)
+ val = !val;
+
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, val);
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ if (!sc->led_registered)
+ return;
+
+ ath_led_brightness(&sc->led_cdev, LED_OFF);
+ led_classdev_unregister(&sc->led_cdev);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ int ret;
+
+ if (AR_SREV_9100(sc->sc_ah))
+ return;
+
+ if (!ath9k_led_blink)
+ sc->led_cdev.default_trigger =
+ ieee80211_get_radio_led_name(sc->hw);
+
+ snprintf(sc->led_name, sizeof(sc->led_name),
+ "ath9k-%s", wiphy_name(sc->hw->wiphy));
+ sc->led_cdev.name = sc->led_name;
+ sc->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
+ if (ret < 0)
+ return;
+
+ sc->led_registered = true;
+}
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (AR_SREV_9100(ah))
+ return;
+
+ if (ah->led_pin >= 0) {
+ if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
+ ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
+ return;
+ }
+
+ if (AR_SREV_9287(ah))
+ ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9485;
+ else if (AR_SREV_9300(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9300;
+ else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9462;
+ else
+ ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure the LED GPIO for output */
+ ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ /* Turn the LED off; the inactive GPIO level depends on led_active_high */
+ ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
+}
+#endif
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ bool is_blocked;
+
+ ath9k_ps_wakeup(sc);
+ is_blocked = ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+ ath9k_ps_restore(sc);
+
+ return is_blocked;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
+ "BT scan detected\n");
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ set_bit(BT_OP_SCAN, &btcoex->op_flags);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
+ "BT priority traffic detected\n");
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+static void ath_mci_ftp_adjust(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
+ (mci->num_pan || mci->num_other_acl))
+ ah->btcoex_hw.mci.stomp_ftp =
+ (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
+ else
+ ah->btcoex_hw.mci.stomp_ftp = false;
+ btcoex->bt_wait_time = 0;
+ sc->rx.num_pkts = 0;
+ }
+}
+
+/*
+ * This is the master bt coex timer which runs every 45 ms;
+ * bt traffic is given priority during 55% of this period
+ * while wlan gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ enum ath_stomp_type stomp_type;
+ u32 timer_period;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+ btcoex->bt_wait_time += btcoex->btcoex_period;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ goto skip_hw_wakeup;
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ath9k_mci_update_rssi(sc);
+ ath_mci_ftp_adjust(sc);
+ }
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_detect_bt_priority(sc);
+
+ stomp_type = btcoex->bt_stomp_type;
+ timer_period = btcoex->btcoex_no_stomp;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) {
+ if (test_bit(BT_OP_SCAN, &btcoex->op_flags)) {
+ stomp_type = ATH_BTCOEX_STOMP_ALL;
+ timer_period = btcoex->btscan_no_stomp;
+ }
+ } else if (btcoex->stomp_audio >= 5) {
+ stomp_type = ATH_BTCOEX_STOMP_AUDIO;
+ btcoex->stomp_audio = 0;
+ }
+
+ ath9k_hw_btcoex_bt_stomp(ah, stomp_type);
+ ath9k_hw_btcoex_enable(ah);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp)
+ mod_timer(&btcoex->no_stomp_timer,
+ jiffies + msecs_to_jiffies(timer_period));
+
+ ath9k_ps_restore(sc);
+
+skip_hw_wakeup:
+ mod_timer(&btcoex->period_timer,
+ jiffies + msecs_to_jiffies(btcoex->btcoex_period));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(unsigned long arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI) &&
+ test_bit(BT_OP_SCAN, &btcoex->op_flags)))
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+
+ ath9k_hw_btcoex_enable(ah);
+ spin_unlock_bh(&btcoex->btcoex_lock);
+ ath9k_ps_restore(sc);
+}
+
+static void ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+ setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+}
+
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ del_timer_sync(&btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_3WIRE &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n");
+
+ del_timer_sync(&btcoex->period_timer);
+ del_timer_sync(&btcoex->no_stomp_timer);
+}
+
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ del_timer_sync(&btcoex->no_stomp_timer);
+}
+
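+/*
+ * Limit the A-MPDU size while BT is active: scale by the MCI
+ * aggregation limit when set, otherwise cap it while priority
+ * BT traffic is detected.
+ */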
+u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &sc->btcoex.mci;
+ u16 aggr_limit = 0;
+
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+ aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+ else if (test_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags))
+ aggr_limit = min((max_4ms_framelen * 3) / 8,
+ (u32)ATH_AMPDU_LIMIT_MAX);
+
+ return aggr_limit;
+}
+
+void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
+{
+ if (status & ATH9K_INT_MCI)
+ ath_mci_intr(sc);
+}
+
+void ath9k_start_btcoex(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->btcoex_hw.enabled ||
+ ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT, 0);
+ else
+ ath9k_hw_btcoex_set_weight(ah, 0, 0,
+ ATH_BTCOEX_STOMP_NONE);
+ ath9k_hw_btcoex_enable(ah);
+ ath9k_btcoex_timer_resume(sc);
+}
+
+void ath9k_stop_btcoex(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (!ah->btcoex_hw.enabled ||
+ ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(ah);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ath_mci_flush_profile(&sc->btcoex.mci);
+}
+
+void ath9k_deinit_btcoex(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ath_mci_cleanup(sc);
+}
+
+int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
+ int r;
+
+ ath9k_hw_btcoex_init_scheme(ah);
+
+ switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ ath_init_btcoex_timer(sc);
+ txq = sc->tx.txq_map[IEEE80211_AC_BE];
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
+ break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath_init_btcoex_timer(sc);
+
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+ ath9k_hw_btcoex_init_mci(ah);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ u32 len = 0;
+ int i;
+
+ ATH_DUMP_BTCOEX("Total BT profiles", NUM_PROF(mci));
+ ATH_DUMP_BTCOEX("MGMT", mci->num_mgmt);
+ ATH_DUMP_BTCOEX("SCO", mci->num_sco);
+ ATH_DUMP_BTCOEX("A2DP", mci->num_a2dp);
+ ATH_DUMP_BTCOEX("HID", mci->num_hid);
+ ATH_DUMP_BTCOEX("PAN", mci->num_pan);
+ ATH_DUMP_BTCOEX("ACL", mci->num_other_acl);
+ ATH_DUMP_BTCOEX("BDR", mci->num_bdr);
+ ATH_DUMP_BTCOEX("Aggr. Limit", mci->aggr_limit);
+ ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+ ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+ ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+ ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+ ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
+ ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
+
+ len += scnprintf(buf + len, size - len, "BT Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "WLAN Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, "Tx Priorities: ");
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->tx_prio[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ return len;
+}
+
+static int ath9k_dump_legacy_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 len = 0;
+
+ ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+ ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+ ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+ ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+
+ return len;
+}
+
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ return ath9k_dump_mci_btcoex(sc, buf, size);
+ else
+ return ath9k_dump_legacy_btcoex(sc, buf, size);
+}
+
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 000000000..10c02f5cb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1383 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "htc.h"
+
+/* identify firmware images */
+#define FIRMWARE_AR7010_1_1 "htc_7010.fw"
+#define FIRMWARE_AR9271 "htc_9271.fw"
+
+MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
+MODULE_FIRMWARE(FIRMWARE_AR9271);
+
+static struct usb_device_id ath9k_hif_usb_ids[] = {
+ { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
+ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
+ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
+ { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
+ { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
+ { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
+ { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
+ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
+ { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
+ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
+ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
+ { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+
+ { USB_DEVICE(0x0cf3, 0x7015),
+ .driver_info = AR9287_USB }, /* Atheros */
+ { USB_DEVICE(0x1668, 0x1200),
+ .driver_info = AR9287_USB }, /* Verizon */
+
+ { USB_DEVICE(0x0cf3, 0x7010),
+ .driver_info = AR9280_USB }, /* Atheros */
+ { USB_DEVICE(0x0846, 0x9018),
+ .driver_info = AR9280_USB }, /* Netgear WNDA3200 */
+ { USB_DEVICE(0x083A, 0xA704),
+ .driver_info = AR9280_USB }, /* SMC Networks */
+ { USB_DEVICE(0x0411, 0x017f),
+ .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
+ { USB_DEVICE(0x0411, 0x0197),
+ .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
+ { USB_DEVICE(0x04da, 0x3904),
+ .driver_info = AR9280_USB },
+
+ { USB_DEVICE(0x0cf3, 0x20ff),
+ .driver_info = STORAGE_DEVICE },
+
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
+
+static int __hif_usb_tx(struct hif_device_usb *hif_dev);
+
+static void hif_usb_regout_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+ if (cmd) {
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, true);
+ kfree(cmd);
+ }
+
+ return;
+free:
+ kfree_skb(cmd->skb);
+ kfree(cmd);
+}
+
+static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ skb->data, skb->len,
+ hif_usb_regout_cb, cmd, 1);
+
+ usb_anchor_urb(urb, &hif_dev->regout_submitted);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static void hif_usb_mgmt_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+ struct hif_device_usb *hif_dev;
+ bool txok = true;
+
+ if (!cmd || !cmd->skb || !cmd->hif_dev)
+ return;
+
+ hif_dev = cmd->hif_dev;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ txok = false;
+
+ /*
+ * If the URBs are being flushed, no need to complete
+ * this packet.
+ */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ dev_kfree_skb_any(cmd->skb);
+ kfree(cmd);
+ return;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ break;
+ default:
+ txok = false;
+ break;
+ }
+
+ skb_pull(cmd->skb, 4);
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, txok);
+ kfree(cmd);
+}
+
+static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+ __le16 *hdr;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ hdr = (__le16 *) skb_push(skb, 4);
+ *hdr++ = cpu_to_le16(skb->len - 4);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ skb->data, skb->len,
+ hif_usb_mgmt_cb, cmd);
+
+ usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *queue,
+ bool txok)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(queue)) != NULL) {
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ int ln = skb->len;
+#endif
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, txok);
+ if (txok) {
+ TX_STAT_INC(skb_success);
+ TX_STAT_ADD(skb_success_bytes, ln);
+ } else {
+ TX_STAT_INC(skb_failed);
+ }
+ }
+}
+
+static void hif_usb_tx_cb(struct urb *urb)
+{
+ struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct hif_device_usb *hif_dev;
+ bool txok = true;
+
+ if (!tx_buf || !tx_buf->hif_dev)
+ return;
+
+ hif_dev = tx_buf->hif_dev;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ txok = false;
+
+ /*
+ * If the URBs are being flushed, no need to add this
+ * URB to the free list.
+ */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ return;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ break;
+ default:
+ txok = false;
+ break;
+ }
+
+ ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);
+
+ /* Re-initialize the SKB queue */
+ tx_buf->len = tx_buf->offset = 0;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ /* Add this TX buffer to the free list */
+ spin_lock(&hif_dev->tx.tx_lock);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
+ __hif_usb_tx(hif_dev); /* Check for pending SKBs */
+ TX_STAT_INC(buf_completed);
+ spin_unlock(&hif_dev->tx.tx_lock);
+}
+
+/* TX lock has to be taken */
+static int __hif_usb_tx(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL;
+ struct sk_buff *nskb = NULL;
+ int ret = 0, i;
+ u16 tx_skb_cnt = 0;
+ u8 *buf;
+ __le16 *hdr;
+
+ if (hif_dev->tx.tx_skb_cnt == 0)
+ return 0;
+
+ /* Check if a free TX buffer is available */
+ if (list_empty(&hif_dev->tx.tx_buf))
+ return 0;
+
+ tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
+ hif_dev->tx.tx_buf_cnt--;
+
+ tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
+
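+ /* Aggregate queued frames into a single USB transfer; each frame
+ * gets a 4-byte stream header and is padded to a 4-byte boundary.
+ */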
+ for (i = 0; i < tx_skb_cnt; i++) {
+ nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
+
+ /* Should never be NULL */
+ BUG_ON(!nskb);
+
+ hif_dev->tx.tx_skb_cnt--;
+
+ buf = tx_buf->buf;
+ buf += tx_buf->offset;
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
+ buf += 4;
+ memcpy(buf, nskb->data, nskb->len);
+ tx_buf->len = nskb->len + 4;
+
+ if (i < (tx_skb_cnt - 1))
+ tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
+
+ if (i == (tx_skb_cnt - 1))
+ tx_buf->len += tx_buf->offset;
+
+ __skb_queue_tail(&tx_buf->skb_queue, nskb);
+ TX_STAT_INC(skb_queued);
+ }
+
+ usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ tx_buf->buf, tx_buf->len,
+ hif_usb_tx_cb, tx_buf);
+
+ ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
+ if (ret) {
+ tx_buf->len = tx_buf->offset = 0;
+ ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
+ __skb_queue_head_init(&tx_buf->skb_queue);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ }
+
+ if (!ret)
+ TX_STAT_INC(buf_queued);
+
+ return ret;
+}
+
+static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
+{
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENODEV;
+ }
+
+ /* Check if the max queue count has been reached */
+ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENOMEM;
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ tx_ctl = HTC_SKB_CB(skb);
+
+ /* Mgmt/Beacon frames don't use the TX buffer pool */
+ if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
+ (tx_ctl->type == ATH9K_HTC_BEACON)) {
+ ret = hif_usb_send_mgmt(hif_dev, skb);
+ }
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
+ (tx_ctl->type == ATH9K_HTC_AMPDU)) {
+ __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+ hif_dev->tx.tx_skb_cnt++;
+ }
+
+ /* Check if AMPDUs have to be sent immediately */
+ if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+ (hif_dev->tx.tx_skb_cnt < 2)) {
+ __hif_usb_tx(hif_dev);
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ return ret;
+}
+
+static void hif_usb_start(void *hif_handle)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ hif_dev->flags |= HIF_USB_START;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static void hif_usb_stop(void *hif_handle)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
+ hif_dev->tx.tx_skb_cnt = 0;
+ hif_dev->tx.flags |= HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ /* The pending URBs have to be canceled. */
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ }
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+}
+
+static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ int ret = 0;
+
+ switch (pipe_id) {
+ case USB_WLAN_TX_PIPE:
+ ret = hif_usb_send_tx(hif_dev, skb);
+ break;
+ case USB_REG_OUT_PIPE:
+ ret = hif_usb_send_regout(hif_dev, skb);
+ break;
+ default:
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static inline bool check_index(struct sk_buff *skb, u8 idx)
+{
+ struct ath9k_htc_tx_ctl *tx_ctl;
+
+ tx_ctl = HTC_SKB_CB(skb);
+
+ if ((tx_ctl->type == ATH9K_HTC_AMPDU) &&
+ (tx_ctl->sta_idx == idx))
+ return true;
+
+ return false;
+}
+
+static void hif_usb_sta_drain(void *hif_handle, u8 idx)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
+ if (check_index(skb, idx)) {
+ __skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, false);
+ hif_dev->tx.tx_skb_cnt--;
+ TX_STAT_INC(skb_failed);
+ }
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static struct ath9k_htc_hif hif_usb = {
+ .transport = ATH9K_HIF_USB,
+ .name = "ath9k_hif_usb",
+
+ .control_ul_pipe = USB_REG_OUT_PIPE,
+ .control_dl_pipe = USB_REG_IN_PIPE,
+
+ .start = hif_usb_start,
+ .stop = hif_usb_stop,
+ .sta_drain = hif_usb_sta_drain,
+ .send = hif_usb_send,
+};
+
+static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
+ int index = 0, i = 0, len = skb->len;
+ int rx_remain_len, rx_pkt_len;
+ u16 pool_index = 0;
+ u8 *ptr;
+
+ spin_lock(&hif_dev->rx_lock);
+
+ rx_remain_len = hif_dev->rx_remain_len;
+ rx_pkt_len = hif_dev->rx_transfer_len;
+
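+ /* Complete a packet left over from the previous USB transfer, if any */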
+ if (rx_remain_len != 0) {
+ struct sk_buff *remain_skb = hif_dev->remain_skb;
+
+ if (remain_skb) {
+ ptr = (u8 *) remain_skb->data;
+
+ index = rx_remain_len;
+ rx_remain_len -= hif_dev->rx_pad_len;
+ ptr += rx_pkt_len;
+
+ memcpy(ptr, skb->data, rx_remain_len);
+
+ rx_pkt_len += rx_remain_len;
+ hif_dev->rx_remain_len = 0;
+ skb_put(remain_skb, rx_pkt_len);
+
+ skb_pool[pool_index++] = remain_skb;
+
+ } else {
+ index = rx_remain_len;
+ }
+ }
+
+ spin_unlock(&hif_dev->rx_lock);
+
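+ /* Walk the stream: each packet is a 4-byte header (length + tag)
+ * followed by the payload, padded to a 4-byte boundary.
+ */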
+ while (index < len) {
+ u16 pkt_len;
+ u16 pkt_tag;
+ u16 pad_len;
+ int chk_idx;
+
+ ptr = (u8 *) skb->data;
+
+ pkt_len = get_unaligned_le16(ptr + index);
+ pkt_tag = get_unaligned_le16(ptr + index + 2);
+
+ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation error\n");
+ spin_unlock(&hif_dev->rx_lock);
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
+ }
+ }
+
+err:
+ for (i = 0; i < pool_index; i++) {
+ RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
+ skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ RX_STAT_INC(skb_completed);
+ }
+}
+
+static void ath9k_hif_usb_rx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct hif_device_usb *hif_dev =
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+ ath9k_hif_usb_rx_stream(hif_dev, skb);
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+}
+
+static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct sk_buff *nskb;
+ struct hif_device_usb *hif_dev =
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+ /* Process the command first */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+ nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+ urb->context = NULL;
+ return;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
+ USB_REG_IN_PIPE),
+ nskb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
+ }
+
+resubmit:
+ usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+ urb->context = NULL;
+}
+
+static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ unsigned long flags;
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+}
+
+static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf;
+ int i;
+
+ INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
+ INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
+ spin_lock_init(&hif_dev->tx.tx_lock);
+ __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+ init_usb_anchor(&hif_dev->mgmt_submitted);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+ tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+ tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
+ if (!tx_buf->buf)
+ goto err;
+
+ tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_buf->urb)
+ goto err;
+
+ tx_buf->hif_dev = hif_dev;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ }
+
+ hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
+
+ return 0;
+err:
+ if (tx_buf) {
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+}
+
+static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+ spin_lock_init(&hif_dev->rx_lock);
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+ ath9k_hif_usb_rx_cb, skb);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+		/*
+		 * Drop our reference count.
+		 * This ensures that the URB is freed when the anchored
+		 * URBs are killed.
+		 */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+}
+
+static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
+}
+
+static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->reg_in_submitted);
+
+ for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
+ USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, skb, 1);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+		/*
+		 * Drop our reference count.
+		 * This ensures that the URB is freed when the anchored
+		 * URBs are killed.
+		 */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
+ return ret;
+}
+
+static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
+{
+ /* Register Write */
+ init_usb_anchor(&hif_dev->regout_submitted);
+
+ /* TX */
+ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* RX */
+ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
+ goto err_rx;
+
+ /* Register Read */
+ if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
+ goto err_reg;
+
+ return 0;
+err_reg:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+err_rx:
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+err:
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+}
+
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+{
+ int transfer, err;
+ const void *data = hif_dev->fw_data;
+ size_t len = hif_dev->fw_size;
+ u32 addr = AR9271_FIRMWARE;
+ u8 *buf = kzalloc(4096, GFP_KERNEL);
+ u32 firm_offset;
+
+ if (!buf)
+ return -ENOMEM;
+
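+	/*
+	 * Push the firmware image to the target in 4K chunks via vendor
+	 * control requests; the load address (shifted right by 8) is
+	 * passed in the wValue field.
+	 */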
+ while (len) {
+ transfer = min_t(size_t, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(hif_dev->udev,
+ usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, HZ);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
+ firm_offset = AR7010_FIRMWARE_TEXT;
+ else
+ firm_offset = AR9271_FIRMWARE_TEXT;
+
+ /*
+ * Issue FW download complete command to firmware.
+ */
+ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD_COMP,
+ 0x40 | USB_DIR_OUT,
+ firm_offset >> 8, 0, NULL, 0, HZ);
+ if (err)
+ return -EIO;
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
+ hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
+{
+ int ret;
+
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s download failed\n",
+ hif_dev->fw_name);
+ return ret;
+ }
+
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
+{
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+}
+
+/*
+ * If initialization fails or the FW cannot be retrieved,
+ * detach the device.
+ */
+static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
+{
+ struct device *dev = &hif_dev->udev->dev;
+ struct device *parent = dev->parent;
+
+ complete_all(&hif_dev->fw_done);
+
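+	/*
+	 * Unbind the driver from the device, holding the parent's lock
+	 * to serialize against the USB core.
+	 */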
+ if (parent)
+ device_lock(parent);
+
+ device_release_driver(dev);
+
+ if (parent)
+ device_unlock(parent);
+}
+
+static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
+{
+ struct hif_device_usb *hif_dev = context;
+ int ret;
+
+ if (!fw) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Failed to get firmware %s\n",
+ hif_dev->fw_name);
+ goto err_fw;
+ }
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
+ &hif_dev->udev->dev);
+ if (hif_dev->htc_handle == NULL)
+ goto err_dev_alloc;
+
+ hif_dev->fw_data = fw->data;
+ hif_dev->fw_size = fw->size;
+
+ /* Proceed with initialization */
+
+ ret = ath9k_hif_usb_dev_init(hif_dev);
+ if (ret)
+ goto err_dev_init;
+
+ ret = ath9k_htc_hw_init(hif_dev->htc_handle,
+ &hif_dev->interface->dev,
+ hif_dev->usb_device_id->idProduct,
+ hif_dev->udev->product,
+ hif_dev->usb_device_id->driver_info);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ release_firmware(fw);
+ hif_dev->flags |= HIF_USB_READY;
+ complete_all(&hif_dev->fw_done);
+
+ return;
+
+err_htc_hw_init:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_dev_init:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+err_dev_alloc:
+ release_firmware(fw);
+err_fw:
+ ath9k_hif_usb_firmware_fail(hif_dev);
+}
+
+/*
+ * An exact copy of the function from zd1211rw.
+ */
+static int send_eject_command(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_endpoint_descriptor *endpoint;
+ unsigned char *cmd;
+ u8 bulk_out_ep;
+ int r;
+
+ /* Find bulk out endpoint */
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
+ dev_err(&udev->dev,
+ "ath9k_htc: Could not find bulk out endpoint\n");
+ return -ENODEV;
+ }
+
+ cmd = kzalloc(31, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENODEV;
+
+ /* USB bulk command block */
+ cmd[0] = 0x55; /* bulk command signature */
+ cmd[1] = 0x53; /* bulk command signature */
+ cmd[2] = 0x42; /* bulk command signature */
+ cmd[3] = 0x43; /* bulk command signature */
+ cmd[14] = 6; /* command length */
+
+ cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+ cmd[19] = 0x2; /* eject disc */
+
+ dev_info(&udev->dev, "Ejecting storage device...\n");
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+ cmd, 31, NULL, 2000);
+ kfree(cmd);
+ if (r)
+ return r;
+
+ /* At this point, the device disconnects and reconnects with the real
+ * ID numbers. */
+
+ usb_set_intfdata(interface, NULL);
+ return 0;
+}
+
+static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev;
+ int ret = 0;
+
+ if (id->driver_info == STORAGE_DEVICE)
+ return send_eject_command(interface);
+
+ hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
+ if (!hif_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ usb_get_dev(udev);
+
+ hif_dev->udev = udev;
+ hif_dev->interface = interface;
+ hif_dev->usb_device_id = id;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif
+ usb_set_intfdata(interface, hif_dev);
+
+ init_completion(&hif_dev->fw_done);
+
+ /* Find out which firmware to load */
+
+ if (IS_AR7010_DEVICE(id->driver_info))
+ hif_dev->fw_name = FIRMWARE_AR7010_1_1;
+ else
+ hif_dev->fw_name = FIRMWARE_AR9271;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
+ &hif_dev->udev->dev, GFP_KERNEL,
+ hif_dev, ath9k_hif_usb_firmware_cb);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Async request for firmware %s failed\n",
+ hif_dev->fw_name);
+ goto err_fw_req;
+ }
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
+ hif_dev->fw_name);
+
+ return 0;
+
+err_fw_req:
+ usb_set_intfdata(interface, NULL);
+ kfree(hif_dev);
+ usb_put_dev(udev);
+err_alloc:
+ return ret;
+}
+
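+/*
+ * Ask the firmware to reboot back into the first stage bootloader
+ * by writing the reboot magic to the REG_OUT pipe.
+ */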
+static void ath9k_hif_usb_reboot(struct usb_device *udev)
+{
+ u32 reboot_cmd = 0xffffffff;
+ void *buf;
+ int ret;
+
+ buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
+ buf, 4, NULL, HZ);
+ if (ret)
+ dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
+
+ kfree(buf);
+}
+
+static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+	bool unplugged = (udev->state == USB_STATE_NOTATTACHED);
+
+ if (!hif_dev)
+ return;
+
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->flags & HIF_USB_READY) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ }
+
+ usb_set_intfdata(interface, NULL);
+
+	/* If the firmware was loaded, drop it and
+	 * go back to the first stage bootloader. */
+ if (!unplugged && (hif_dev->flags & HIF_USB_READY))
+ ath9k_hif_usb_reboot(udev);
+
+ kfree(hif_dev);
+ dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
+ usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int ath9k_hif_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+
+ /*
+ * The device has to be set to FULLSLEEP mode in case no
+ * interface is up.
+ */
+ if (!(hif_dev->flags & HIF_USB_START))
+ ath9k_htc_suspend(hif_dev->htc_handle);
+
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->flags & HIF_USB_READY)
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_resume(struct usb_interface *interface)
+{
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+ struct htc_target *htc_handle = hif_dev->htc_handle;
+ int ret;
+ const struct firmware *fw;
+
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret)
+ return ret;
+
+ if (hif_dev->flags & HIF_USB_READY) {
+ /* request cached firmware during suspend/resume cycle */
+ ret = request_firmware(&fw, hif_dev->fw_name,
+ &hif_dev->udev->dev);
+ if (ret)
+ goto fail_resume;
+
+ hif_dev->fw_data = fw->data;
+ hif_dev->fw_size = fw->size;
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ release_firmware(fw);
+ if (ret)
+ goto fail_resume;
+ } else {
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ ret = ath9k_htc_resume(htc_handle);
+
+ if (ret)
+ goto fail_resume;
+
+ return 0;
+
+fail_resume:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return ret;
+}
+#endif
+
+static struct usb_driver ath9k_hif_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = ath9k_hif_usb_probe,
+ .disconnect = ath9k_hif_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = ath9k_hif_usb_suspend,
+ .resume = ath9k_hif_usb_resume,
+ .reset_resume = ath9k_hif_usb_resume,
+#endif
+ .id_table = ath9k_hif_usb_ids,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+int ath9k_hif_usb_init(void)
+{
+ return usb_register(&ath9k_hif_usb_driver);
+}
+
+void ath9k_hif_usb_exit(void)
+{
+ usb_deregister(&ath9k_hif_usb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 000000000..51496e74b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_USB_H
+#define HTC_USB_H
+
+#define MAJOR_VERSION_REQ 1
+#define MINOR_VERSION_REQ 3
+
+#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
+
+#define AR9271_FIRMWARE 0x501000
+#define AR9271_FIRMWARE_TEXT 0x903000
+#define AR7010_FIRMWARE_TEXT 0x906000
+
+#define FIRMWARE_DOWNLOAD 0x30
+#define FIRMWARE_DOWNLOAD_COMP 0x31
+
+#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
+#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
+
+/* FIXME: Verify these numbers (with Windows) */
+#define MAX_TX_URB_NUM 8
+#define MAX_TX_BUF_NUM 256
+#define MAX_TX_BUF_SIZE 32768
+#define MAX_TX_AGGR_NUM 20
+
+#define MAX_RX_URB_NUM 8
+#define MAX_RX_BUF_SIZE 16384
+#define MAX_PKT_NUM_IN_TRANSFER 10
+
+#define MAX_REG_OUT_URB_NUM 1
+#define MAX_REG_IN_URB_NUM 64
+
+#define MAX_REG_IN_BUF_SIZE 64
+
+/* USB Endpoint definition */
+#define USB_WLAN_TX_PIPE 1
+#define USB_WLAN_RX_PIPE 2
+#define USB_REG_IN_PIPE 3
+#define USB_REG_OUT_PIPE 4
+
+#define HIF_USB_MAX_RXPIPES 2
+#define HIF_USB_MAX_TXPIPES 4
+
+struct tx_buf {
+ u8 *buf;
+ u16 len;
+ u16 offset;
+ struct urb *urb;
+ struct sk_buff_head skb_queue;
+ struct hif_device_usb *hif_dev;
+ struct list_head list;
+};
+
+#define HIF_USB_TX_STOP BIT(0)
+#define HIF_USB_TX_FLUSH BIT(1)
+
+struct hif_usb_tx {
+ u8 flags;
+ u8 tx_buf_cnt;
+ u16 tx_skb_cnt;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head tx_buf;
+ struct list_head tx_pending;
+ spinlock_t tx_lock;
+};
+
+struct cmd_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
+#define HIF_USB_START BIT(0)
+#define HIF_USB_READY BIT(1)
+
+struct hif_device_usb {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ const struct usb_device_id *usb_device_id;
+ const void *fw_data;
+ size_t fw_size;
+ struct completion fw_done;
+ struct htc_target *htc_handle;
+ struct hif_usb_tx tx;
+ struct usb_anchor regout_submitted;
+ struct usb_anchor rx_submitted;
+ struct usb_anchor reg_in_submitted;
+ struct usb_anchor mgmt_submitted;
+ struct sk_buff *remain_skb;
+ const char *fw_name;
+ int rx_remain_len;
+ int rx_pkt_len;
+ int rx_transfer_len;
+ int rx_pad_len;
+ spinlock_t rx_lock;
+ u8 flags; /* HIF_USB_* */
+};
+
+int ath9k_hif_usb_init(void);
+void ath9k_hif_usb_exit(void);
+
+#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 000000000..5dbc617ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "htc_hst.h"
+#include "hif_usb.h"
+#include "wmi.h"
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+
+#define ATH_DEFAULT_BMISS_LIMIT 10
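+/*
+ * Convert a TSF value (high/low 32-bit words, in microseconds)
+ * to TUs (1 TU = 1024 microseconds).
+ */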
+#define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+extern struct ieee80211_ops ath9k_htc_ops;
+extern int htc_modparam_nohwcrypt;
+#ifdef CONFIG_MAC80211_LEDS
+extern int ath9k_htc_led_blink;
+#endif
+
+enum htc_phymode {
+ HTC_MODE_11NA = 0,
+ HTC_MODE_11NG = 1
+};
+
+enum htc_opmode {
+ HTC_M_STA = 1,
+ HTC_M_IBSS = 0,
+ HTC_M_AHDEMO = 3,
+ HTC_M_HOSTAP = 6,
+ HTC_M_MONITOR = 8,
+ HTC_M_WDS = 2
+};
+
+#define ATH9K_HTC_AMPDU 1
+#define ATH9K_HTC_NORMAL 2
+#define ATH9K_HTC_BEACON 3
+#define ATH9K_HTC_MGMT 4
+
+#define ATH9K_HTC_TX_CTSONLY 0x1
+#define ATH9K_HTC_TX_RTSCTS 0x2
+
+struct tx_frame_hdr {
+ u8 data_type;
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ __be32 flags; /* ATH9K_HTC_TX_* */
+ u8 key_type;
+ u8 keyix;
+ u8 cookie;
+ u8 pad;
+} __packed;
+
+struct tx_mgmt_hdr {
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u8 flags;
+ u8 key_type;
+ u8 keyix;
+ u8 cookie;
+ u8 pad;
+} __packed;
+
+struct tx_beacon_header {
+ u8 vif_index;
+ u8 len_changed;
+ u16 rev;
+} __packed;
+
+#define MAX_TX_AMPDU_SUBFRAMES_9271 17
+#define MAX_TX_AMPDU_SUBFRAMES_7010 22
+
+struct ath9k_htc_cap_target {
+ __be32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 enable_coex;
+ u8 tx_chainmask;
+ u8 pad;
+} __packed;
+
+struct ath9k_htc_target_vif {
+ u8 index;
+ u8 opmode;
+ u8 myaddr[ETH_ALEN];
+ u8 ath_cap;
+ __be16 rtsthreshold;
+ u8 pad;
+} __packed;
+
+struct ath9k_htc_target_sta {
+ u8 macaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 sta_index;
+ u8 vif_index;
+ u8 is_vif_sta;
+ __be16 flags;
+ __be16 htcap;
+ __be16 maxampdu;
+ u8 pad;
+} __packed;
+
+struct ath9k_htc_target_aggr {
+ u8 sta_index;
+ u8 tidno;
+ u8 aggr_enable;
+ u8 padding;
+} __packed;
+
+#define ATH_HTC_RATE_MAX 30
+
+#define WLAN_RC_DS_FLAG 0x01
+#define WLAN_RC_40_FLAG 0x02
+#define WLAN_RC_SGI_FLAG 0x04
+#define WLAN_RC_HT_FLAG 0x08
+#define ATH_RC_TX_STBC_FLAG 0x20
+
+struct ath9k_htc_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_HTC_RATE_MAX];
+};
+
+struct ath9k_htc_rate {
+ struct ath9k_htc_rateset legacy_rates;
+ struct ath9k_htc_rateset ht_rates;
+} __packed;
+
+struct ath9k_htc_target_rate {
+ u8 sta_index;
+ u8 isnew;
+ __be32 capflags;
+ struct ath9k_htc_rate rates;
+};
+
+struct ath9k_htc_target_rate_mask {
+ u8 vif_index;
+ u8 band;
+ __be32 mask;
+ u16 pad;
+} __packed;
+
+struct ath9k_htc_target_int_stats {
+ __be32 rx;
+ __be32 rxorn;
+ __be32 rxeol;
+ __be32 txurn;
+ __be32 txto;
+ __be32 cst;
+} __packed;
+
+struct ath9k_htc_target_tx_stats {
+ __be32 xretries;
+ __be32 fifoerr;
+ __be32 filtered;
+ __be32 timer_exp;
+ __be32 shortretries;
+ __be32 longretries;
+ __be32 qnull;
+ __be32 encap_fail;
+ __be32 nobuf;
+} __packed;
+
+struct ath9k_htc_target_rx_stats {
+ __be32 nobuf;
+ __be32 host_send;
+ __be32 host_done;
+} __packed;
+
+#define ATH9K_HTC_MAX_VIF 2
+#define ATH9K_HTC_MAX_BCN_VIF 2
+
+#define INC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif++; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif++; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif++; \
+ break; \
+ case NL80211_IFTYPE_MESH_POINT: \
+ _priv->num_mbss_vif++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif--; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif--; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif--; \
+ break; \
+ case NL80211_IFTYPE_MESH_POINT: \
+ _priv->num_mbss_vif--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+struct ath9k_htc_vif {
+ u8 index;
+ u16 seq_no;
+ bool beacon_configured;
+ int bslot;
+ __le64 tsfadjust;
+};
+
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr;
+ u8 mask[ETH_ALEN];
+};
+
+#define ATH9K_HTC_MAX_STA 8
+#define ATH9K_HTC_MAX_TID 8
+
+enum tid_aggr_state {
+ AGGR_STOP = 0,
+ AGGR_PROGRESS,
+ AGGR_START,
+ AGGR_OPERATIONAL
+};
+
+struct ath9k_htc_sta {
+ u8 index;
+ enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
+};
+
+#define ATH9K_HTC_RXBUF 256
+#define HTC_RX_FRAME_HEADER_SIZE 40
+
+struct ath9k_htc_rxbuf {
+ bool in_process;
+ struct sk_buff *skb;
+ struct ath_htc_rx_status rxstatus;
+ struct list_head list;
+};
+
+struct ath9k_htc_rx {
+ struct list_head rxbuf;
+ spinlock_t rxbuflock;
+};
+
+#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
+#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 3000 /* ms */
+#define ATH9K_HTC_TX_RESERVE 10
+#define ATH9K_HTC_TX_TIMEOUT_COUNT 40
+#define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE)
+
+#define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0)
+#define ATH9K_HTC_OP_TX_DRAIN BIT(1)
+
+struct ath9k_htc_tx {
+ u8 flags;
+ int queued_cnt;
+ struct sk_buff_head mgmt_ep_queue;
+ struct sk_buff_head cab_ep_queue;
+ struct sk_buff_head data_be_queue;
+ struct sk_buff_head data_bk_queue;
+ struct sk_buff_head data_vi_queue;
+ struct sk_buff_head data_vo_queue;
+ struct sk_buff_head tx_failed;
+ DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
+ struct timer_list cleanup_timer;
+ spinlock_t tx_lock;
+};
+
+struct ath9k_htc_tx_ctl {
+ u8 type; /* ATH9K_HTC_* */
+ u8 epid;
+ u8 txok;
+ u8 sta_idx;
+ unsigned long timestamp;
+};
+
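+/*
+ * The per-frame TX control block is stored in mac80211's per-skb
+ * driver data; the BUILD_BUG_ON below ensures that it fits.
+ */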
+static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ BUILD_BUG_ON(sizeof(struct ath9k_htc_tx_ctl) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath9k_htc_tx_ctl *) &tx_info->driver_data;
+}
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
+#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
+
+#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+
+void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
+ struct ath_rx_status *rs);
+
+struct ath_tx_stats {
+ u32 buf_queued;
+ u32 buf_completed;
+ u32 skb_queued;
+ u32 skb_success;
+ u32 skb_success_bytes;
+ u32 skb_failed;
+ u32 cab_queued;
+ u32 queue_stats[IEEE80211_NUM_ACS];
+};
+
+struct ath_skbrx_stats {
+ u32 skb_allocated;
+ u32 skb_completed;
+ u32 skb_completed_bytes;
+ u32 skb_dropped;
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ struct ath_tx_stats tx_stats;
+ struct ath_rx_stats rx_stats;
+ struct ath_skbrx_stats skbrx_stats;
+};
+
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+#else
+
+#define TX_STAT_INC(c) do { } while (0)
+#define TX_STAT_ADD(c, a) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+#define RX_STAT_ADD(c, a) do { } while (0)
+#define CAB_STAT_INC do { } while (0)
+
+#define TX_QSTAT_INC(c) do { } while (0)
+
+static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
+ struct ath_rx_status *rs)
+{
+}
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 10
+#define ATH_LED_PIN_9271 15
+#define ATH_LED_PIN_7010 12
+
+#define BSTUCK_THRESHOLD 10
+
+/*
+ * Adjust these when the max. no of beaconing interfaces is
+ * increased.
+ */
+#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
+#define MIN_SWBA_RESPONSE 10 /* in TUs */
+
+struct htc_beacon {
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } updateslot; /* slot time update fsm */
+
+ struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
+ u32 bmisscnt;
+ u32 beaconq;
+ int slottime;
+ int slotupdate;
+};
+
+struct ath_btcoex {
+ u32 bt_priority_cnt;
+ unsigned long bt_priority_time;
+ int bt_stomp_type; /* Types of BT stomping */
+ u32 btcoex_no_stomp;
+ u32 btcoex_period;
+ u32 btscan_no_stomp;
+};
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product);
+void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv);
+#else
+static inline void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
+{
+}
+static inline void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
+{
+}
+static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#define OP_BT_PRIORITY_DETECTED 3
+#define OP_BT_SCAN 4
+#define OP_TSF_RESET 6
+
+enum htc_op_flags {
+ HTC_FWFLAG_NO_RMW,
+};
+
+struct ath9k_htc_priv {
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_hw *ah;
+ struct htc_target *htc;
+ struct wmi *wmi;
+
+ u16 fw_version_major;
+ u16 fw_version_minor;
+
+ enum htc_endpoint_id wmi_cmd_ep;
+ enum htc_endpoint_id beacon_ep;
+ enum htc_endpoint_id cab_ep;
+ enum htc_endpoint_id uapsd_ep;
+ enum htc_endpoint_id mgmt_ep;
+ enum htc_endpoint_id data_be_ep;
+ enum htc_endpoint_id data_bk_ep;
+ enum htc_endpoint_id data_vi_ep;
+ enum htc_endpoint_id data_vo_ep;
+
+ u8 vif_slot;
+ u8 mon_vif_idx;
+ u8 sta_slot;
+ u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
+ u8 num_ibss_vif;
+ u8 num_mbss_vif;
+ u8 num_sta_vif;
+ u8 num_sta_assoc_vif;
+ u8 num_ap_vif;
+
+ u16 curtxpow;
+ u16 txpowlimit;
+ u16 nvifs;
+ u16 nstations;
+ bool rearm_ani;
+ bool reconfig_beacon;
+ unsigned int rxfilter;
+ unsigned long op_flags;
+ unsigned long fw_flags;
+
+ struct ath9k_hw_cal_data caldata;
+ struct ath_spec_scan_priv spec_priv;
+
+ spinlock_t beacon_lock;
+ struct ath_beacon_config cur_beacon_conf;
+ struct htc_beacon beacon;
+
+ struct ath9k_htc_rx rx;
+ struct ath9k_htc_tx tx;
+
+ struct tasklet_struct swba_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct delayed_work ani_work;
+ struct tasklet_struct tx_failed_tasklet;
+ struct work_struct ps_work;
+ struct work_struct fatal_work;
+
+ struct mutex htc_pm_lock;
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
+
+#ifdef CONFIG_MAC80211_LEDS
+ enum led_brightness brightness;
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+ struct work_struct led_work;
+#endif
+
+ int cabq;
+ int hwq_map[IEEE80211_NUM_ACS];
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ struct ath_btcoex btcoex;
+#endif
+
+ struct delayed_work coex_period_work;
+ struct delayed_work duty_cycle_work;
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct mutex mutex;
+};
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_htc_reset(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
+void ath9k_htc_swba(struct ath9k_htc_priv *priv,
+ struct wmi_event_swba *swba);
+
+void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id);
+void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
+ bool txok);
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok);
+
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
+ u8 enable_coex);
+void ath9k_htc_ani_work(struct work_struct *work);
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv);
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, u8 slot, bool is_cab);
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
+int get_hw_qnum(u16 queue, int *hwq_map);
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo);
+void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv);
+void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv);
+int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv);
+void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
+void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
+void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
+void ath9k_tx_failed_tasklet(unsigned long data);
+void ath9k_htc_tx_cleanup_timer(unsigned long data);
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_tasklet(unsigned long data);
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
+void ath9k_ps_work(struct work_struct *work);
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode);
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
+
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
+
+#ifdef CONFIG_MAC80211_LEDS
+void ath9k_configure_leds(struct ath9k_htc_priv *priv);
+void ath9k_init_leds(struct ath9k_htc_priv *priv);
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
+void ath9k_led_work(struct work_struct *work);
+#else
+static inline void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+}
+
+static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+}
+
+static inline void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+}
+
+static inline void ath9k_led_work(struct work_struct *work)
+{
+}
+#endif
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid, char *product, u32 drv_info);
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
+#ifdef CONFIG_PM
+void ath9k_htc_suspend(struct htc_target *htc_handle);
+int ath9k_htc_resume(struct htc_target *htc_handle);
+#endif
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv);
+#else
+static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }
+static inline void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+}
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 000000000..e8b6ec3c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define FUDGE 2
+
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath9k_tx_queue_info qi, qi_be;
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+ memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
+
+ ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
+
+ if (priv->ah->opmode == NL80211_IFTYPE_AP ||
+ priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+ } else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
+ int qnum = priv->hwq_map[IEEE80211_AC_BE];
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+
+ qi.tqi_aifs = qi_be.tqi_aifs;
+
+ /*
+ * For WIFI Beacon Distribution
+ * Long slot time : 2x cwmin
+ * Short slot time : 4x cwmin
+ */
+ if (ah->slottime == ATH9K_SLOT_TIME_20)
+ qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
+ else
+ qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+
+ qi.tqi_cwmax = qi_be.tqi_cwmax;
+
+ }
+
+ if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
+ ath_err(ath9k_hw_common(ah),
+ "Unable to update beacon queue %u!\n", priv->beacon.beaconq);
+ } else {
+ ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
+ }
+}
+
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf,
+ bool reset_tsf)
+{
+ struct ath_hw *ah = priv->ah;
+ int ret __attribute__ ((unused));
+ __be32 htc_imask = 0;
+ u8 cmd_rsp;
+
+ if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
+ ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
+ else
+ ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
+ ath9k_htc_beaconq_config(priv);
+ ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
+ priv->beacon.bmisscnt = 0;
+ htc_imask = cpu_to_be32(ah->imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *bss_conf)
+{
+ struct ath9k_beacon_state bs;
+ enum ath9k_int imask = 0;
+ __be32 htc_imask = 0;
+ int ret __attribute__ ((unused));
+ u8 cmd_rsp;
+
+ if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
+ return;
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
+
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
+ ath9k_htc_beacon_init(priv, conf, false);
+}
+
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
+
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
+ ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
+}
+
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
+ int slot)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_vif *vif;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ int padpos, padsize, ret, tx_slot;
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ vif = priv->beacon.bslot[slot];
+
+ skb = ieee80211_get_buffered_bc(priv->hw, vif);
+
+	while (skb) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+
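+		/*
+		 * Insert padding after the 802.11 header, if needed, so
+		 * that the frame body is 4-byte aligned.
+		 */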
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ tx_slot = ath9k_htc_tx_get_slot(priv);
+ if (tx_slot < 0) {
+ ath_dbg(common, XMIT, "No free CAB slot\n");
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
+ if (ret != 0) {
+ ath9k_htc_tx_clear_slot(priv, tx_slot);
+ dev_kfree_skb_any(skb);
+
+ ath_dbg(common, XMIT, "Failed to send CAB frame\n");
+ } else {
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.queued_cnt++;
+ spin_unlock_bh(&priv->tx.tx_lock);
+ }
+ next:
+ skb = ieee80211_get_buffered_bc(priv->hw, vif);
+ }
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
+ int slot)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_vif *vif;
+ struct ath9k_htc_vif *avp;
+ struct tx_beacon_header beacon_hdr;
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *beacon;
+ u8 *tx_fhdr;
+ int ret;
+
+ memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ vif = priv->beacon.bslot[slot];
+ avp = (struct ath9k_htc_vif *)vif->drv_priv;
+
+ if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /* Get a new beacon */
+ beacon = ieee80211_beacon_get(priv->hw, vif);
+ if (!beacon) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /*
+ * Update the TSF adjust value here, the HW will
+ * add this value for every beacon.
+ */
+ mgmt = (struct ieee80211_mgmt *)beacon->data;
+ mgmt->u.beacon.timestamp = avp->tsfadjust;
+
+ info = IEEE80211_SKB_CB(beacon);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) beacon->data;
+ avp->seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+ }
+
+ tx_ctl = HTC_SKB_CB(beacon);
+ memset(tx_ctl, 0, sizeof(*tx_ctl));
+
+ tx_ctl->type = ATH9K_HTC_BEACON;
+ tx_ctl->epid = priv->beacon_ep;
+
+ beacon_hdr.vif_index = avp->index;
+ tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
+ memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
+
+ ret = htc_send(priv->htc, beacon);
+ if (ret != 0) {
+ if (ret == -ENOMEM) {
+ ath_dbg(common, BSTUCK,
+ "Failed to send beacon, no free TX buffer\n");
+ }
+ dev_kfree_skb_any(beacon);
+ }
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
+ struct wmi_event_swba *swba)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ u64 tsf;
+ u32 tsftu;
+ u16 intval;
+ int slot;
+
+ intval = priv->cur_beacon_conf.beacon_interval;
+
+ tsf = be64_to_cpu(swba->tsf);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf);
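+
+	/*
+	 * Map the TSF offset within the beacon interval to a beacon
+	 * slot; slots are assigned in reverse order.
+	 */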
+ slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
+ slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
+
+ ath_dbg(common, BEACON,
+ "Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
+ slot, tsf, tsftu, intval);
+
+ return slot;
+}
+
+void ath9k_htc_swba(struct ath9k_htc_priv *priv,
+ struct wmi_event_swba *swba)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int slot;
+
+ if (swba->beacon_pending != 0) {
+ priv->beacon.bmisscnt++;
+ if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
+ ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
+ ieee80211_queue_work(priv->hw,
+ &priv->fatal_work);
+ }
+ return;
+ }
+
+ if (priv->beacon.bmisscnt) {
+ ath_dbg(common, BSTUCK,
+ "Resuming beacon xmit after %u misses\n",
+ priv->beacon.bmisscnt);
+ priv->beacon.bmisscnt = 0;
+ }
+
+ slot = ath9k_htc_choose_bslot(priv, swba);
+ spin_lock_bh(&priv->beacon_lock);
+ if (priv->beacon.bslot[slot] == NULL) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+ spin_unlock_bh(&priv->beacon_lock);
+
+ ath9k_htc_send_buffered(priv, slot);
+ ath9k_htc_send_beacon(priv, slot);
+}
+
+void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
+ int i = 0;
+
+ spin_lock_bh(&priv->beacon_lock);
+ for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
+ if (priv->beacon.bslot[i] == NULL) {
+ avp->bslot = i;
+ break;
+ }
+ }
+
+ priv->beacon.bslot[avp->bslot] = vif;
+ spin_unlock_bh(&priv->beacon_lock);
+
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->bslot);
+}
+
+void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
+
+ spin_lock_bh(&priv->beacon_lock);
+ priv->beacon.bslot[avp->bslot] = NULL;
+ spin_unlock_bh(&priv->beacon_lock);
+
+ ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
+ avp->bslot);
+}
+
+/*
+ * Calculate the TSF adjustment value for all slots
+ * other than zero.
+ */
+void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ u64 tsfadjust;
+
+ if (avp->bslot == 0)
+ return;
+
+ /*
+ * The beacon interval cannot be different for multi-AP mode,
+ * and we reach here only for VIF slots greater than zero,
+ * so beacon_interval is guaranteed to be set in cur_conf.
+ */
+ tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
+ avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
+ (unsigned long long)tsfadjust, avp->bslot);
+}
+
+static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *beacon_configured = (bool *)data;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ avp->beacon_configured)
+ *beacon_configured = true;
+}
+
+static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ bool beacon_configured;
+
+ /*
+ * Changing the beacon interval when multiple AP interfaces
+ * are configured will affect beacon transmission of all
+ * of them.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ (priv->num_ap_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_AP) &&
+ (cur_conf->beacon_interval != bss_conf->beacon_int)) {
+ ath_dbg(common, CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
+ return false;
+ }
+
+ /*
+ * If the HW is operating in AP mode, any new station interfaces that
+ * are added cannot change the beacon parameters.
+ */
+ if (priv->num_ap_vif &&
+ (vif->type != NL80211_IFTYPE_AP)) {
+ ath_dbg(common, CONFIG,
+ "HW in AP mode, cannot set STA beacon parameters\n");
+ return false;
+ }
+
+ /*
+ * The beacon parameters are configured only for the first
+ * station interface.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->num_sta_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_STATION)) {
+ beacon_configured = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_beacon_iter, &beacon_configured);
+
+ if (beacon_configured) {
+ ath_dbg(common, CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (!ath9k_htc_check_beacon_config(priv, vif))
+ return;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ avp->beacon_configured = true;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
+ default:
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
+ return;
+ }
+}
+
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
+
+ switch (priv->ah->opmode) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
+ default:
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
new file mode 100644
index 000000000..dc79afd7e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ struct ath9k_htc_target_int_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ ath9k_htc_ps_wakeup(priv);
+
+ WMI_CMD(WMI_INT_STATS_CMDID);
+ if (ret) {
+ ath9k_htc_ps_restore(priv);
+ return -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RX",
+ be32_to_cpu(cmd_rsp.rx));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXORN",
+ be32_to_cpu(cmd_rsp.rxorn));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "RXEOL",
+ be32_to_cpu(cmd_rsp.rxeol));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXURN",
+ be32_to_cpu(cmd_rsp.txurn));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TXTO",
+ be32_to_cpu(cmd_rsp.txto));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CST",
+ be32_to_cpu(cmd_rsp.cst));
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_int_stats = {
+ .read = read_file_tgt_int_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ struct ath9k_htc_target_tx_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ ath9k_htc_ps_wakeup(priv);
+
+ WMI_CMD(WMI_TX_STATS_CMDID);
+ if (ret) {
+ ath9k_htc_ps_restore(priv);
+ return -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Xretries",
+ be32_to_cpu(cmd_rsp.xretries));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "FifoErr",
+ be32_to_cpu(cmd_rsp.fifoerr));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Filtered",
+ be32_to_cpu(cmd_rsp.filtered));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "TimerExp",
+ be32_to_cpu(cmd_rsp.timer_exp));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "ShortRetries",
+ be32_to_cpu(cmd_rsp.shortretries));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "LongRetries",
+ be32_to_cpu(cmd_rsp.longretries));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "QueueNull",
+ be32_to_cpu(cmd_rsp.qnull));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "EncapFail",
+ be32_to_cpu(cmd_rsp.encap_fail));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_tx_stats = {
+ .read = read_file_tgt_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ struct ath9k_htc_target_rx_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ ath9k_htc_ps_wakeup(priv);
+
+ WMI_CMD(WMI_RX_STATS_CMDID);
+ if (ret) {
+ ath9k_htc_ps_restore(priv);
+ return -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "NoBuf",
+ be32_to_cpu(cmd_rsp.nobuf));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostSend",
+ be32_to_cpu(cmd_rsp.host_send));
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "HostDone",
+ be32_to_cpu(cmd_rsp.host_done));
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_rx_stats = {
+ .read = read_file_tgt_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs success",
+ priv->debug.tx_stats.skb_success);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs failed",
+ priv->debug.tx_stats.skb_failed);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "CAB queued",
+ priv->debug.tx_stats.cab_queued);
+
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_xmit = {
+ .read = read_file_xmit,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
+ struct ath_rx_status *rs)
+{
+ ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
+}
+
+static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1500;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.skbrx_stats.skb_allocated);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.skbrx_stats.skb_completed);
+ len += scnprintf(buf + len, size - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.skbrx_stats.skb_dropped);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_skb_rx = {
+ .read = read_file_skb_rx,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_slot(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ char buf[512];
+ unsigned int len;
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ len = scnprintf(buf, sizeof(buf),
+ "TX slot bitmap : %*pb\n"
+ "Used slots : %d\n",
+ MAX_TX_BUF_NUM, priv->tx.tx_slot,
+ bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+ spin_unlock_bh(&priv->tx.tx_lock);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_slot = {
+ .read = read_file_slot,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_queue(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+ "Queued count", priv->tx.queued_cnt);
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_queue = {
+ .read = read_file_queue,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_debug(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "0x%08x\n", common->debug_mask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv = file->private_data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->debug_mask = mask;
+ return count;
+}
+
+static const struct file_operations fops_debug = {
+ .read = read_file_debug,
+ .write = write_file_debug,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* Ethtool support for get-stats */
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+
+ AMKSTR(d_tx_pkts),
+
+ "d_rx_crc_err",
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats)
+
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_htc_gstrings_stats,
+ sizeof(ath9k_htc_gstrings_stats));
+}
+
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_HTC_SSTATS_LEN;
+ return 0;
+}
+
+#define STXBASE priv->debug.tx_stats
+#define SRXBASE priv->debug.rx_stats
+#define SKBTXBASE priv->debug.tx_stats
+#define SKBRXBASE priv->debug.skbrx_stats
+#define ASTXQ(a) \
+ data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_VI]; \
+ data[i++] = STXBASE.a[IEEE80211_AC_VO]
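+
+/*
+ * Note: the per-AC ordering expanded by ASTXQ() (BE, BK, VI, VO) must
+ * match the string ordering produced by AMKSTR() above, since both feed
+ * the same ethtool stats array.
+ */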
+
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int i = 0;
+
+ data[i++] = SKBTXBASE.skb_success;
+ data[i++] = SKBTXBASE.skb_success_bytes;
+ data[i++] = SKBRXBASE.skb_completed;
+ data[i++] = SKBRXBASE.skb_completed_bytes;
+
+ ASTXQ(queue_stats);
+
+ data[i++] = SRXBASE.crc_err;
+ data[i++] = SRXBASE.decrypt_crc_err;
+ data[i++] = SRXBASE.phy_err;
+ data[i++] = SRXBASE.mic_err;
+ data[i++] = SRXBASE.pre_delim_crc_err;
+ data[i++] = SRXBASE.post_delim_crc_err;
+ data[i++] = SRXBASE.decrypt_busy_err;
+
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
+
+ WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
+}
+
+void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
+{
+ ath9k_cmn_spectral_deinit_debug(&priv->spec_priv);
+}
+
+int ath9k_htc_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
+ priv->hw->wiphy->debugfsdir);
+ if (!priv->debug.debugfs_phy)
+ return -ENOMEM;
+
+ ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+
+ debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_tgt_int_stats);
+ debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_tgt_tx_stats);
+ debugfs_create_file("tgt_rx_stats", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_tgt_rx_stats);
+ debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_xmit);
+ debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_skb_rx);
+
+ ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+ ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+
+ debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_slot);
+ debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_queue);
+ debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
+ priv, &fops_debug);
+
+ ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
+ ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
new file mode 100644
index 000000000..2aabcbdab
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******************/
+/* BTCOEX */
+/******************/
+
+#define ATH_HTC_BTCOEX_PRODUCT_ID "wb193"
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
+{
+ struct ath_btcoex *btcoex = &priv->btcoex;
+ struct ath_hw *ah = priv->ah;
+
+ if (ath9k_hw_gpio_get(ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
+ "BT scan detected\n");
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ set_bit(OP_BT_SCAN, &priv->op_flags);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
+ "BT priority traffic detected\n");
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * This is the master bt coex work, which runs every 45 ms.
+ * BT traffic is given priority during 55% of this period,
+ * while wlan gets the remaining 45%.
+ */
+static void ath_btcoex_period_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ coex_period_work.work);
+ struct ath_btcoex *btcoex = &priv->btcoex;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ u32 timer_period;
+ int ret;
+
+ ath_detect_bt_priority(priv);
+
+ ret = ath9k_htc_update_cap_target(priv,
+ test_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags));
+ if (ret) {
+ ath_err(common, "Unable to set BTCOEX parameters\n");
+ return;
+ }
+
+ ath9k_hw_btcoex_bt_stomp(priv->ah, test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type);
+
+ ath9k_hw_btcoex_enable(priv->ah);
+ timer_period = test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp;
+ ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
+ msecs_to_jiffies(timer_period));
+ ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
+ msecs_to_jiffies(btcoex->btcoex_period));
+}
+
+/*
+ * Work to time slice between wlan and bt traffic and
+ * configure weight registers
+ */
+static void ath_btcoex_duty_cycle_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ duty_cycle_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_btcoex *btcoex = &priv->btcoex;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ test_bit(OP_BT_SCAN, &priv->op_flags))
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+
+ ath9k_hw_btcoex_enable(priv->ah);
+}
+
+static void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
+{
+ struct ath_btcoex *btcoex = &priv->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
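+ /*
+ * For example, with the defaults described above (a 45 ms coex period
+ * and a 55% duty cycle), btcoex_no_stomp works out to
+ * (100 - 55) * 45 / 100 ~= 20 ms of un-stomped BT time per period;
+ * the exact values depend on the ATH_BTCOEX_* defines in use.
+ */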
+ INIT_DELAYED_WORK(&priv->coex_period_work, ath_btcoex_period_work);
+ INIT_DELAYED_WORK(&priv->duty_cycle_work, ath_btcoex_duty_cycle_work);
+}
+
+/*
+ * (Re)start btcoex work
+ */
+
+static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
+{
+ struct ath_btcoex *btcoex = &priv->btcoex;
+ struct ath_hw *ah = priv->ah;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
+ ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
+}
+
+/*
+ * Cancel btcoex and bt duty cycle work.
+ */
+static void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->coex_period_work);
+ cancel_delayed_work_sync(&priv->duty_cycle_work);
+}
+
+void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT, 0);
+ ath9k_hw_btcoex_enable(ah);
+ ath_htc_resume_btcoex_work(priv);
+ }
+}
+
+void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+ if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_htc_cancel_btcoex_work(priv);
+ ath9k_hw_btcoex_disable(ah);
+ }
+}
+
+void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int qnum;
+
+ /*
+ * Check if BTCOEX is globally disabled.
+ */
+ if (!common->btcoex_enabled) {
+ ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
+ return;
+ }
+
+ if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0)
+ ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
+
+ switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ priv->ah->btcoex_hw.btactive_gpio = 7;
+ priv->ah->btcoex_hw.btpriority_gpio = 6;
+ priv->ah->btcoex_hw.wlanactive_gpio = 8;
+ priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ ath9k_hw_btcoex_init_3wire(priv->ah);
+ ath_htc_init_btcoex_work(priv);
+ qnum = priv->hwq_map[IEEE80211_AC_BE];
+ ath9k_hw_init_btcoex_hw(priv->ah, qnum);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+/*******/
+/* LED */
+/*******/
+
+#ifdef CONFIG_MAC80211_LEDS
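+/*
+ * GPIO writes go out over USB/WMI and may sleep, so LED brightness
+ * changes (which can be requested from atomic context) are deferred to
+ * this work item.
+ */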
+void ath9k_led_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work,
+ struct ath9k_htc_priv,
+ led_work);
+
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->brightness == LED_OFF));
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath9k_htc_priv *priv = container_of(led_cdev,
+ struct ath9k_htc_priv,
+ led_cdev);
+
+ /* Not locked, but it's just a tiny green light... */
+ priv->brightness = brightness;
+ ieee80211_queue_work(priv->hw, &priv->led_work);
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ if (!priv->led_registered)
+ return;
+
+ ath9k_led_brightness(&priv->led_cdev, LED_OFF);
+ led_classdev_unregister(&priv->led_cdev);
+ cancel_work_sync(&priv->led_work);
+}
+
+void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else if (AR_DEVID_7010(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_7010;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ if (!ath9k_htc_led_blink)
+ priv->led_cdev.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+
+ ath9k_configure_leds(priv);
+
+ snprintf(priv->led_name, sizeof(priv->led_name),
+ "ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
+ priv->led_cdev.name = priv->led_name;
+ priv->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &priv->led_cdev);
+ if (ret < 0)
+ return;
+
+ INIT_WORK(&priv->led_work, ath9k_led_work);
+ priv->led_registered = true;
+}
+#endif
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ bool is_blocked;
+
+ ath9k_htc_ps_wakeup(priv);
+ is_blocked = ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+ ath9k_htc_ps_restore(priv);
+
+ return is_blocked;
+}
+
+void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 000000000..d7beefe60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,1024 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "htc.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Atheros driver 802.11n HTC based wireless devices");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int htc_modparam_nohwcrypt;
+module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+static int ath9k_htc_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
+#ifdef CONFIG_MAC80211_LEDS
+int ath9k_htc_led_blink = 1;
+module_param_named(blink, ath9k_htc_led_blink, int, 0444);
+MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+
+static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
+
+static void ath9k_htc_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_htc_ps_wakeup((struct ath9k_htc_priv *) common->priv);
+}
+
+static void ath9k_htc_op_ps_restore(struct ath_common *common)
+{
+ ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_htc_ps_ops = {
+ .wakeup = ath9k_htc_op_ps_wakeup,
+ .restore = ath9k_htc_op_ps_restore,
+};
+
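+/*
+ * htc->tgt_ready counts target-ready notifications from the HTC layer.
+ * Consume one if it is already pending; otherwise wait (up to one
+ * second) for the target to report in.
+ */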
+static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
+{
+ int time_left;
+
+ if (atomic_read(&priv->htc->tgt_ready) > 0) {
+ atomic_dec(&priv->htc->tgt_ready);
+ return 0;
+ }
+
+ /* Firmware can take up to 50ms to get ready; to be safe, use 1 second. */
+ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
+ if (!time_left) {
+ dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
+ return -ETIMEDOUT;
+ }
+
+ atomic_dec(&priv->htc->tgt_ready);
+
+ return 0;
+}
+
+static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
+{
+ ath9k_hw_deinit(priv->ah);
+ kfree(priv->ah);
+ priv->ah = NULL;
+}
+
+static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+
+ wiphy_rfkill_stop_polling(hw->wiphy);
+ ath9k_deinit_leds(priv);
+ ath9k_htc_deinit_debug(priv);
+ ieee80211_unregister_hw(hw);
+ ath9k_rx_cleanup(priv);
+ ath9k_tx_cleanup(priv);
+ ath9k_deinit_priv(priv);
+}
+
+static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
+ u16 service_id,
+ void (*tx) (void *,
+ struct sk_buff *,
+ enum htc_endpoint_id,
+ bool txok),
+ enum htc_endpoint_id *ep_id)
+{
+ struct htc_service_connreq req;
+
+ memset(&req, 0, sizeof(struct htc_service_connreq));
+
+ req.service_id = service_id;
+ req.ep_callbacks.priv = priv;
+ req.ep_callbacks.rx = ath9k_htc_rxep;
+ req.ep_callbacks.tx = tx;
+
+ return htc_connect_service(priv->htc, &req, ep_id);
+}
+
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
+ u32 drv_info)
+{
+ int ret;
+
+ /* WMI CMD */
+ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
+ if (ret)
+ goto err;
+
+ /* Beacon */
+ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
+ &priv->beacon_ep);
+ if (ret)
+ goto err;
+
+ /* CAB */
+ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
+ &priv->cab_ep);
+ if (ret)
+ goto err;
+
+ /* UAPSD */
+ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
+ &priv->uapsd_ep);
+ if (ret)
+ goto err;
+
+ /* MGMT */
+ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
+ &priv->mgmt_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BE */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
+ &priv->data_be_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BK */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
+ &priv->data_bk_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VI */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
+ &priv->data_vi_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VO */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
+ &priv->data_vo_ep);
+ if (ret)
+ goto err;
+
+ /*
+ * Setup required credits before initializing HTC.
+ * This is a bit hacky, but since queuing is done in
+ * the HIF layer, it shouldn't matter much.
+ */
+
+ if (IS_AR7010_DEVICE(drv_info))
+ priv->htc->credits = 45;
+ else
+ priv->htc->credits = 33;
+
+ ret = htc_init(priv->htc);
+ if (ret)
+ goto err;
+
+ dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n",
+ priv->htc->credits);
+
+ return 0;
+
+err:
+ dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
+ return ret;
+}
+
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
+}
+
+static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 val, reg = cpu_to_be32(reg_offset);
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *) &reg, sizeof(reg),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
+ return -EIO;
+ }
+
+ return be32_to_cpu(val);
+}
+
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 tmpaddr[8];
+ __be32 tmpval[8];
+ int i, ret;
+
+ for (i = 0; i < count; i++)
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *)tmpaddr, sizeof(u32) * count,
+ (u8 *)tmpval, sizeof(u32) * count,
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, WMI,
+ "Multiple REGISTER READ FAILED (count: %d)\n", count);
+ }
+
+ for (i = 0; i < count; i++)
+ val[i] = be32_to_cpu(tmpval[i]);
+}
+
+static void ath9k_regwrite_multi(struct ath_common *common)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+}
+
+static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ const __be32 buf[2] = {
+ cpu_to_be32(reg_offset),
+ cpu_to_be32(val),
+ };
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
+ }
+}
+
+static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
+ cpu_to_be32(val);
+
+ priv->wmi->multi_write_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER)
+ ath9k_regwrite_multi(common);
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (atomic_read(&priv->wmi->mwrite_cnt))
+ ath9k_regwrite_buffer(hw_priv, val, reg_offset);
+ else
+ ath9k_regwrite_single(hw_priv, val, reg_offset);
+}
+
+static void ath9k_enable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_inc(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_regwrite_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_dec(&priv->wmi->mwrite_cnt);
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ if (priv->wmi->multi_write_idx)
+ ath9k_regwrite_multi(common);
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
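+
+/*
+ * Register writes issued between ath9k_enable_regwrite_buffer() and
+ * ath9k_regwrite_flush() are batched into a single WMI_REG_WRITE_CMDID
+ * command to cut down on USB round trips.  Through the common register
+ * ops this is typically driven roughly like the following sketch
+ * (register offsets and values are placeholders):
+ *
+ *	ENABLE_REGWRITE_BUFFER(ah);
+ *	REG_WRITE(ah, reg1, val1);
+ *	REG_WRITE(ah, reg2, val2);
+ *	REGWRITE_BUFFER_FLUSH(ah);
+ */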
+
+static void ath9k_reg_rmw_buffer(void *hw_priv,
+ u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
+ cpu_to_be32(set);
+ priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
+ cpu_to_be32(clr);
+
+ priv->wmi->multi_rmw_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &priv->wmi->multi_rmw,
+ sizeof(struct register_write) * priv->wmi->multi_rmw_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER RMW FAILED, multi len: %d\n",
+ priv->wmi->multi_rmw_idx);
+ }
+ priv->wmi->multi_rmw_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_reg_rmw_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+ return;
+
+ atomic_dec(&priv->wmi->m_rmw_cnt);
+
+ mutex_lock(&priv->wmi->multi_rmw_mutex);
+
+ if (priv->wmi->multi_rmw_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &priv->wmi->multi_rmw,
+ sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER RMW FAILED, multi len: %d\n",
+ priv->wmi->multi_rmw_idx);
+ }
+ priv->wmi->multi_rmw_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_rmw_mutex);
+}
+
+static void ath9k_enable_rmw_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
+ return;
+
+ atomic_inc(&priv->wmi->m_rmw_cnt);
+}
+
+static u32 ath9k_reg_rmw_single(void *hw_priv,
+ u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct register_rmw buf, buf_ret;
+ int ret;
+ u32 val = 0;
+
+ buf.reg = cpu_to_be32(reg_offset);
+ buf.set = cpu_to_be32(set);
+ buf.clr = cpu_to_be32(clr);
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &buf_ret, sizeof(buf_ret),
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
+ reg_offset, ret);
+ }
+ return val;
+}
+
+static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
+ u32 val;
+
+ val = REG_READ(ah, reg_offset);
+ val &= ~clr;
+ val |= set;
+ REG_WRITE(ah, reg_offset, val);
+
+ return 0;
+ }
+
+ if (atomic_read(&priv->wmi->m_rmw_cnt))
+ ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
+ else
+ ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);
+
+ return 0;
+}
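+
+/*
+ * Summary of the RMW paths above: firmware without RMW support
+ * (HTC_FWFLAG_NO_RMW) falls back to a host-side read/modify/write using
+ * separate WMI register read and write commands; otherwise the operation
+ * is sent to the target via WMI_REG_RMW_CMDID, either one at a time or
+ * batched between ath9k_enable_rmw_buffer() and ath9k_reg_rmw_flush().
+ */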
+
+static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT))
+ return false;
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static const struct ath_bus_ops ath9k_usb_bus_ops = {
+ .ath_bus_type = ATH_USB,
+ .read_cachesize = ath_usb_read_cachesize,
+ .eeprom_read = ath_usb_eeprom_read,
+};
+
+static int ath9k_init_queues(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
+ priv->hwq_map[i] = -1;
+
+ priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
+ if (priv->beacon.beaconq == -1) {
+ ath_err(common, "Unable to setup BEACON xmit queue\n");
+ goto err;
+ }
+
+ priv->cabq = ath9k_htc_cabq_setup(priv);
+ if (priv->cabq == -1) {
+ ath_err(common, "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BE)) {
+ ath_err(common, "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BK)) {
+ ath_err(common, "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VI)) {
+ ath_err(common, "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VO)) {
+ ath_err(common, "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static void ath9k_init_misc(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ priv->spec_priv.ah = priv->ah;
+ priv->spec_priv.spec_config.enabled = 0;
+ priv->spec_priv.spec_config.short_repeat = false;
+ priv->spec_priv.spec_config.count = 8;
+ priv->spec_priv.spec_config.endless = false;
+ priv->spec_priv.spec_config.period = 0x12;
+ priv->spec_priv.spec_config.fft_period = 0x02;
+}
+
+static int ath9k_init_priv(struct ath9k_htc_priv *priv,
+ u16 devid, char *product,
+ u32 drv_info)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int i, ret = 0, csz = 0;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->dev = priv->dev;
+ ah->hw = priv->hw;
+ ah->hw_version.devid = devid;
+ ah->hw_version.usbdev = drv_info;
+ ah->ah_flags |= AH_USE_EEPROM;
+ ah->reg_ops.read = ath9k_regread;
+ ah->reg_ops.multi_read = ath9k_multi_regread;
+ ah->reg_ops.write = ath9k_regwrite;
+ ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
+ ah->reg_ops.write_flush = ath9k_regwrite_flush;
+ ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
+ ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
+ ah->reg_ops.rmw = ath9k_reg_rmw;
+ priv->ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ah->reg_ops;
+ common->ps_ops = &ath9k_htc_ps_ops;
+ common->bus_ops = &ath9k_usb_bus_ops;
+ common->ah = ah;
+ common->hw = priv->hw;
+ common->priv = priv;
+ common->debug_mask = ath9k_debug;
+ common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ spin_lock_init(&priv->beacon_lock);
+ spin_lock_init(&priv->tx.tx_lock);
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->htc_pm_lock);
+ tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet,
+ (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
+ INIT_WORK(&priv->ps_work, ath9k_ps_work);
+ INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
+ setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer,
+ (unsigned long)priv);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_err(common,
+ "Unable to initialize hardware; initialization status: %d\n",
+ ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_queues(priv);
+ if (ret)
+ goto err_queues;
+
+ for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
+ priv->beacon.bslot[i] = NULL;
+ priv->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ ath9k_cmn_init_channels_rates(common);
+ ath9k_cmn_init_crypto(ah);
+ ath9k_init_misc(priv);
+ ath9k_htc_init_btcoex(priv, product);
+
+ return 0;
+
+err_queues:
+ ath9k_hw_deinit(ah);
+err_hw:
+ kfree(ah);
+ priv->ah = NULL;
+
+ return ret;
+}
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) },
+ { .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+};
+
+static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct base_eep_header *pBase;
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
+ hw->queues = 4;
+ hw->max_listen_interval = 1;
+
+ hw->vif_data_size = sizeof(struct ath9k_htc_vif);
+ hw->sta_data_size = sizeof(struct ath9k_htc_sta);
+
+ /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
+ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
+ sizeof(struct htc_frame_hdr) + 4;
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &common->sbands[IEEE80211_BAND_2GHZ];
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &common->sbands[IEEE80211_BAND_5GHZ];
+
+ ath9k_cmn_reload_chainmask(ah);
+
+ pBase = ath9k_htc_get_eeprom_base(priv);
+ if (pBase) {
+ hw->wiphy->available_antennas_rx = pBase->rxMask;
+ hw->wiphy->available_antennas_tx = pBase->txMask;
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct wmi_fw_version cmd_rsp;
+ int ret;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_GET_FW_VERSION);
+ if (ret)
+ return -EINVAL;
+
+ priv->fw_version_major = be16_to_cpu(cmd_rsp.major);
+ priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor);
+
+ snprintf(hw->wiphy->fw_version, sizeof(hw->wiphy->fw_version), "%d.%d",
+ priv->fw_version_major,
+ priv->fw_version_minor);
+
+ dev_info(priv->dev, "ath9k_htc: FW Version: %d.%d\n",
+ priv->fw_version_major,
+ priv->fw_version_minor);
+
+ /*
+ * Check if the available FW matches the driver's
+ * required version.
+ */
+ if (priv->fw_version_major != MAJOR_VERSION_REQ ||
+ priv->fw_version_minor < MINOR_VERSION_REQ) {
+ dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
+ MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
+ return -EINVAL;
+ }
+
+ if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
+ set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);
+
+ dev_info(priv->dev, "FW RMW support: %s\n",
+ test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");
+
+ return 0;
+}
+
+static int ath9k_init_device(struct ath9k_htc_priv *priv,
+ u16 devid, char *product, u32 drv_info)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+ char hw_name[64];
+
+ /* Bring up device */
+ error = ath9k_init_priv(priv, devid, product, drv_info);
+ if (error != 0)
+ goto err_init;
+
+ ah = priv->ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(priv, hw);
+
+ error = ath9k_init_firmware_version(priv);
+ if (error != 0)
+ goto err_fw;
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto err_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX */
+ error = ath9k_tx_init(priv);
+ if (error != 0)
+ goto err_tx;
+
+ /* Setup RX */
+ error = ath9k_rx_init(priv);
+ if (error != 0)
+ goto err_rx;
+
+ ath9k_hw_disable(priv->ah);
+#ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_htc_tpt_blink,
+ ARRAY_SIZE(ath9k_htc_tpt_blink));
+#endif
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto err_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto err_world;
+ }
+
+ error = ath9k_htc_init_debug(priv->ah);
+ if (error) {
+ ath_err(common, "Unable to create debugfs files\n");
+ goto err_world;
+ }
+
+ ath_dbg(common, CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n",
+ priv->wmi_cmd_ep,
+ priv->beacon_ep,
+ priv->cab_ep,
+ priv->uapsd_ep,
+ priv->mgmt_ep,
+ priv->data_be_ep,
+ priv->data_bk_ep,
+ priv->data_vi_ep,
+ priv->data_vo_ep);
+
+ ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
+ wiphy_info(hw->wiphy, "%s\n", hw_name);
+
+ ath9k_init_leds(priv);
+ ath9k_start_rfkill_poll(priv);
+
+ return 0;
+
+err_world:
+ ieee80211_unregister_hw(hw);
+err_register:
+ ath9k_rx_cleanup(priv);
+err_rx:
+ ath9k_tx_cleanup(priv);
+err_tx:
+ /* Nothing */
+err_regd:
+ /* Nothing */
+err_fw:
+ ath9k_deinit_priv(priv);
+err_init:
+ return error;
+}
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid, char *product, u32 drv_info)
+{
+ struct ieee80211_hw *hw;
+ struct ath9k_htc_priv *priv;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->htc = htc_handle;
+ priv->dev = dev;
+ htc_handle->drv_priv = priv;
+ SET_IEEE80211_DEV(hw, priv->dev);
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ goto err_free;
+
+ priv->wmi = ath9k_init_wmi(priv);
+ if (!priv->wmi) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = ath9k_init_htc_services(priv, devid, drv_info);
+ if (ret)
+ goto err_init;
+
+ ret = ath9k_init_device(priv, devid, product, drv_info);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ ath9k_deinit_wmi(priv);
+err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+{
+ if (htc_handle->drv_priv) {
+ /* Check if the device has been yanked out. */
+ if (hotunplug)
+ htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+ ath9k_deinit_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+}
+
+#ifdef CONFIG_PM
+
+void ath9k_htc_suspend(struct htc_target *htc_handle)
+{
+ ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP);
+}
+
+int ath9k_htc_resume(struct htc_target *htc_handle)
+{
+ struct ath9k_htc_priv *priv = htc_handle->drv_priv;
+ int ret;
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ return ret;
+
+ ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
+ priv->ah->hw_version.usbdev);
+ ath9k_configure_leds(priv);
+
+ return ret;
+}
+#endif
+
+static int __init ath9k_htc_init(void)
+{
+ if (ath9k_hif_usb_init() < 0) {
+ pr_err("No USB devices found, driver not installed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(ath9k_htc_init);
+
+static void __exit ath9k_htc_exit(void)
+{
+ ath9k_hif_usb_exit();
+ pr_info("Driver unloaded\n");
+}
+module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 000000000..564923c0d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1877 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/*************/
+/* Utilities */
+/*************/
+
+/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
+static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
+ struct ath9k_channel *ichan)
+{
+ if (IS_CHAN_5GHZ(ichan))
+ return HTC_MODE_11NA;
+
+ return HTC_MODE_11NG;
+}
+
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
+{
+ bool ret;
+
+ mutex_lock(&priv->htc_pm_lock);
+ ret = ath9k_hw_setpower(priv->ah, mode);
+ mutex_unlock(&priv->htc_pm_lock);
+
+ return ret;
+}
+
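+/*
+ * ath9k_htc_ps_wakeup() and ath9k_htc_ps_restore() form a refcounted
+ * pair: the chip is forced awake on the first wakeup, and only the last
+ * matching restore (ps_usecount dropping to zero) lets it go back to
+ * network sleep or full sleep.
+ */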
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (++priv->ps_usecount != 1)
+ goto unlock;
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
+{
+ bool reset;
+
+ mutex_lock(&priv->htc_pm_lock);
+ if (--priv->ps_usecount != 0)
+ goto unlock;
+
+ if (priv->ps_idle) {
+ ath9k_hw_setrxabort(priv->ah, true);
+ ath9k_hw_stopdmarecv(priv->ah, &reset);
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
+ } else if (priv->ps_enabled) {
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+ }
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_ps_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+
+ /* The chip wakes up after receiving the first beacon
+ while network sleep is enabled. For the driver to
+ be in sync with the hw, set the chip to awake and
+ only then set it to sleep.
+ */
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+}
+
+static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = data;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if ((vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ bss_conf->enable_beacon) {
+ priv->reconfig_beacon = true;
+ priv->rearm_ani = true;
+ }
+
+ if (bss_conf->assoc) {
+ priv->rearm_ani = true;
+ priv->reconfig_beacon = true;
+ }
+}
+
+static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
+{
+ priv->rearm_ani = false;
+ priv->reconfig_beacon = false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_vif_iter, priv);
+ if (priv->rearm_ani)
+ ath9k_htc_start_ani(priv);
+
+ if (priv->reconfig_beacon) {
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_beacon_reconfig(priv);
+ ath9k_htc_ps_restore(priv);
+ }
+}
+
+static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ if (iter_data->hw_macaddr != NULL) {
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ } else {
+ iter_data->hw_macaddr = mac;
+ }
+}
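+
+/*
+ * The mask computation above clears every bit in which an interface
+ * address differs from the chosen hardware MAC address.  For example,
+ * two interfaces whose addresses differ only in the lowest bit of the
+ * last octet end up with a bssidmask of ff:ff:ff:ff:ff:fe.
+ */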
+
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
+ */
+ iter_data.hw_macaddr = NULL;
+ eth_broadcast_addr(iter_data.mask);
+
+ if (vif)
+ ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
+
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_bssid_iter, &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+ if (iter_data.hw_macaddr)
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
+ ath_hw_setbssidmask(common);
+}
+
+static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
+{
+ if (priv->num_ibss_vif)
+ priv->ah->opmode = NL80211_IFTYPE_ADHOC;
+ else if (priv->num_ap_vif)
+ priv->ah->opmode = NL80211_IFTYPE_AP;
+ else if (priv->num_mbss_vif)
+ priv->ah->opmode = NL80211_IFTYPE_MESH_POINT;
+ else
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ ath9k_hw_setopmode(priv->ah);
+}
+
+void ath9k_htc_reset(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *channel = priv->hw->conf.chandef.chan;
+ struct ath9k_hw_cal_data *caldata = NULL;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ ath9k_htc_stop_ani(priv);
+ ieee80211_stop_queues(priv->hw);
+
+ del_timer_sync(&priv->tx.cleanup_timer);
+ ath9k_htc_tx_drain(priv);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath9k_wmi_event_drain(priv);
+
+ caldata = &priv->caldata;
+ ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset device (%u Mhz) reset status %d\n",
+ channel->center_freq, ret);
+ }
+
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, ah->curchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ htc_start(priv->htc);
+ ath9k_htc_vif_reconfig(priv);
+ ieee80211_wake_queues(priv->hw);
+
+ mod_timer(&priv->tx.cleanup_timer,
+ jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ bool fastcc;
+ struct ieee80211_channel *channel = hw->conf.chandef.chan;
+ struct ath9k_hw_cal_data *caldata = NULL;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
+ return -EIO;
+
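+ /*
+ * Off-channel hops (e.g. during a scan) use a fast channel change and
+ * skip loading calibration data; a full reset with calibration data is
+ * only done when changing the operating channel.
+ */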
+ fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+ ath9k_htc_ps_wakeup(priv);
+
+ ath9k_htc_stop_ani(priv);
+ del_timer_sync(&priv->tx.cleanup_timer);
+ ath9k_htc_tx_drain(priv);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath9k_wmi_event_drain(priv);
+
+ ath_dbg(common, CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
+ fastcc);
+
+ if (!fastcc)
+ caldata = &priv->caldata;
+
+ ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset channel (%u Mhz) reset status %d\n",
+ channel->center_freq, ret);
+ goto err;
+ }
+
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ if (ret)
+ goto err;
+
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, hchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ if (ret)
+ goto err;
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ if (ret)
+ goto err;
+
+ htc_start(priv->htc);
+
+ if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+ !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ ath9k_htc_vif_reconfig(priv);
+
+ mod_timer(&priv->tx.cleanup_timer,
+ jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+
+ /* perform spectral scan if requested. */
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+ priv->spec_priv.spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_cmn_spectral_scan_trigger(common, &priv->spec_priv);
+err:
+ ath9k_htc_ps_restore(priv);
+ return ret;
+}
+
+/*
+ * Monitor mode handling is a tad complicated because the firmware requires
+ * an interface to be created exclusively, while mac80211 doesn't associate
+ * an interface with the mode.
+ *
+ * So, for now, only one monitor interface can be configured.
+ */
+static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = priv->mon_vif_idx;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ if (ret) {
+ ath_err(common, "Unable to remove monitor interface at idx: %d\n",
+ priv->mon_vif_idx);
+ }
+
+ priv->nvifs--;
+ priv->vif_slot &= ~(1 << priv->mon_vif_idx);
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ struct ath9k_htc_target_sta tsta;
+ int ret = 0, sta_idx;
+ u8 cmd_rsp;
+
+ if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
+ (priv->nstations >= ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
+
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
+
+ /*
+ * Add an interface.
+ */
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+
+ hvif.opmode = HTC_M_MONITOR;
+ hvif.index = ffz(priv->vif_slot);
+
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto err_vif;
+
+ /*
+ * Assign the monitor interface index as a special case here.
+ * This is needed when the interface is brought down.
+ */
+ priv->mon_vif_idx = hvif.index;
+ priv->vif_slot |= (1 << hvif.index);
+
+ /*
+ * Set the hardware mode to monitor only if there are no
+ * other interfaces.
+ */
+ if (!priv->nvifs)
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
+
+ priv->nvifs++;
+
+ /*
+ * Associate a station with the interface for packet injection.
+ */
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
+
+ tsta.is_vif_sta = 1;
+ tsta.sta_index = sta_idx;
+ tsta.vif_index = hvif.index;
+ tsta.maxampdu = cpu_to_be16(0xffff);
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ ath_err(common, "Unable to add station entry for monitor mode\n");
+ goto err_sta;
+ }
+
+ priv->sta_slot |= (1 << sta_idx);
+ priv->nstations++;
+ priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
+ priv->ah->is_monitoring = true;
+
+ ath_dbg(common, CONFIG,
+ "Attached a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
+ return 0;
+
+err_sta:
+ /*
+ * Remove the interface from the target.
+ */
+ __ath9k_htc_remove_monitor_interface(priv);
+err_vif:
+ ath_dbg(common, FATAL, "Unable to attach a monitor interface\n");
+
+ return ret;
+}
+
+static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+ u8 cmd_rsp, sta_idx;
+
+ __ath9k_htc_remove_monitor_interface(priv);
+
+ sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ ath_err(common, "Unable to remove station entry for monitor mode\n");
+ return ret;
+ }
+
+ priv->sta_slot &= ~(1 << sta_idx);
+ priv->nstations--;
+ priv->ah->is_monitoring = false;
+
+ ath_dbg(common, CONFIG,
+ "Removed a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
+ return 0;
+}
+
+static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_sta tsta;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret, sta_idx;
+ u8 cmd_rsp;
+ u16 maxampdu;
+
+ if (priv->nstations >= ATH9K_HTC_MAX_STA)
+ return -ENOBUFS;
+
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
+ return -ENOBUFS;
+
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
+ memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
+ ista->index = sta_idx;
+ tsta.is_vif_sta = 0;
+ maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor);
+ tsta.maxampdu = cpu_to_be16(maxampdu);
+ } else {
+ memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
+ tsta.is_vif_sta = 1;
+ tsta.maxampdu = cpu_to_be16(0xffff);
+ }
+
+ tsta.sta_index = sta_idx;
+ tsta.vif_index = avp->index;
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ if (sta)
+ ath_err(common,
+ "Unable to add station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta) {
+ ath_dbg(common, CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+ } else {
+ ath_dbg(common, CONFIG,
+ "Added a station entry for VIF %d (idx: %d)\n",
+ avp->index, tsta.sta_index);
+ }
+
+ priv->sta_slot |= (1 << sta_idx);
+ priv->nstations++;
+ if (!sta)
+ priv->vif_sta_pos[avp->index] = sta_idx;
+
+ return 0;
+}
+
+static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp, sta_idx;
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = priv->vif_sta_pos[avp->index];
+ }
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ if (sta)
+ ath_err(common,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta) {
+ ath_dbg(common, CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+ } else {
+ ath_dbg(common, CONFIG,
+ "Removed a station entry for VIF %d (idx: %d)\n",
+ avp->index, sta_idx);
+ }
+
+ priv->sta_slot &= ~(1 << sta_idx);
+ priv->nstations--;
+
+ return 0;
+}
+
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
+ u8 enable_coex)
+{
+ struct ath9k_htc_cap_target tcap;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
+
+ tcap.ampdu_limit = cpu_to_be32(0xffff);
+ tcap.ampdu_subframes = 0xff;
+ tcap.enable_coex = enable_coex;
+ tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
+
+ WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
+
+ return ret;
+}
+
+static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
+ struct ath9k_htc_target_rate *trate)
+{
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ struct ieee80211_supported_band *sband;
+ u32 caps = 0;
+ int i, j;
+
+ sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
+
+ for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[sband->band] & BIT(i)) {
+ trate->rates.legacy_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ trate->rates.legacy_rates.rs_nrates = j;
+
+ if (sta->ht_cap.ht_supported) {
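+ /* Collect the supported HT MCS indices (0-76) from the RX MCS mask. */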
+ for (i = 0, j = 0; i < 77; i++) {
+ if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ trate->rates.ht_rates.rs_rates[j++] = i;
+ if (j == ATH_HTC_RATE_MAX)
+ break;
+ }
+ trate->rates.ht_rates.rs_nrates = j;
+
+ caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ caps |= ATH_RC_TX_STBC_FLAG;
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
+ if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
+ (conf_is_ht40(&priv->hw->conf)))
+ caps |= WLAN_RC_40_FLAG;
+ if (conf_is_ht40(&priv->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ caps |= WLAN_RC_SGI_FLAG;
+ else if (conf_is_ht20(&priv->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
+ caps |= WLAN_RC_SGI_FLAG;
+ }
+
+ trate->sta_index = ista->index;
+ trate->isnew = 1;
+ trate->capflags = cpu_to_be32(caps);
+}
+
+static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_target_rate *trate)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u8 cmd_rsp;
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
+ if (ret) {
+ ath_err(common,
+ "Unable to initialize Rate information on target\n");
+ }
+
+ return ret;
+}
+
+static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+ int ret;
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ ret = ath9k_htc_send_rate_cmd(priv, &trate);
+ if (!ret)
+ ath_dbg(common, CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+}
+
+static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+ struct ieee80211_sta *sta;
+ int ret;
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return;
+ }
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ rcu_read_unlock();
+
+ ret = ath9k_htc_send_rate_cmd(priv, &trate);
+ if (!ret)
+ ath_dbg(common, CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ bss_conf->bssid, be32_to_cpu(trate.capflags));
+}
+
+static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_ampdu_mlme_action action,
+ u16 tid)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_aggr aggr;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (tid >= ATH9K_HTC_MAX_TID)
+ return -EINVAL;
+
+ memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ aggr.sta_index = ista->index;
+ aggr.tidno = tid & 0xf;
+ aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START);
+
+ WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
+ if (ret)
+ ath_dbg(common, CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
+ else
+ ath_dbg(common, CONFIG,
+ "%s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "Starting" : "Stopping",
+ sta->addr, tid);
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ return ret;
+}
+
+/*******/
+/* ANI */
+/*******/
+
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
+ msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ cancel_delayed_work_sync(&priv->ani_work);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
+}
+
+void ath9k_htc_ani_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv, ani_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval;
+
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+ longcal = true;
+ ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >=
+ short_cal_interval) {
+ shortcal = true;
+ ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Skip all processing if there's nothing to do. */
+ if (longcal || shortcal || aniflag) {
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Call ANI routine if necessary */
+ if (aniflag)
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal)
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ ah->rxchainmask, longcal);
+
+ ath9k_htc_ps_restore(priv);
+ }
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
+ msecs_to_jiffies(cal_interval));
+}
+
+/**********************/
+/* mac80211 Callbacks */
+/**********************/
+
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int padpos, padsize, ret, slot;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, XMIT, "No room for padding\n");
+ goto fail_tx;
+ }
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ slot = ath9k_htc_tx_get_slot(priv);
+ if (slot < 0) {
+ ath_dbg(common, XMIT, "No free TX slot\n");
+ goto fail_tx;
+ }
+
+ ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
+ if (ret != 0) {
+ ath_dbg(common, XMIT, "Tx failed\n");
+ goto clear_slot;
+ }
+
+ ath9k_htc_check_stop_queues(priv);
+
+ return;
+
+clear_slot:
+ ath9k_htc_tx_clear_slot(priv, slot);
+fail_tx:
+ dev_kfree_skb_any(skb);
+}
+
+static int ath9k_htc_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ struct ath9k_channel *init_channel;
+ int ret = 0;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ ath_dbg(common, CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* Ensure that HW is awake before flushing RX */
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ WMI_CMD(WMI_FLUSH_RECV_CMDID);
+
+ /* setup initial channel */
+ init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
+
+ ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ ret, curchan->center_freq);
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
+
+ mode = ath9k_htc_get_curmode(priv, init_channel);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ WMI_CMD(WMI_ATH_INIT_CMDID);
+ WMI_CMD(WMI_START_RECV_CMDID);
+
+ ath9k_host_rx_init(priv);
+
+ ret = ath9k_htc_update_cap_target(priv, 0);
+ if (ret)
+ ath_dbg(common, CONFIG,
+ "Failed to update capability in target\n");
+
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
+ htc_start(priv->htc);
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ ieee80211_wake_queues(hw);
+
+ mod_timer(&priv->tx.cleanup_timer,
+ jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+
+ ath9k_htc_start_btcoex(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret __attribute__ ((unused));
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ ath9k_htc_ps_wakeup(priv);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ tasklet_kill(&priv->rx_tasklet);
+
+ del_timer_sync(&priv->tx.cleanup_timer);
+ ath9k_htc_tx_drain(priv);
+ ath9k_wmi_event_drain(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->fatal_work);
+ cancel_work_sync(&priv->ps_work);
+
+#ifdef CONFIG_MAC80211_LEDS
+ cancel_work_sync(&priv->led_work);
+#endif
+ ath9k_htc_stop_ani(priv);
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_stop_btcoex(priv);
+
+ /* Remove a monitor interface if it's present. */
+ if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
+
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ ath_dbg(common, CONFIG, "Driver halt\n");
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_ps_wakeup(priv);
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ hvif.opmode = HTC_M_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ hvif.opmode = HTC_M_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ hvif.opmode = HTC_M_HOSTAP;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ hvif.opmode = HTC_M_WDS; /* close enough */
+ break;
+ default:
+ ath_err(common,
+ "Interface type %d not yet supported\n", vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* Index starts from zero on the target */
+ avp->index = hvif.index = ffz(priv->vif_slot);
+ hvif.rtsthreshold = cpu_to_be16(2304);
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto out;
+
+ /*
+ * We need a node in the target to transmit management
+ * frames before association.
+ */
+ ret = ath9k_htc_add_station(priv, vif, NULL);
+ if (ret) {
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ goto out;
+ }
+
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
+
+ INC_VIF(priv, vif->type);
+
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT) ||
+ (vif->type == NL80211_IFTYPE_ADHOC))
+ ath9k_htc_assign_bslot(priv, vif);
+
+ ath9k_htc_set_opmode(priv);
+
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
+ ath9k_hw_set_tsfadjust(priv->ah, true);
+ ath9k_htc_start_ani(priv);
+ }
+
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n",
+ vif->type, avp->index);
+
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+ hvif.index = avp->index;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ if (ret) {
+ ath_err(common, "Unable to remove interface at idx: %d\n",
+ avp->index);
+ }
+ priv->nvifs--;
+ priv->vif_slot &= ~(1 << avp->index);
+
+ ath9k_htc_remove_station(priv, vif, NULL);
+
+ DEC_VIF(priv, vif->type);
+
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT) ||
+ (vif->type == NL80211_IFTYPE_ADHOC))
+ ath9k_htc_remove_bslot(priv, vif);
+
+ ath9k_htc_set_opmode(priv);
+
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+ */
+ if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
+ priv->rearm_ani = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_vif_iter, priv);
+ if (!priv->rearm_ani)
+ ath9k_htc_stop_ani(priv);
+ }
+
+ ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_conf *conf = &hw->conf;
+ bool chip_reset = false;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ mutex_lock(&priv->htc_pm_lock);
+
+ priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (!priv->ps_idle)
+ chip_reset = true;
+
+ mutex_unlock(&priv->htc_pm_lock);
+ }
+
+ /*
+ * Monitor interface should be added before
+ * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if ((conf->flags & IEEE80211_CONF_MONITOR) &&
+ !priv->ah->is_monitoring)
+ ath9k_htc_add_monitor_interface(priv);
+ else if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
+ struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ int pos = curchan->hw_value;
+
+ ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
+ if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
+ ath_err(common, "Unable to set channel\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+ priv->ps_enabled = true;
+ } else {
+ priv->ps_enabled = false;
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ priv->txpowlimit = 2 * conf->power_level;
+ ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+ priv->txpowlimit, &priv->curtxpow);
+ }
+
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ u32 rfilt;
+
+ mutex_lock(&priv->mutex);
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_dbg(common, ANY,
+ "Unable to configure filter on invalid state\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+ ath9k_htc_ps_wakeup(priv);
+
+ priv->rxfilter = *total_flags;
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(priv->ah, rfilt);
+
+ ath_dbg(common, CONFIG, "Set HW RX filter: 0x%x\n", rfilt);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
+ ath9k_htc_init_rate(priv, sta);
+ }
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ int ret;
+
+ cancel_work_sync(&ista->rc_update_work);
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ htc_sta_drain(priv->htc, ista->index);
+ ret = ath9k_htc_remove_station(priv, vif, sta);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
+
+ schedule_work(&ista->rc_update_work);
+}
+
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= IEEE80211_NUM_ACS)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop * 32;
+
+ qnum = get_hw_qnum(queue, priv->hwq_map);
+
+ ath_dbg(common, CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ret = ath_htc_txq_update(priv, qnum, &qi);
+ if (ret) {
+ ath_err(common, "TXQ Update failed\n");
+ goto out;
+ }
+
+ if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
+ (qnum == priv->hwq_map[IEEE80211_AC_BE]))
+ ath9k_htc_beaconq_config(priv);
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+
+ if (htc_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /*
+ * For now, disable hw crypto for the RSN IBSS group keys. This
+ * could be optimized in the future to use a modified key cache
+ * design to support per-STA RX GTK, but until that gets
+ * implemented, use of software crypto for group addressed
+ * frames is acceptable to allow RSN IBSS to be used.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&priv->mutex);
+ ath_dbg(common, CONFIG, "Set HW Key\n");
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (priv->ah->sw_mgmt_crypto_tx &&
+ key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ ath9k_hw_write_associd(priv->ah);
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+}
+
+static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
+ common->curaid = bss_conf->aid;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+ }
+}
+
+static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
+{
+ if (priv->num_sta_assoc_vif == 1) {
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_bss_iter, priv);
+ ath9k_htc_set_bssid(priv);
+ }
+}
+
+static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int slottime;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ if (bss_conf->assoc)
+ priv->num_sta_assoc_vif++;
+ else
+ priv->num_sta_assoc_vif--;
+
+ if (!bss_conf->assoc)
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
+ if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_htc_choose_set_bssid(priv);
+ if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
+ ath9k_htc_start_ani(priv);
+ else if (priv->num_sta_assoc_vif == 0)
+ ath9k_htc_stop_ani(priv);
+ }
+ }
+
+ if (changed & BSS_CHANGED_IBSS) {
+ if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
+ common->curaid = bss_conf->aid;
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_htc_set_bssid(priv);
+ }
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
+ ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
+ bss_conf->bssid);
+ ath9k_htc_set_tsfadjust(priv, vif);
+ priv->cur_beacon_conf.enable_beacon = 1;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
+ /*
+ * Disable SWBA interrupt only if there are no
+ * concurrent AP/mesh or IBSS interfaces.
+ */
+ if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) ||
+ priv->num_ibss_vif) {
+ ath_dbg(common, CONFIG,
+ "Beacon disabled for BSS: %pM\n",
+ bss_conf->bssid);
+ priv->cur_beacon_conf.enable_beacon = 0;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ /*
+ * Reset the HW TSF for the first AP or mesh interface.
+ */
+ if (priv->nvifs == 1 &&
+ ((priv->ah->opmode == NL80211_IFTYPE_AP &&
+ vif->type == NL80211_IFTYPE_AP &&
+ priv->num_ap_vif == 1) ||
+ (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT &&
+ vif->type == NL80211_IFTYPE_MESH_POINT &&
+ priv->num_mbss_vif == 1))) {
+ set_bit(OP_TSF_RESET, &priv->op_flags);
+ }
+ ath_dbg(common, CONFIG,
+ "Beacon interval changed for BSS: %pM\n",
+ bss_conf->bssid);
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details.
+ */
+ priv->beacon.slottime = slottime;
+ priv->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
+ if (changed & BSS_CHANGED_HT)
+ ath9k_htc_update_rate(priv, vif, bss_conf);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return tsf;
+}
+
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 tsf)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_hw_settsf64(priv->ah, tsf);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_hw_reset_tsf(priv->ah);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn, u8 buf_size)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ if (!ret)
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ spin_lock_bh(&priv->tx.tx_lock);
+ ista->tid_state[tid] = AGGR_OPERATIONAL;
+ spin_unlock_bh(&priv->tx.tx_lock);
+ break;
+ default:
+ ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
+ spin_unlock_bh(&priv->beacon_lock);
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_stop_ani(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
+ spin_unlock_bh(&priv->beacon_lock);
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_vif_reconfig(priv);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ priv->ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(priv->ah);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/*
+ * Currently, this is used only for selecting the minimum rate
+ * for management frames; rate selection for data frames remains
+ * unaffected.
+ */
+static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate_mask tmask;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
+
+ tmask.vif_index = avp->index;
+ tmask.band = IEEE80211_BAND_2GHZ;
+ tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
+
+ WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
+ if (ret) {
+ ath_err(common,
+ "Unable to set 2G rate mask for "
+ "interface at idx: %d\n", avp->index);
+ goto out;
+ }
+
+ tmask.band = IEEE80211_BAND_5GHZ;
+ tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
+
+ WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
+ if (ret) {
+ ath_err(common,
+ "Unable to set 5G rate mask for "
+ "interface at idx: %d\n", avp->index);
+ goto out;
+ }
+
+ ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
+ mask->control[IEEE80211_BAND_2GHZ].legacy,
+ mask->control[IEEE80211_BAND_5GHZ].legacy);
+out:
+ return ret;
+}
+
+static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;
+
+ stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
+ stats->dot11RTSFailureCount = mib_stats->rts_bad;
+ stats->dot11FCSErrorCount = mib_stats->fcs_bad;
+ stats->dot11RTSSuccessCount = mib_stats->rts_good;
+
+ return 0;
+}
+
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
+{
+ struct base_eep_header *pBase = NULL;
+ /*
+ * This can be done since all three EEPROM families have the
+ * same base header up to a certain point, and we are interested in
+ * the data only up to that point.
+ */
+
+ if (AR_SREV_9271(priv->ah))
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.map4k.baseEepHeader;
+ else if (priv->ah->hw_version.usbdev == AR9280_USB)
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.def.baseEepHeader;
+ else if (priv->ah->hw_version.usbdev == AR9287_USB)
+ pBase = (struct base_eep_header *)
+ &priv->ah->eeprom.map9287.baseEepHeader;
+ return pBase;
+}
+
+static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
+ u32 *rx_ant)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
+ if (pBase) {
+ *tx_ant = pBase->txMask;
+ *rx_ant = pBase->rxMask;
+ } else {
+ *tx_ant = 0;
+ *rx_ant = 0;
+ }
+ return 0;
+}
+
+struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+ .add_interface = ath9k_htc_add_interface,
+ .remove_interface = ath9k_htc_remove_interface,
+ .config = ath9k_htc_config,
+ .configure_filter = ath9k_htc_configure_filter,
+ .sta_add = ath9k_htc_sta_add,
+ .sta_remove = ath9k_htc_sta_remove,
+ .conf_tx = ath9k_htc_conf_tx,
+ .sta_rc_update = ath9k_htc_sta_rc_update,
+ .bss_info_changed = ath9k_htc_bss_info_changed,
+ .set_key = ath9k_htc_set_key,
+ .get_tsf = ath9k_htc_get_tsf,
+ .set_tsf = ath9k_htc_set_tsf,
+ .reset_tsf = ath9k_htc_reset_tsf,
+ .ampdu_action = ath9k_htc_ampdu_action,
+ .sw_scan_start = ath9k_htc_sw_scan_start,
+ .sw_scan_complete = ath9k_htc_sw_scan_complete,
+ .set_rts_threshold = ath9k_htc_set_rts_threshold,
+ .rfkill_poll = ath9k_htc_rfkill_poll_state,
+ .set_coverage_class = ath9k_htc_set_coverage_class,
+ .set_bitrate_mask = ath9k_htc_set_bitrate_mask,
+ .get_stats = ath9k_htc_get_stats,
+ .get_antenna = ath9k_htc_get_antenna,
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ .get_et_sset_count = ath9k_htc_get_et_sset_count,
+ .get_et_stats = ath9k_htc_get_et_stats,
+ .get_et_strings = ath9k_htc_get_et_strings,
+#endif
+};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 000000000..a0f58e2aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******/
+/* TX */
+/******/
+
+static const int subtype_txq_to_hwq[] = {
+ [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+ [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+ [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+ [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
+};
+
+#define ATH9K_HTC_INIT_TXQ(subtype) do { \
+ qi.tqi_subtype = subtype_txq_to_hwq[subtype]; \
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_physCompBuf = 0; \
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | \
+ TXQ_FLAG_TXDESCINT_ENABLE; \
+ } while (0)
+
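+/*
+ * Map a mac80211 queue number (0 = VO ... 3 = BK) to the
+ * corresponding hardware queue in hwq_map.
+ */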
+int get_hw_qnum(u16 queue, int *hwq_map)
+{
+ switch (queue) {
+ case 0:
+ return hwq_map[IEEE80211_AC_VO];
+ case 1:
+ return hwq_map[IEEE80211_AC_VI];
+ case 2:
+ return hwq_map[IEEE80211_AC_BE];
+ case 3:
+ return hwq_map[IEEE80211_AC_BK];
+ default:
+ return hwq_map[IEEE80211_AC_BE];
+ }
+}
+
+void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv)
+{
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.queued_cnt++;
+ if ((priv->tx.queued_cnt >= ATH9K_HTC_TX_THRESHOLD) &&
+ !(priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
+ priv->tx.flags |= ATH9K_HTC_OP_TX_QUEUES_STOP;
+ ieee80211_stop_queues(priv->hw);
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
+void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv)
+{
+ spin_lock_bh(&priv->tx.tx_lock);
+ if ((priv->tx.queued_cnt < ATH9K_HTC_TX_THRESHOLD) &&
+ (priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
+ ieee80211_wake_queues(priv->hw);
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
+int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv)
+{
+ int slot;
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ slot = find_first_zero_bit(priv->tx.tx_slot, MAX_TX_BUF_NUM);
+ if (slot >= MAX_TX_BUF_NUM) {
+ spin_unlock_bh(&priv->tx.tx_lock);
+ return -ENOBUFS;
+ }
+ __set_bit(slot, priv->tx.tx_slot);
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ return slot;
+}
+
+void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot)
+{
+ spin_lock_bh(&priv->tx.tx_lock);
+ __clear_bit(slot, priv->tx.tx_slot);
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
+static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
+ u16 qnum)
+{
+ enum htc_endpoint_id epid;
+
+ switch (qnum) {
+ case 0:
+ TX_QSTAT_INC(IEEE80211_AC_VO);
+ epid = priv->data_vo_ep;
+ break;
+ case 1:
+ TX_QSTAT_INC(IEEE80211_AC_VI);
+ epid = priv->data_vi_ep;
+ break;
+ case 2:
+ TX_QSTAT_INC(IEEE80211_AC_BE);
+ epid = priv->data_be_ep;
+ break;
+ case 3:
+ default:
+ TX_QSTAT_INC(IEEE80211_AC_BK);
+ epid = priv->data_bk_ep;
+ break;
+ }
+
+ return epid;
+}
+
+static inline struct sk_buff_head*
+get_htc_epid_queue(struct ath9k_htc_priv *priv, u8 epid)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct sk_buff_head *epid_queue = NULL;
+
+ if (epid == priv->mgmt_ep)
+ epid_queue = &priv->tx.mgmt_ep_queue;
+ else if (epid == priv->cab_ep)
+ epid_queue = &priv->tx.cab_ep_queue;
+ else if (epid == priv->data_be_ep)
+ epid_queue = &priv->tx.data_be_queue;
+ else if (epid == priv->data_bk_ep)
+ epid_queue = &priv->tx.data_bk_queue;
+ else if (epid == priv->data_vi_ep)
+ epid_queue = &priv->tx.data_vi_queue;
+ else if (epid == priv->data_vo_ep)
+ epid_queue = &priv->tx.data_vo_queue;
+ else
+ ath_err(common, "Invalid EPID: %d\n", epid);
+
+ return epid_queue;
+}
+
+/*
+ * Removes the driver header and returns the TX slot number
+ */
+static inline int strip_drv_header(struct ath9k_htc_priv *priv,
+ struct sk_buff *skb)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ int slot;
+
+ tx_ctl = HTC_SKB_CB(skb);
+
+ if (tx_ctl->epid == priv->mgmt_ep) {
+ struct tx_mgmt_hdr *tx_mhdr =
+ (struct tx_mgmt_hdr *)skb->data;
+ slot = tx_mhdr->cookie;
+ skb_pull(skb, sizeof(struct tx_mgmt_hdr));
+ } else if ((tx_ctl->epid == priv->data_bk_ep) ||
+ (tx_ctl->epid == priv->data_be_ep) ||
+ (tx_ctl->epid == priv->data_vi_ep) ||
+ (tx_ctl->epid == priv->data_vo_ep) ||
+ (tx_ctl->epid == priv->cab_ep)) {
+ struct tx_frame_hdr *tx_fhdr =
+ (struct tx_frame_hdr *)skb->data;
+ slot = tx_fhdr->cookie;
+ skb_pull(skb, sizeof(struct tx_frame_hdr));
+ } else {
+ ath_err(common, "Unsupported EPID: %d\n", tx_ctl->epid);
+ slot = -EINVAL;
+ }
+
+ return slot;
+}
+
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = priv->ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_err(ath9k_hw_common(ah),
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
+static void ath9k_htc_tx_mgmt(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_vif *avp,
+ struct sk_buff *skb,
+ u8 sta_idx, u8 vif_idx, u8 slot)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_hdr *hdr;
+ struct tx_mgmt_hdr mgmt_hdr;
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ u8 *tx_fhdr;
+
+ tx_ctl = HTC_SKB_CB(skb);
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ memset(tx_ctl, 0, sizeof(*tx_ctl));
+ memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
+
+ /*
+ * Set the TSF adjust value for probe response
+ * frames as well.
+ */
+ if (avp && unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.probe_resp.timestamp = avp->tsfadjust;
+ }
+
+ tx_ctl->type = ATH9K_HTC_MGMT;
+
+ mgmt_hdr.node_idx = sta_idx;
+ mgmt_hdr.vif_idx = vif_idx;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+ mgmt_hdr.cookie = slot;
+
+ mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
+ memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
+ tx_ctl->epid = priv->mgmt_ep;
+}
+
+static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ u8 sta_idx, u8 vif_idx, u8 slot,
+ bool is_cab)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ struct tx_frame_hdr tx_hdr;
+ u32 flags = 0;
+ u8 *qc, *tx_fhdr;
+ u16 qnum;
+
+ tx_ctl = HTC_SKB_CB(skb);
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ memset(tx_ctl, 0, sizeof(*tx_ctl));
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+ tx_hdr.vif_idx = vif_idx;
+ tx_hdr.cookie = slot;
+
+ /*
+ * This is a bit redundant, but it helps to get
+ * the per-packet index quickly when draining the
+ * TX queue in the HIF layer. Otherwise we would
+ * have to parse the packet contents ...
+ */
+ tx_ctl->sta_idx = sta_idx;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl->type = ATH9K_HTC_AMPDU;
+ tx_hdr.data_type = ATH9K_HTC_AMPDU;
+ } else {
+ tx_ctl->type = ATH9K_HTC_NORMAL;
+ tx_hdr.data_type = ATH9K_HTC_NORMAL;
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ /* Check for RTS protection */
+ if ((priv->hw->wiphy->rts_threshold != (u32) -1) &&
+ (skb->len > priv->hw->wiphy->rts_threshold))
+ flags |= ATH9K_HTC_TX_RTSCTS;
+
+ /* CTS-to-self */
+ if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
+ (vif && vif->bss_conf.use_cts_prot))
+ flags |= ATH9K_HTC_TX_CTSONLY;
+
+ tx_hdr.flags = cpu_to_be32(flags);
+ tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(tx_hdr));
+ memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
+
+ if (is_cab) {
+ CAB_STAT_INC;
+ tx_ctl->epid = priv->cab_ep;
+ return;
+ }
+
+ qnum = skb_get_queue_mapping(skb);
+ tx_ctl->epid = get_htc_epid(priv, qnum);
+}
+
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 slot, bool is_cab)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = tx_info->control.vif;
+ struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp = NULL;
+ u8 sta_idx, vif_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /*
+ * Find out on which interface this packet has to be
+ * sent out.
+ */
+ if (vif) {
+ avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ vif_idx = avp->index;
+ } else {
+ if (!priv->ah->is_monitoring) {
+ ath_dbg(ath9k_hw_common(priv->ah), XMIT,
+ "VIF is null, but no monitor interface !\n");
+ return -EINVAL;
+ }
+
+ vif_idx = priv->mon_vif_idx;
+ }
+
+ /*
+ * Find out which station this packet is destined for.
+ */
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = priv->vif_sta_pos[vif_idx];
+ }
+
+ if (ieee80211_is_data(hdr->frame_control))
+ ath9k_htc_tx_data(priv, vif, skb,
+ sta_idx, vif_idx, slot, is_cab);
+ else
+ ath9k_htc_tx_mgmt(priv, avp, skb,
+ sta_idx, vif_idx, slot);
+
+ return htc_send(priv->htc, skb);
+}
+
+static inline bool __ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_sta *ista, u8 tid)
+{
+ bool ret = false;
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
+ ret = true;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ return ret;
+}
+
+static void ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, hdr->addr1);
+ if (!sta) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (sta && conf_is_ht(&priv->hw->conf) &&
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+ struct ath9k_htc_sta *ista;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ista = (struct ath9k_htc_sta *)sta->drv_priv;
+ if (__ath9k_htc_check_tx_aggr(priv, ista, tid)) {
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ spin_lock_bh(&priv->tx.tx_lock);
+ ista->tid_state[tid] = AGGR_PROGRESS;
+ spin_unlock_bh(&priv->tx.tx_lock);
+ }
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
+ struct sk_buff *skb,
+ struct __wmi_event_txstatus *txs)
+{
+ struct ieee80211_vif *vif;
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_conf *cur_conf = &priv->hw->conf;
+ bool txok;
+ int slot;
+ int hdrlen, padsize;
+
+ slot = strip_drv_header(priv, skb);
+ if (slot < 0) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ tx_ctl = HTC_SKB_CB(skb);
+ txok = tx_ctl->txok;
+ tx_info = IEEE80211_SKB_CB(skb);
+ vif = tx_info->control.vif;
+ rate = &tx_info->status.rates[0];
+
+ memset(&tx_info->status, 0, sizeof(tx_info->status));
+
+ /*
+ * URB submission failed for this frame, it never reached
+ * the target.
+ */
+ if (!txok || !vif || !txs)
+ goto send_mac80211;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK) {
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ }
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_RTC_CTS)
+ rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
+
+ rate->count = 1;
+ rate->idx = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_RATE);
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_MCS) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_CW40)
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+ } else {
+ if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ rate->idx += 4; /* No CCK rates */
+ }
+
+ ath9k_htc_check_tx_aggr(priv, vif, skb);
+
+send_mac80211:
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (WARN_ON(--priv->tx.queued_cnt < 0))
+ priv->tx.queued_cnt = 0;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ ath9k_htc_tx_clear_slot(priv, slot);
+
+ /* Remove padding before handing frame back to mac80211 */
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padsize = hdrlen & 3;
+ if (padsize && skb->len > hdrlen + padsize) {
+ memmove(skb->data + padsize, skb->data, hdrlen);
+ skb_pull(skb, padsize);
+ }
+
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+}
+
+static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
+ struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(queue)) != NULL) {
+ ath9k_htc_tx_process(priv, skb, NULL);
+ }
+}
+
+void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_tx_event *event, *tmp;
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ /*
+ * Ensure that all pending TX frames are flushed,
+ * and that the TX completion/failed tasklets are killed.
+ */
+ htc_stop(priv->htc);
+ tasklet_kill(&priv->wmi->wmi_event_tasklet);
+ tasklet_kill(&priv->tx_failed_tasklet);
+
+ ath9k_htc_tx_drainq(priv, &priv->tx.mgmt_ep_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.cab_ep_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_be_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_bk_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_vi_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
+
+ /*
+ * The TX cleanup timer has already been killed.
+ */
+ spin_lock_bh(&priv->wmi->event_lock);
+ list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+ spin_unlock_bh(&priv->wmi->event_lock);
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN;
+ spin_unlock_bh(&priv->tx.tx_lock);
+}
+
+void ath9k_tx_failed_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
+ spin_unlock_bh(&priv->tx.tx_lock);
+ return;
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
+}
+
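+/*
+ * Check whether the cookie in the skb's driver header matches
+ * the cookie reported in a TX status event.
+ */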
+static inline bool check_cookie(struct ath9k_htc_priv *priv,
+ struct sk_buff *skb,
+ u8 cookie, u8 epid)
+{
+ u8 fcookie = 0;
+
+ if (epid == priv->mgmt_ep) {
+ struct tx_mgmt_hdr *hdr;
+ hdr = (struct tx_mgmt_hdr *) skb->data;
+ fcookie = hdr->cookie;
+ } else if ((epid == priv->data_bk_ep) ||
+ (epid == priv->data_be_ep) ||
+ (epid == priv->data_vi_ep) ||
+ (epid == priv->data_vo_ep) ||
+ (epid == priv->cab_ep)) {
+ struct tx_frame_hdr *hdr;
+ hdr = (struct tx_frame_hdr *) skb->data;
+ fcookie = hdr->cookie;
+ }
+
+ if (fcookie == cookie)
+ return true;
+
+ return false;
+}
+
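+/*
+ * Find the queued frame that matches a TX status event, using
+ * the endpoint ID and cookie carried in the event.
+ */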
+static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
+ struct __wmi_event_txstatus *txs)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct sk_buff_head *epid_queue;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ u8 epid = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_EPID);
+
+ epid_queue = get_htc_epid_queue(priv, epid);
+ if (!epid_queue)
+ return NULL;
+
+ spin_lock_irqsave(&epid_queue->lock, flags);
+ skb_queue_walk_safe(epid_queue, skb, tmp) {
+ if (check_cookie(priv, skb, txs->cookie, epid)) {
+ __skb_unlink(skb, epid_queue);
+ spin_unlock_irqrestore(&epid_queue->lock, flags);
+ return skb;
+ }
+ }
+ spin_unlock_irqrestore(&epid_queue->lock, flags);
+
+ ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n",
+ txs->cookie, epid);
+
+ return NULL;
+}
+
+void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
+{
+ struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
+ struct __wmi_event_txstatus *__txs;
+ struct sk_buff *skb;
+ struct ath9k_htc_tx_event *tx_pend;
+ int i;
+
+ for (i = 0; i < txs->cnt; i++) {
+ WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
+
+ __txs = &txs->txstatus[i];
+
+ skb = ath9k_htc_tx_get_packet(priv, __txs);
+ if (!skb) {
+ /*
+ * Store this event so that the TX cleanup
+ * routine can look up the matching packet later.
+ */
+ tx_pend = kzalloc(sizeof(struct ath9k_htc_tx_event),
+ GFP_ATOMIC);
+ if (!tx_pend)
+ continue;
+
+ memcpy(&tx_pend->txs, __txs,
+ sizeof(struct __wmi_event_txstatus));
+
+ spin_lock(&priv->wmi->event_lock);
+ list_add_tail(&tx_pend->list,
+ &priv->wmi->pending_tx_events);
+ spin_unlock(&priv->wmi->event_lock);
+
+ continue;
+ }
+
+ ath9k_htc_tx_process(priv, skb, __txs);
+ }
+
+ /* Wake TX queues if needed */
+ ath9k_htc_check_wake_queues(priv);
+}
+
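+/*
+ * TX completion callback from the HTC layer. Failed frames are
+ * handed to the tx_failed tasklet, successful ones are queued on
+ * their endpoint queue until the WMI TX status event arrives.
+ */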
+void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath9k_htc_tx_ctl *tx_ctl;
+ struct sk_buff_head *epid_queue;
+
+ tx_ctl = HTC_SKB_CB(skb);
+ tx_ctl->txok = txok;
+ tx_ctl->timestamp = jiffies;
+
+ if (!txok) {
+ skb_queue_tail(&priv->tx.tx_failed, skb);
+ tasklet_schedule(&priv->tx_failed_tasklet);
+ return;
+ }
+
+ epid_queue = get_htc_epid_queue(priv, ep_id);
+ if (!epid_queue) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb_queue_tail(epid_queue, skb);
+}
+
+static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_tx_ctl *tx_ctl;
+
+ tx_ctl = HTC_SKB_CB(skb);
+
+ if (time_after(jiffies,
+ tx_ctl->timestamp +
+ msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
+ ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
+ struct sk_buff_head *epid_queue)
+{
+ bool process = false;
+ unsigned long flags;
+ struct sk_buff *skb, *tmp;
+ struct sk_buff_head queue;
+
+ skb_queue_head_init(&queue);
+
+ spin_lock_irqsave(&epid_queue->lock, flags);
+ skb_queue_walk_safe(epid_queue, skb, tmp) {
+ if (check_packet(priv, skb)) {
+ __skb_unlink(skb, epid_queue);
+ __skb_queue_tail(&queue, skb);
+ process = true;
+ }
+ }
+ spin_unlock_irqrestore(&epid_queue->lock, flags);
+
+ if (process) {
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ __skb_unlink(skb, &queue);
+ ath9k_htc_tx_process(priv, skb, NULL);
+ }
+ }
+}
+
+void ath9k_htc_tx_cleanup_timer(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_tx_event *event, *tmp;
+ struct sk_buff *skb;
+
+ spin_lock(&priv->wmi->event_lock);
+ list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+
+ skb = ath9k_htc_tx_get_packet(priv, &event->txs);
+ if (skb) {
+ ath_dbg(common, XMIT,
+ "Found packet for cookie: %d, epid: %d\n",
+ event->txs.cookie,
+ MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
+
+ ath9k_htc_tx_process(priv, skb, &event->txs);
+ list_del(&event->list);
+ kfree(event);
+ continue;
+ }
+
+ if (++event->count >= ATH9K_HTC_TX_TIMEOUT_COUNT) {
+ list_del(&event->list);
+ kfree(event);
+ }
+ }
+ spin_unlock(&priv->wmi->event_lock);
+
+ /*
+ * Check if status-pending packets have to be cleaned up.
+ */
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue);
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue);
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue);
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue);
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue);
+ ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue);
+
+ /* Wake TX queues if needed */
+ ath9k_htc_check_wake_queues(priv);
+
+ mod_timer(&priv->tx.cleanup_timer,
+ jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+}
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv)
+{
+ skb_queue_head_init(&priv->tx.mgmt_ep_queue);
+ skb_queue_head_init(&priv->tx.cab_ep_queue);
+ skb_queue_head_init(&priv->tx.data_be_queue);
+ skb_queue_head_init(&priv->tx.data_bk_queue);
+ skb_queue_head_init(&priv->tx.data_vi_queue);
+ skb_queue_head_init(&priv->tx.data_vo_queue);
+ skb_queue_head_init(&priv->tx.tx_failed);
+ return 0;
+}
+
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
+{
+
+}
+
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memset(&qi, 0, sizeof(qi));
+ ATH9K_HTC_INIT_TXQ(subtype);
+
+ qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
+ if (qnum == -1)
+ return false;
+
+ if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
+ ath_err(common, "qnum %u out of range, max %zu!\n",
+ qnum, ARRAY_SIZE(priv->hwq_map));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return false;
+ }
+
+ priv->hwq_map[subtype] = qnum;
+ return true;
+}
+
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_tx_queue_info qi;
+
+ memset(&qi, 0, sizeof(qi));
+ ATH9K_HTC_INIT_TXQ(0);
+
+ return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
+}
+
+/******/
+/* RX */
+/******/
+
+/*
+ * Calculate the RX filter to be set in the HW.
+ */
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ if (priv->rxfilter & FIF_PROBE_REQ)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for a station
+ * mode interface or when in monitor mode. AP mode does not need this
+ * since it receives all in-BSS frames anyway.
+ */
+ if (((ah->opmode != NL80211_IFTYPE_AP) &&
+ (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
+ ah->is_monitoring)
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if (priv->rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->nvifs <= 1) &&
+ !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if (conf_is_ht(&priv->hw->conf)) {
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+ rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
+ }
+
+ if (priv->rxfilter & FIF_PSPOLL)
+ rfilt |= ATH9K_RX_FILTER_PSPOLL;
+
+ if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS)
+ rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
+
+ return rfilt;
+
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Recv initialization for opmode change.
+ */
+static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ ath9k_hw_rxena(priv->ah);
+ ath9k_htc_opmode_init(priv);
+ ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
+}
+
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->flag = 0;
+ if (rxstatus->rs_flags & ATH9K_RX_2040)
+ rx_stats->flag |= RX_FLAG_40MHZ;
+ if (rxstatus->rs_flags & ATH9K_RX_GI)
+ rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
+
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ rx_stats->rs_status = rxstatus->rs_status;
+ rx_stats->rs_phyerr = rxstatus->rs_phyerr;
+ rx_stats->rs_rssi = rxstatus->rs_rssi;
+ rx_stats->rs_keyix = rxstatus->rs_keyix;
+ rx_stats->rs_rate = rxstatus->rs_rate;
+ rx_stats->rs_antenna = rxstatus->rs_antenna;
+ rx_stats->rs_more = rxstatus->rs_more;
+
+ memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+ sizeof(rx_stats->rs_rssi_ctl));
+ memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+ sizeof(rx_stats->rs_rssi_ext));
+
+ rx_stats->rs_isaggr = rxstatus->rs_isaggr;
+ rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
+ rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+ convert_htc_flag(rx_stats, rxstatus);
+}
+
+static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_rxbuf *rxbuf,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_hw *hw = priv->hw;
+ struct sk_buff *skb = rxbuf->skb;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_hw *ah = common->ah;
+ struct ath_htc_rx_status *rxstatus;
+ struct ath_rx_status rx_stats;
+ bool decrypt_error = false;
+
+ if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
+ ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
+ skb->len);
+ goto rx_next;
+ }
+
+ rxstatus = (struct ath_htc_rx_status *)skb->data;
+
+ if (be16_to_cpu(rxstatus->rs_datalen) -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ ath_err(common,
+ "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
+ rxstatus->rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ /* Get the RX status information */
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+ * After this, we can drop this part of the skb. */
+ rx_status_htc_to_ath(&rx_stats, rxstatus);
+ ath9k_htc_err_stat_rx(priv, &rx_stats);
+ rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups for each frame.
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ /* TODO: Not using DFS processing now. */
+ if (ath_cmn_process_fft(&priv->spec_priv, hdr,
+ &rx_stats, rx_status->mactime)) {
+ /* TODO: Code to collect spectral scan statistics */
+ }
+ goto rx_next;
+ }
+
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+ &decrypt_error, priv->rxfilter))
+ goto rx_next;
+
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+ rx_status, decrypt_error);
+
+ if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+ goto rx_next;
+
+ rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+ ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
+
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats.rs_antenna;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+
+ return true;
+rx_next:
+ return false;
+}
+
+/*
+ * FIXME: Handle FLUSH later on.
+ */
+void ath9k_rx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr;
+
+ do {
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+
+ if (rxbuf == NULL) {
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ break;
+ }
+
+ if (!rxbuf->skb)
+ goto requeue;
+
+ if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
+ dev_kfree_skb_any(rxbuf->skb);
+ goto requeue;
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
+ sizeof(struct ieee80211_rx_status));
+ skb = rxbuf->skb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
+ ieee80211_queue_work(priv->hw, &priv->ps_work);
+
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+
+ ieee80211_rx(priv->hw, skb);
+
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+requeue:
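+ /* Return the buffer to the pool and look for the next one to process */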
+ rxbuf->in_process = false;
+ rxbuf->skb = NULL;
+ list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
+ rxbuf = NULL;
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ } while (1);
+
+}
+
+void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+
+ spin_lock(&priv->rx.rxbuflock);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (!tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+ spin_unlock(&priv->rx.rxbuflock);
+
+ if (rxbuf == NULL) {
+ ath_dbg(common, ANY, "No free RX buffer\n");
+ goto err;
+ }
+
+ spin_lock(&priv->rx.rxbuflock);
+ rxbuf->skb = skb;
+ rxbuf->in_process = true;
+ spin_unlock(&priv->rx.rxbuflock);
+
+ tasklet_schedule(&priv->rx_tasklet);
+ return;
+err:
+ dev_kfree_skb_any(skb);
+}
+
+/* FIXME: Locking for cleanup/init */
+
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_rxbuf *rxbuf, *tbuf;
+
+ list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
+ list_del(&rxbuf->list);
+ if (rxbuf->skb)
+ dev_kfree_skb_any(rxbuf->skb);
+ kfree(rxbuf);
+ }
+}
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv)
+{
+ int i = 0;
+
+ INIT_LIST_HEAD(&priv->rx.rxbuf);
+ spin_lock_init(&priv->rx.rxbuflock);
+
+ for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
+ struct ath9k_htc_rxbuf *rxbuf =
+ kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL)
+ goto err;
+
+ list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
+ }
+
+ return 0;
+
+err:
+ ath9k_rx_cleanup(priv);
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 000000000..d2408da38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "htc.h"
+
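+/*
+ * Prepend the HTC frame header and hand the frame to the HIF (USB)
+ * layer on the endpoint's upload pipe.
+ */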
+static int htc_issue_send(struct htc_target *target, struct sk_buff *skb,
+ u16 len, u8 flags, u8 epid)
+{
+ struct htc_frame_hdr *hdr;
+ struct htc_endpoint *endpoint = &target->endpoint[epid];
+ int status;
+
+ hdr = (struct htc_frame_hdr *)
+ skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr->endpoint_id = epid;
+ hdr->flags = flags;
+ hdr->payload_len = cpu_to_be16(len);
+
+ status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
+
+ return status;
+}
+
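+/* Scan from the highest endpoint downwards and return the first unused one */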
+static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
+{
+ enum htc_endpoint_id avail_epid;
+
+ for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--)
+ if (endpoint[avail_epid].service_id == 0)
+ return &endpoint[avail_epid];
+ return NULL;
+}
+
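+/*
+ * Map an HTC service to its USB pipes: the WMI control service has its
+ * own pipe pair (4/3); all other services share the WLAN data pipes (1/2).
+ */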
+static u8 service_to_ulpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 4;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u8 service_to_dlpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 3;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
+static void htc_process_target_rdy(struct htc_target *target,
+ void *buf)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+
+ target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
+
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->service_id = HTC_CTRL_RSVD_SVC;
+ endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ atomic_inc(&target->tgt_ready);
+ complete(&target->target_wait);
+}
+
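+/*
+ * Handle a service connect response: locate the temporary endpoint that
+ * htc_connect_service() filled in for this service and copy its settings
+ * over to the endpoint ID assigned by the target.
+ */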
+static void htc_process_conn_rsp(struct htc_target *target,
+ struct htc_frame_hdr *htc_hdr)
+{
+ struct htc_conn_svc_rspmsg *svc_rspmsg;
+ struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
+ u16 service_id;
+ u16 max_msglen;
+ enum htc_endpoint_id epid, tepid;
+
+ svc_rspmsg = (struct htc_conn_svc_rspmsg *)
+ ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+
+ for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) {
+ tmp_endpoint = &target->endpoint[tepid];
+ if (tmp_endpoint->service_id == service_id) {
+ tmp_endpoint->service_id = 0;
+ break;
+ }
+ }
+
+ if (tepid == ENDPOINT0)
+ return;
+
+ endpoint->service_id = service_id;
+ endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
+ endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
+ endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
+ endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
+ endpoint->max_msglen = max_msglen;
+ target->conn_rsp_epid = epid;
+ complete(&target->cmd_wait);
+ } else {
+ target->conn_rsp_epid = ENDPOINT_UNUSED;
+ }
+}
+
+static int htc_config_pipe_credits(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_config_pipe_msg *cp_msg;
+ int ret, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ cp_msg = (struct htc_config_pipe_msg *)
+ skb_put(skb, sizeof(struct htc_config_pipe_msg));
+
+ cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
+ cp_msg->pipe_id = USB_WLAN_TX_PIPE;
+ cp_msg->credits = target->credits;
+
+ target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int htc_setup_complete(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_comp_msg *comp_msg;
+ int ret = 0, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ comp_msg = (struct htc_comp_msg *)
+ skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
+
+ target->htc_flags |= HTC_OP_START_WAIT;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/* HTC APIs */
+
+int htc_init(struct htc_target *target)
+{
+ int ret;
+
+ ret = htc_config_pipe_credits(target);
+ if (ret)
+ return ret;
+
+ return htc_setup_complete(target);
+}
+
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_epid)
+{
+ struct sk_buff *skb;
+ struct htc_endpoint *endpoint;
+ struct htc_conn_svc_msg *conn_msg;
+ int ret, time_left;
+
+ /* Find an available endpoint */
+ endpoint = get_next_avail_ep(target->endpoint);
+ if (!endpoint) {
+ dev_err(target->dev, "Endpoint is not available for"
+ "service %d\n", service_connreq->service_id);
+ return -EINVAL;
+ }
+
+ endpoint->service_id = service_connreq->service_id;
+ endpoint->max_txqdepth = service_connreq->max_send_qdepth;
+ endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
+ endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
+ endpoint->ep_callbacks = service_connreq->ep_callbacks;
+
+ skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
+ sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "Failed to allocate buf to send"
+ "service connect req\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ conn_msg = (struct htc_conn_svc_msg *)
+ skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
+ conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
+ conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
+ conn_msg->dl_pipeid = endpoint->dl_pipeid;
+ conn_msg->ul_pipeid = endpoint->ul_pipeid;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+ return -ETIMEDOUT;
+ }
+
+ *conn_rsp_epid = target->conn_rsp_epid;
+ return 0;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int htc_send(struct htc_target *target, struct sk_buff *skb)
+{
+ struct ath9k_htc_tx_ctl *tx_ctl;
+
+ tx_ctl = HTC_SKB_CB(skb);
+ return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid);
+}
+
+int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid)
+{
+ return htc_issue_send(target, skb, skb->len, 0, epid);
+}
+
+void htc_stop(struct htc_target *target)
+{
+ target->hif->stop(target->hif_dev);
+}
+
+void htc_start(struct htc_target *target)
+{
+ target->hif->start(target->hif_dev);
+}
+
+void htc_sta_drain(struct htc_target *target, u8 idx)
+{
+ target->hif->sta_drain(target->hif_dev, idx);
+}
+
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_frame_hdr *htc_hdr = NULL;
+
+ if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
+ goto ret;
+ }
+
+ if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
+ goto ret;
+ }
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ if (endpoint->ep_callbacks.tx) {
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
+ } else {
+ kfree_skb(skb);
+ }
+ }
+
+ return;
+ret:
+ kfree_skb(skb);
+}
+
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+ struct sk_buff *skb)
+{
+ uint32_t *pattern = (uint32_t *)skb->data;
+
+ switch (*pattern) {
+ case 0x33221199:
+ {
+ struct htc_panic_bad_vaddr *htc_panic;
+ htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+ htc_panic->exccause, htc_panic->pc,
+ htc_panic->badvaddr);
+ break;
+ }
+ case 0x33221299:
+ {
+ struct htc_panic_bad_epid *htc_panic;
+ htc_panic = (struct htc_panic_bad_epid *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "bad epid: 0x%08x\n", htc_panic->epid);
+ break;
+ }
+ default:
+ dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
+ break;
+ }
+}
+
+/*
+ * HTC messages are handled directly here and the obtained SKB
+ * is freed.
+ *
+ * Service messages (Data, WMI) are passed to the corresponding
+ * endpoint RX handlers, which have to free the SKB.
+ */
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id)
+{
+ struct htc_frame_hdr *htc_hdr;
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+ __be16 *msg_id;
+
+ if (!htc_handle || !skb)
+ return;
+
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ epid = htc_hdr->endpoint_id;
+
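+ /* Endpoint ID 0x99 is used by the firmware to report a panic */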
+ if (epid == 0x99) {
+ ath9k_htc_fw_panic_report(htc_handle, skb);
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid >= ENDPOINT_MAX) {
+ if (pipe_id != USB_REG_IN_PIPE)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid == ENDPOINT0) {
+
+ /* Handle trailer */
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ /* Move past the Watchdog pattern */
+ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ }
+
+ /* Get the message ID */
+ msg_id = (__be16 *) ((void *) htc_hdr +
+ sizeof(struct htc_frame_hdr));
+
+ /* Now process HTC messages */
+ switch (be16_to_cpu(*msg_id)) {
+ case HTC_MSG_READY_ID:
+ htc_process_target_rdy(htc_handle, htc_hdr);
+ break;
+ case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ htc_process_conn_rsp(htc_handle, htc_hdr);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+
+ } else {
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
+ skb_trim(skb, len - htc_hdr->control[0]);
+
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ endpoint = &htc_handle->endpoint[epid];
+ if (endpoint->ep_callbacks.rx)
+ endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ skb, epid);
+ }
+}
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_target *target;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (!target)
+ return NULL;
+
+ init_completion(&target->target_wait);
+ init_completion(&target->cmd_wait);
+
+ target->hif = hif;
+ target->hif_dev = hif_handle;
+ target->dev = dev;
+
+ /* Assign control endpoint pipe IDs */
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->ul_pipeid = hif->control_ul_pipe;
+ endpoint->dl_pipeid = hif->control_dl_pipe;
+
+ atomic_set(&target->tgt_ready, 0);
+
+ return target;
+}
+
+void ath9k_htc_hw_free(struct htc_target *htc)
+{
+ kfree(htc);
+}
+
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid,
+ char *product, u32 drv_info)
+{
+ if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
+ pr_err("Failed to initialize the device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
+{
+ if (target)
+ ath9k_htc_disconnect_device(target, hot_unplug);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 000000000..06474ccc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HST_H
+#define HTC_HST_H
+
+struct ath9k_htc_priv;
+struct htc_target;
+struct ath9k_htc_tx_ctl;
+
+enum ath9k_hif_transports {
+ ATH9K_HIF_USB,
+};
+
+struct ath9k_htc_hif {
+ struct list_head list;
+ const enum ath9k_hif_transports transport;
+ const char *name;
+
+ u8 control_dl_pipe;
+ u8 control_ul_pipe;
+
+ void (*start) (void *hif_handle);
+ void (*stop) (void *hif_handle);
+ void (*sta_drain) (void *hif_handle, u8 idx);
+ int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf);
+};
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT0 = 0,
+ ENDPOINT1 = 1,
+ ENDPOINT2 = 2,
+ ENDPOINT3 = 3,
+ ENDPOINT4 = 4,
+ ENDPOINT5 = 5,
+ ENDPOINT6 = 6,
+ ENDPOINT7 = 7,
+ ENDPOINT8 = 8,
+ ENDPOINT_MAX = 22
+};
+
+/* Htc frame hdr flags */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1)
+
+struct htc_frame_hdr {
+ u8 endpoint_id;
+ u8 flags;
+ __be16 payload_len;
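+ /* control[0] carries the trailer length when HTC_FLAGS_RECV_TRAILER is set */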
+ u8 control[4];
+} __packed;
+
+struct htc_ready_msg {
+ __be16 message_id;
+ __be16 credits;
+ __be16 credit_size;
+ u8 max_endpoints;
+ u8 pad;
+} __packed;
+
+struct htc_config_pipe_msg {
+ __be16 message_id;
+ u8 pipe_id;
+ u8 credits;
+} __packed;
+
+struct htc_panic_bad_vaddr {
+ __be32 pattern;
+ __be32 exccause;
+ __be32 pc;
+ __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+ __be32 pattern;
+ __be32 epid;
+} __packed;
+
+struct htc_ep_callbacks {
+ void *priv;
+ void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
+ void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
+};
+
+struct htc_endpoint {
+ u16 service_id;
+
+ struct htc_ep_callbacks ep_callbacks;
+ u32 max_txqdepth;
+ int max_msglen;
+
+ u8 ul_pipeid;
+ u8 dl_pipeid;
+};
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
+#define HTC_CONTROL_BUFFER_SIZE \
+ (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
+
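+/* htc_flags bits: a control message of the given type is in flight */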
+#define HTC_OP_START_WAIT BIT(0)
+#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
+
+struct htc_target {
+ void *hif_dev;
+ struct ath9k_htc_priv *drv_priv;
+ struct device *dev;
+ struct ath9k_htc_hif *hif;
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct completion target_wait;
+ struct completion cmd_wait;
+ struct list_head list;
+ enum htc_endpoint_id conn_rsp_epid;
+ u16 credits;
+ u16 credit_size;
+ u8 htc_flags;
+ atomic_t tgt_ready;
+};
+
+enum htc_msg_id {
+ HTC_MSG_READY_ID = 1,
+ HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
+ HTC_MSG_SETUP_COMPLETE_ID,
+ HTC_MSG_CONFIG_PIPE_ID,
+ HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
+};
+
+struct htc_service_connreq {
+ u16 service_id;
+ u16 con_flags;
+ u32 max_send_qdepth;
+ struct htc_ep_callbacks ep_callbacks;
+};
+
+/* Current service IDs */
+
+enum htc_service_group_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
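+/* e.g. WMI_CONTROL_SVC = (WMI_SERVICE_GROUP << 8) | 0 = 0x100 */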
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
+
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
+
+struct htc_conn_svc_msg {
+ __be16 msg_id;
+ __be16 service_id;
+ __be16 con_flags;
+ u8 dl_pipeid;
+ u8 ul_pipeid;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+#define HTC_SERVICE_NO_RESOURCES 3
+#define HTC_SERVICE_NO_MORE_EP 4
+
+struct htc_conn_svc_rspmsg {
+ __be16 msg_id;
+ __be16 service_id;
+ u8 status;
+ u8 endpoint_id;
+ __be16 max_msg_len;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_comp_msg {
+ __be16 msg_id;
+} __packed;
+
+int htc_init(struct htc_target *target);
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_eid);
+int htc_send(struct htc_target *target, struct sk_buff *skb);
+int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid);
+void htc_stop(struct htc_target *target);
+void htc_start(struct htc_target *target);
+void htc_sta_drain(struct htc_target *target, u8 idx);
+
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id);
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok);
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev);
+void ath9k_htc_hw_free(struct htc_target *htc);
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid, char *product,
+ u32 drv_info);
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
+
+#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 000000000..232339b05
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_HW_OPS_H
+#define ATH9K_HW_OPS_H
+
+#include "hw.h"
+
+/* Hardware core and driver accessible callbacks */
+
+static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
+ bool power_off)
+{
+ if (!ah->aspm_enabled)
+ return;
+
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, power_off);
+}
+
+static inline void ath9k_hw_rxena(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->rx_enable(ah);
+}
+
+static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
+ u32 link)
+{
+ ath9k_hw_ops(ah)->set_desc_link(ds, link);
+}
+
+static inline int ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal)
+{
+ return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
+}
+
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p)
+{
+ return ath9k_hw_ops(ah)->get_isr(ah, masked, sync_cause_p);
+}
+
+static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_info *i)
+{
+ return ath9k_hw_ops(ah)->set_txdesc(ah, ds, i);
+}
+
+static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
+}
+
+static inline int ath9k_hw_get_duration(struct ath_hw *ah, const void *ds,
+ int index)
+{
+ return ath9k_hw_ops(ah)->get_duration(ah, ds, index);
+}
+
+static inline void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ ath9k_hw_ops(ah)->antdiv_comb_conf_get(ah, antconf);
+}
+
+static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf)
+{
+ ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
+}
+
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+ ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+ if (ath9k_hw_ops(ah)->tx99_set_txpower)
+ ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+ if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
+ ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
+}
+
+static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
+{
+ if (ath9k_hw_private_ops(ah)->is_aic_enabled)
+ return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);
+
+ return false;
+}
+
+#endif
+
+/* Private hardware call ops */
+
+static inline void ath9k_hw_init_hang_checks(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_hang_checks(ah);
+}
+
+static inline bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_mac_hang(ah);
+}
+
+static inline bool ath9k_hw_detect_bb_hang(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->detect_bb_hang(ah);
+}
+
+/* PHY ops */
+
+static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
+}
+
+static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
+}
+
+static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ if (!ath9k_hw_private_ops(ah)->set_rf_regs)
+ return true;
+
+ return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
+}
+
+static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
+}
+
+static inline int ath9k_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
+}
+
+static inline void ath9k_olc_init(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->olc_init)
+ return;
+
+ return ath9k_hw_private_ops(ah)->olc_init(ah);
+}
+
+static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
+}
+
+static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
+}
+
+static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
+}
+
+static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_req(ah);
+}
+
+static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_done(ah);
+}
+
+static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->restore_chainmask)
+ return;
+
+ return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
+}
+
+static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
+}
+
+static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
+}
+
+static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
+}
+
+static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
+}
+
+static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *ini_reloaded)
+{
+ return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
+ ini_reloaded);
+}
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->set_radar_params)
+ return;
+
+ ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
+static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static inline u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static inline void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
+static inline void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
+#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
new file mode 100644
index 000000000..5e15e8e10
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -0,0 +1,3258 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio.h>
+#include <asm/unaligned.h>
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_mac.h"
+#include "ar9003_mci.h"
+#include "ar9003_phy.h"
+#include "ath9k.h"
+
+static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static void ath9k_hw_set_clockrate(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ unsigned int clockrate;
+
+ /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
+ clockrate = 117;
+ else if (!chan) /* should really check for CCK instead */
+ clockrate = ATH9K_CLOCK_RATE_CCK;
+ else if (IS_CHAN_2GHZ(chan))
+ clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
+ else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
+ clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (chan) {
+ if (IS_CHAN_HT40(chan))
+ clockrate *= 2;
+ if (IS_CHAN_HALF_RATE(chan))
+ clockrate /= 2;
+ if (IS_CHAN_QUARTER_RATE(chan))
+ clockrate /= 4;
+ }
+
+ common->clockrate = clockrate;
+}
+
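+/* Convert a duration in microseconds to MAC clock cycles */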
+static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ return usecs * common->clockrate;
+}
+
+bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
+{
+ int i;
+
+ BUG_ON(timeout < AH_TIME_QUANTUM);
+
+ for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) {
+ if ((REG_READ(ah, reg) & mask) == val)
+ return true;
+
+ udelay(AH_TIME_QUANTUM);
+ }
+
+ ath_dbg(ath9k_hw_common(ah), ANY,
+ "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
+ timeout, reg, REG_READ(ah, reg), mask, val);
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_wait);
+
+void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
+ int hw_delay)
+{
+ hw_delay /= 10;
+
+ if (IS_CHAN_HALF_RATE(chan))
+ hw_delay *= 2;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ hw_delay *= 4;
+
+ udelay(hw_delay + BASE_ACTIVATE_DELAY);
+}
+
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
+ int column, unsigned int *writecnt)
+{
+ int r;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+ for (r = 0; r < array->ia_rows; r++) {
+ REG_WRITE(ah, INI_RA(array, r, 0),
+ INI_RA(array, r, column));
+ DO_DELAY(*writecnt);
+ }
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
+{
+ u32 *tmp_reg_list, *tmp_data;
+ int i;
+
+ tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp_reg_list) {
+ dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__);
+ return;
+ }
+
+ tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp_data) {
+ dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__);
+ goto error_tmp_data;
+ }
+
+ for (i = 0; i < size; i++)
+ tmp_reg_list[i] = array[i][0];
+
+ REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);
+
+ for (i = 0; i < size; i++)
+ array[i][1] = tmp_data[i];
+
+ kfree(tmp_data);
+error_tmp_data:
+ kfree(tmp_reg_list);
+}
+
+u32 ath9k_hw_reverse_bits(u32 val, u32 n)
+{
+ u32 retval;
+ int i;
+
+ for (i = 0, retval = 0; i < n; i++) {
+ retval = (retval << 1) | (val & 1);
+ val >>= 1;
+ }
+ return retval;
+}
+
+u16 ath9k_hw_computetxtime(struct ath_hw *ah,
+ u8 phy, int kbps,
+ u32 frameLen, u16 rateix,
+ bool shortPreamble)
+{
+ u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
+
+ if (kbps == 0)
+ return 0;
+
+ switch (phy) {
+ case WLAN_RC_PHY_CCK:
+ phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
+ if (shortPreamble)
+ phyTime >>= 1;
+ numBits = frameLen << 3;
+ txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
+ break;
+ case WLAN_RC_PHY_OFDM:
+ if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) {
+ bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME_QUARTER
+ + OFDM_PREAMBLE_TIME_QUARTER
+ + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
+ } else if (ah->curchan &&
+ IS_CHAN_HALF_RATE(ah->curchan)) {
+ bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME_HALF +
+ OFDM_PREAMBLE_TIME_HALF
+ + (numSymbols * OFDM_SYMBOL_TIME_HALF);
+ } else {
+ bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
+ + (numSymbols * OFDM_SYMBOL_TIME);
+ }
+ break;
+ default:
+ ath_err(ath9k_hw_common(ah),
+ "Unknown phy %u (rate ix %u)\n", phy, rateix);
+ txTime = 0;
+ break;
+ }
+
+ return txTime;
+}
+EXPORT_SYMBOL(ath9k_hw_computetxtime);
+
+void ath9k_hw_get_channel_centers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct chan_centers *centers)
+{
+ int8_t extoff;
+
+ if (!IS_CHAN_HT40(chan)) {
+ centers->ctl_center = centers->ext_center =
+ centers->synth_center = chan->channel;
+ return;
+ }
+
+ if (IS_CHAN_HT40PLUS(chan)) {
+ centers->synth_center =
+ chan->channel + HT40_CHANNEL_CENTER_SHIFT;
+ extoff = 1;
+ } else {
+ centers->synth_center =
+ chan->channel - HT40_CHANNEL_CENTER_SHIFT;
+ extoff = -1;
+ }
+
+ centers->ctl_center =
+ centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
+ /* 25 MHz spacing is supported by hw but not on upper layers */
+ centers->ext_center =
+ centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
+}
+
+/******************/
+/* Chip Revisions */
+/******************/
+
+static void ath9k_hw_read_revisions(struct ath_hw *ah)
+{
+ u32 val;
+
+ if (ah->get_mac_revision)
+ ah->hw_version.macRev = ah->get_mac_revision();
+
+ switch (ah->hw_version.devid) {
+ case AR5416_AR9100_DEVID:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9100;
+ break;
+ case AR9300_DEVID_AR9330:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9330;
+ if (!ah->get_mac_revision) {
+ val = REG_READ(ah, AR_SREV);
+ ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
+ }
+ return;
+ case AR9300_DEVID_AR9340:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9340;
+ return;
+ case AR9300_DEVID_QCA955X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9550;
+ return;
+ case AR9300_DEVID_AR953X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+ return;
+ case AR9300_DEVID_QCA956X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+ return;
+ }
+
+ val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+
+ if (val == 0xFF) {
+ val = REG_READ(ah, AR_SREV);
+ ah->hw_version.macVersion =
+ (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
+ ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ ah->is_pciexpress = true;
+ else
+ ah->is_pciexpress = (val &
+ AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
+ } else {
+ if (!AR_SREV_9100(ah))
+ ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);
+
+ ah->hw_version.macRev = val & AR_SREV_REVISION;
+
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
+ ah->is_pciexpress = true;
+ }
+}
+
+/************************************/
+/* HW Attach, Detach, Init Routines */
+/************************************/
+
+static void ath9k_hw_disablepcie(struct ath_hw *ah)
+{
+ if (!AR_SREV_5416(ah))
+ return;
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
+
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+}
+
+/* This should work for all families including legacy */
+static bool ath9k_hw_chip_test(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regAddr[2] = { AR_STA_ID0 };
+ u32 regHold[2];
+ static const u32 patternData[4] = {
+ 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
+ };
+ int i, j, loop_max;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ loop_max = 2;
+ regAddr[1] = AR_PHY_BASE + (8 << 2);
+ } else
+ loop_max = 1;
+
+ for (i = 0; i < loop_max; i++) {
+ u32 addr = regAddr[i];
+ u32 wrData, rdData;
+
+ regHold[i] = REG_READ(ah, addr);
+ for (j = 0; j < 0x100; j++) {
+ wrData = (j << 16) | j;
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (rdData != wrData) {
+ ath_err(common,
+ "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ addr, wrData, rdData);
+ return false;
+ }
+ }
+ for (j = 0; j < 4; j++) {
+ wrData = patternData[j];
+ REG_WRITE(ah, addr, wrData);
+ rdData = REG_READ(ah, addr);
+ if (wrData != rdData) {
+ ath_err(common,
+ "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ addr, wrData, rdData);
+ return false;
+ }
+ }
+ REG_WRITE(ah, regAddr[i], regHold[i]);
+ }
+ udelay(100);
+
+ return true;
+}
+
+static void ath9k_hw_init_config(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ah->config.dma_beacon_response_time = 1;
+ ah->config.sw_beacon_response_time = 6;
+ ah->config.cwm_ignore_extcca = 0;
+ ah->config.analog_shiftreg = 1;
+
+ ah->config.rx_intr_mitigation = true;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.rimt_last = 500;
+ ah->config.rimt_first = 2000;
+ } else {
+ ah->config.rimt_last = 250;
+ ah->config.rimt_first = 700;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ ah->config.pll_pwrsave = 7;
+
+ /*
+ * We need this for PCI devices only (Cardbus, PCI, miniPCI)
+ * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
+ * This means we use it for all AR5416 devices, and the few
+ * minor PCI AR9280 devices out there.
+ *
+ * Serialization is required because these devices do not handle
+ * well the case of two concurrent reads/writes due to the latency
+ * involved. During one read/write another read/write can be issued
+ * on another CPU while the previous read/write may still be working
+ * on our hardware; if we hit this case, the hardware hangs in a loop.
+ * We prevent this by serializing reads and writes.
+ *
+ * This issue is not present on PCI-Express devices or pre-AR5416
+ * devices (legacy, 802.11abg).
+ */
+ if (num_possible_cpus() > 1)
+ ah->config.serialize_regmode = SER_REG_MODE_AUTO;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+ !ah->is_pciexpress)) {
+ ah->config.serialize_regmode = SER_REG_MODE_ON;
+ } else {
+ ah->config.serialize_regmode = SER_REG_MODE_OFF;
+ }
+ }
+
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
+ ah->config.serialize_regmode);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
+ else
+ ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
+}
+
+static void ath9k_hw_init_defaults(struct ath_hw *ah)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+
+ regulatory->country_code = CTRY_DEFAULT;
+ regulatory->power_limit = MAX_RATE_POWER;
+
+ ah->hw_version.magic = AR5416_MAGIC;
+ ah->hw_version.subvendorid = 0;
+
+ ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE |
+ AR_STA_ID1_MCAST_KSRCH;
+ if (AR_SREV_9100(ah))
+ ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
+
+ ah->slottime = ATH9K_SLOT_TIME_9;
+ ah->globaltxtimeout = (u32) -1;
+ ah->power_mode = ATH9K_PM_UNDEFINED;
+ ah->htc_reset_init = true;
+
+ ah->tpc_enabled = false;
+
+ ah->ani_function = ATH9K_ANI_ALL;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
+ else
+ ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
+}
+
+static int ath9k_hw_init_macaddr(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 sum;
+ int i;
+ u16 eeval;
+ static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
+
+ sum = 0;
+ for (i = 0; i < 3; i++) {
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
+ sum += eeval;
+ common->macaddr[2 * i] = eeval >> 8;
+ common->macaddr[2 * i + 1] = eeval & 0xff;
+ }
+ if (!is_valid_ether_addr(common->macaddr)) {
+ ath_err(common,
+ "eeprom contains invalid mac address: %pM\n",
+ common->macaddr);
+
+ random_ether_addr(common->macaddr);
+ ath_err(common,
+ "random mac address will be used: %pM\n",
+ common->macaddr);
+ }
+
+ return 0;
+}
+
+static int ath9k_hw_post_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ecode;
+
+ if (common->bus_ops->ath_bus_type != ATH_USB) {
+ if (!ath9k_hw_chip_test(ah))
+ return -ENODEV;
+ }
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ecode = ar9002_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+ }
+
+ ecode = ath9k_hw_eeprom_init(ah);
+ if (ecode != 0)
+ return ecode;
+
+ ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
+ ah->eep_ops->get_eeprom_ver(ah),
+ ah->eep_ops->get_eeprom_rev(ah));
+
+ ath9k_hw_ani_init(ah);
+
+ /*
+ * EEPROM needs to be initialized before we do this.
+ * This is required for regulatory compliance.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ if ((regdmn & 0xF0) == CTL_FCC) {
+ ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ;
+ ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ;
+ }
+ }
+
+ return 0;
+}
+
+static int ath9k_hw_attach_ops(struct ath_hw *ah)
+{
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return ar9002_hw_attach_ops(ah);
+
+ ar9003_hw_attach_ops(ah);
+ return 0;
+}
+
+/* Called for all hardware families */
+static int __ath9k_hw_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int r = 0;
+
+ ath9k_hw_read_revisions(ah);
+
+ switch (ah->hw_version.macVersion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ case AR_SREV_VERSION_9300:
+ case AR_SREV_VERSION_9330:
+ case AR_SREV_VERSION_9485:
+ case AR_SREV_VERSION_9340:
+ case AR_SREV_VERSION_9462:
+ case AR_SREV_VERSION_9550:
+ case AR_SREV_VERSION_9565:
+ case AR_SREV_VERSION_9531:
+ case AR_SREV_VERSION_9561:
+ break;
+ default:
+ ath_err(common,
+ "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
+ ah->hw_version.macVersion, ah->hw_version.macRev);
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * We need to do this to avoid RMW of this register. We cannot
+ * read the reg when chip is asleep.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+ }
+
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ ath_err(common, "Couldn't reset chip\n");
+ return -EIO;
+ }
+
+ if (AR_SREV_9565(ah)) {
+ ah->WARegVal |= AR_WA_BIT22;
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ }
+
+ ath9k_hw_init_defaults(ah);
+ ath9k_hw_init_config(ah);
+
+ r = ath9k_hw_attach_ops(ah);
+ if (r)
+ return r;
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
+ ath_err(common, "Couldn't wakeup chip\n");
+ return -EIO;
+ }
+
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9330(ah) || AR_SREV_9550(ah))
+ ah->is_pciexpress = false;
+
+ ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
+ ath9k_hw_init_cal_settings(ah);
+
+ if (!ah->is_pciexpress)
+ ath9k_hw_disablepcie(ah);
+
+ r = ath9k_hw_post_init(ah);
+ if (r)
+ return r;
+
+ ath9k_hw_init_mode_gain_regs(ah);
+ r = ath9k_hw_fill_cap_info(ah);
+ if (r)
+ return r;
+
+ r = ath9k_hw_init_macaddr(ah);
+ if (r) {
+ ath_err(common, "Failed to initialize MAC address\n");
+ return r;
+ }
+
+ ath9k_hw_init_hang_checks(ah);
+
+ common->state = ATH_HW_INITIALIZED;
+
+ return 0;
+}
+
+int ath9k_hw_init(struct ath_hw *ah)
+{
+ int ret;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* These are all the AR5008/AR9001/AR9002/AR9003 hardware families of chipsets */
+ switch (ah->hw_version.devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR5416_AR9100_DEVID:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ case AR9285_DEVID_PCIE:
+ case AR9287_DEVID_PCI:
+ case AR9287_DEVID_PCIE:
+ case AR2427_DEVID_PCIE:
+ case AR9300_DEVID_PCIE:
+ case AR9300_DEVID_AR9485_PCIE:
+ case AR9300_DEVID_AR9330:
+ case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_QCA955X:
+ case AR9300_DEVID_AR9580:
+ case AR9300_DEVID_AR9462:
+ case AR9485_DEVID_AR1111:
+ case AR9300_DEVID_AR9565:
+ case AR9300_DEVID_AR953X:
+ case AR9300_DEVID_QCA956X:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ break;
+ ath_err(common, "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
+ return -EOPNOTSUPP;
+ }
+
+ ret = __ath9k_hw_init(ah);
+ if (ret) {
+ ath_err(common,
+ "Unable to initialize hardware; initialization status: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath_dynack_init(ah);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_init);
+
+static void ath9k_hw_init_qos(struct ath_hw *ah)
+{
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
+ REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
+
+ REG_WRITE(ah, AR_QOS_NO_ACK,
+ SM(2, AR_QOS_NO_ACK_TWO_BIT) |
+ SM(5, AR_QOS_NO_ACK_BIT_OFF) |
+ SM(0, AR_QOS_NO_ACK_BYTE_OFF));
+
+ REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
+ REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i = 0;
+
+ REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+ udelay(100);
+ REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
+ udelay(100);
+
+ if (WARN_ON_ONCE(i >= 100)) {
+ ath_err(common, "PLL4 meaurement not done\n");
+ break;
+ }
+
+ i++;
+ }
+
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+static void ath9k_hw_init_pll(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = ath9k_hw_compute_pll_control(ah, chan);
+
+ if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
+ /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KD, 0x40);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KI, 0x4);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+ AR_CH0_BB_DPLL1_REFDIV, 0x5);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+ AR_CH0_BB_DPLL1_NINI, 0x58);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
+ AR_CH0_BB_DPLL1_NFRAC, 0x0);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_BB_DPLL2_OUTDIV, 0x1);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);
+
+ /* program BB PLL phase_shift to 0x6 */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
+ udelay(1000);
+ } else if (AR_SREV_9330(ah)) {
+ u32 ddr_dpll2, pll_control2, kd;
+
+ if (ah->is_clk_25mhz) {
+ ddr_dpll2 = 0x18e82f01;
+ pll_control2 = 0xe04a3d;
+ kd = 0x1d;
+ } else {
+ ddr_dpll2 = 0x19e82f01;
+ pll_control2 = 0x886666;
+ kd = 0x3d;
+ }
+
+ /* program DDR PLL ki and kd value */
+ REG_WRITE(ah, AR_CH0_DDR_DPLL2, ddr_dpll2);
+
+ /* program DDR PLL phase_shift */
+ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, 0x1);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ pll | AR_RTC_9300_PLL_BYPASS);
+ udelay(1000);
+
+ /* program refdiv, nint, frac to RTC register */
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2);
+
+ /* program BB PLL kd and ki value */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06);
+
+ /* program BB PLL phase_shift */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) {
+ u32 regval, pll2_divint, pll2_divfrac, refdiv;
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ pll | AR_RTC_9300_SOC_PLL_BYPASS);
+ udelay(1000);
+
+ REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16);
+ udelay(100);
+
+ if (ah->is_clk_25mhz) {
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
+ pll2_divint = 0x1c;
+ pll2_divfrac = 0xa3d2;
+ refdiv = 1;
+ } else {
+ pll2_divint = 0x54;
+ pll2_divfrac = 0x1eb85;
+ refdiv = 3;
+ }
+ } else {
+ if (AR_SREV_9340(ah)) {
+ pll2_divint = 88;
+ pll2_divfrac = 0;
+ refdiv = 5;
+ } else {
+ pll2_divint = 0x11;
+ pll2_divfrac = (AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah)) ?
+ 0x26665 : 0x26666;
+ refdiv = 1;
+ }
+ }
+
+ regval = REG_READ(ah, AR_PHY_PLL_MODE);
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
+ regval |= (0x1 << 22);
+ else
+ regval |= (0x1 << 16);
+ REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
+ udelay(100);
+
+ REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) |
+ (pll2_divint << 18) | pll2_divfrac);
+ udelay(100);
+
+ regval = REG_READ(ah, AR_PHY_PLL_MODE);
+ if (AR_SREV_9340(ah))
+ regval = (regval & 0x80071fff) |
+ (0x1 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x18 << 19);
+ else if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
+ regval = (regval & 0x01c00fff) |
+ (0x1 << 31) |
+ (0x2 << 29) |
+ (0xa << 25) |
+ (0x1 << 19);
+
+ if (AR_SREV_9531(ah))
+ regval |= (0x6 << 12);
+ } else
+ regval = (regval & 0x80071fff) |
+ (0x3 << 30) |
+ (0x1 << 13) |
+ (0x4 << 26) |
+ (0x60 << 19);
+ REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
+
+ if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
+ else
+ REG_WRITE(ah, AR_PHY_PLL_MODE,
+ REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
+ udelay(1000);
+ }
+
+ if (AR_SREV_9565(ah))
+ pll |= 0x40000;
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
+
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah))
+ udelay(1000);
+
+ /* Switch the core clock for AR9271 to 117 MHz */
+ if (AR_SREV_9271(ah)) {
+ udelay(500);
+ REG_WRITE(ah, 0x50040, 0x304);
+ }
+
+ udelay(RTC_PLL_SETTLE_DELAY);
+
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+}
+
+static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
+ enum nl80211_iftype opmode)
+{
+ u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 imr_reg = AR_IMR_TXERR |
+ AR_IMR_TXURN |
+ AR_IMR_RXERR |
+ AR_IMR_RXORN |
+ AR_IMR_BCNMISC;
+
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
+ sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
+
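+ /*
+ * RX completion interrupts: AR9300+ always enables the high priority
+ * ring interrupt and either mitigates or enables per-frame interrupts
+ * for the low priority ring; older chips use a single RXOK interrupt,
+ * optionally mitigated.
+ */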
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ imr_reg |= AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK_LP;
+
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK;
+ }
+
+ if (ah->config.tx_intr_mitigation)
+ imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
+ else
+ imr_reg |= AR_IMR_TXOK;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_IMR, imr_reg);
+ ah->imrs2_reg |= AR_IMR_S2_GTT;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ }
+}
+
+static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us - 2);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val);
+}
+
+void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
+}
+
+void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
+}
+
+void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
+}
+
+static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
+{
+ if (tu > 0xFFFF) {
+ ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
+ tu);
+ ah->globaltxtimeout = (u32) -1;
+ return false;
+ } else {
+ REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
+ ah->globaltxtimeout = tu;
+ return true;
+ }
+}
+
+void ath9k_hw_init_global_settings(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ath9k_channel *chan = ah->curchan;
+ int acktimeout, ctstimeout, ack_offset = 0;
+ int slottime;
+ int sifstime;
+ int rx_lat = 0, tx_lat = 0, eifs = 0;
+ u32 reg;
+
+ ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
+ ah->misc_mode);
+
+ if (!chan)
+ return;
+
+ if (ah->misc_mode != 0)
+ REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rx_lat = 41;
+ else
+ rx_lat = 37;
+ tx_lat = 54;
+
+ if (IS_CHAN_5GHZ(chan))
+ sifstime = 16;
+ else
+ sifstime = 10;
+
+ if (IS_CHAN_HALF_RATE(chan)) {
+ eifs = 175;
+ rx_lat *= 2;
+ tx_lat *= 2;
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ tx_lat += 11;
+
+ sifstime = 32;
+ ack_offset = 16;
+ slottime = 13;
+ } else if (IS_CHAN_QUARTER_RATE(chan)) {
+ eifs = 340;
+ rx_lat = (rx_lat * 4) - 1;
+ tx_lat *= 4;
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ tx_lat += 22;
+
+ sifstime = 64;
+ ack_offset = 32;
+ slottime = 21;
+ } else {
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
+ eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
+ reg = AR_USEC_ASYNC_FIFO;
+ } else {
+ eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
+ common->clockrate;
+ reg = REG_READ(ah, AR_USEC);
+ }
+ rx_lat = MS(reg, AR_USEC_RX_LAT);
+ tx_lat = MS(reg, AR_USEC_TX_LAT);
+
+ slottime = ah->slottime;
+ }
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime += 3 * ah->coverage_class;
+ acktimeout = slottime + sifstime + ack_offset;
+ ctstimeout = acktimeout;
+
+ /*
+ * Workaround for early ACK timeouts, add an offset to match the
+ * initval's 64us ack timeout value. Use 48us for the CTS timeout.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+ if (IS_CHAN_2GHZ(chan) &&
+ !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
+ acktimeout += 64 - sifstime - ah->slottime;
+ ctstimeout += 48 - sifstime - ah->slottime;
+ }
+
+ if (ah->dynack.enabled) {
+ acktimeout = ah->dynack.ackto;
+ ctstimeout = acktimeout;
+ slottime = (acktimeout - 3) / 2;
+ } else {
+ ah->dynack.ackto = acktimeout;
+ }
+
+ ath9k_hw_set_sifs_time(ah, sifstime);
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, ctstimeout);
+ if (ah->globaltxtimeout != (u32) -1)
+ ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
+
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs));
+ REG_RMW(ah, AR_USEC,
+ (common->clockrate - 1) |
+ SM(rx_lat, AR_USEC_RX_LAT) |
+ SM(tx_lat, AR_USEC_TX_LAT),
+ AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC);
+
+}
+EXPORT_SYMBOL(ath9k_hw_init_global_settings);
+
+void ath9k_hw_deinit(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (common->state < ATH_HW_INITIALIZED)
+ return;
+
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
+}
+EXPORT_SYMBOL(ath9k_hw_deinit);
+
+/*******/
+/* INI */
+/*******/
+
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
+{
+ u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
+
+ if (IS_CHAN_2GHZ(chan))
+ ctl |= CTL_11G;
+ else
+ ctl |= CTL_11A;
+
+ return ctl;
+}
+
+/****************************************/
+/* Reset and Channel Switching Routines */
+/****************************************/
+
+static inline void ath9k_hw_set_dma(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int txbuf_size;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /*
+ * set AHB_MODE not to do cacheline prefetches
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
+
+ /*
+ * let mac dma reads be in 128 byte chunks
+ */
+ REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ /*
+ * Restore TX Trigger Level to its pre-reset value.
+ * The initial value depends on whether aggregation is enabled, and is
+ * adjusted whenever underruns are detected.
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /*
+ * let mac dma writes be in 128 byte chunks
+ */
+ REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
+
+ /*
+ * Setup receive FIFO threshold to hold off TX activities
+ */
+ REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+ }
+
+ /*
+ * reduce the number of usable entries in PCU TXBUF to avoid
+ * wrap around issues.
+ */
+ if (AR_SREV_9285(ah)) {
+ /* For AR9285 the number of FIFOs is reduced by half,
+ * so reduce the usable tx buffer size by half as well
+ * to avoid data/delimiter underruns.
+ */
+ txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE;
+ } else if (AR_SREV_9340_13_OR_LATER(ah)) {
+ /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */
+ txbuf_size = AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE;
+ } else {
+ txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE;
+ }
+
+ if (!AR_SREV_9271(ah))
+ REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+
+static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
+{
+ u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
+ u32 set = AR_STA_ID1_KSRCH_MODE;
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ switch (opmode) {
+ case NL80211_IFTYPE_ADHOC:
+ if (!AR_SREV_9340_13(ah)) {
+ set |= AR_STA_ID1_ADHOC;
+ REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ }
+ /* fall through */
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ set |= AR_STA_ID1_STA_AP;
+ /* fall through */
+ case NL80211_IFTYPE_STATION:
+ REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
+ break;
+ default:
+ if (!ah->is_monitoring)
+ set = 0;
+ break;
+ }
+ REG_RMW(ah, AR_STA_ID1, set, mask);
+ REG_RMW_BUFFER_FLUSH(ah);
+}
+
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent)
+{
+ u32 coef_exp, coef_man;
+
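+ /* Find the index of the most significant set bit of the scaled coefficient */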
+ for (coef_exp = 31; coef_exp > 0; coef_exp--)
+ if ((coef_scaled >> coef_exp) & 0x1)
+ break;
+
+ coef_exp = 14 - (coef_exp - COEF_SCALE_S);
+
+ coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
+
+ *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
+ *coef_exponent = coef_exp - 16;
+}
+
+/* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues.
+ */
+static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
+{
+ int i, npend = 0;
+
+ for (i = 0; i < AR_NUM_QCU; i++) {
+ npend = ath9k_hw_numtxpending(ah, i);
+ if (npend)
+ break;
+ }
+
+ if (ah->external_reset &&
+ (npend || type == ATH9K_RESET_COLD)) {
+ int reset_err = 0;
+
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "reset MAC via external reset\n");
+
+ reset_err = ah->external_reset();
+ if (reset_err) {
+ ath_err(ath9k_hw_common(ah),
+ "External reset failed, err=%d\n",
+ reset_err);
+ return false;
+ }
+
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+ }
+
+ return true;
+}
+
+static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
+{
+ u32 rst_flags;
+ u32 tmpReg;
+
+ if (AR_SREV_9100(ah)) {
+ REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
+ AR_RTC_DERIVED_CLK_PERIOD, 1);
+ (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ if (AR_SREV_9100(ah)) {
+ rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
+ AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
+ } else {
+ tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if (AR_SREV_9340(ah))
+ tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT;
+ else
+ tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT;
+
+ if (tmpReg) {
+ u32 val;
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+
+ val = AR_RC_HOSTIF;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ val |= AR_RC_AHB;
+ REG_WRITE(ah, AR_RC, val);
+
+ } else if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB);
+
+ rst_flags = AR_RTC_RC_MAC_WARM;
+ if (type == ATH9K_RESET_COLD)
+ rst_flags |= AR_RTC_RC_MAC_COLD;
+ }
+
+ if (AR_SREV_9330(ah)) {
+ if (!ath9k_hw_ar9330_reset_war(ah, type))
+ return false;
+ }
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_check_gpm_offset(ah);
+
+ REG_WRITE(ah, AR_RTC_RC, rst_flags);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ udelay(50);
+ else if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(100);
+
+ REG_WRITE(ah, AR_RTC_RC, 0);
+ if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
+ return false;
+ }
+
+ if (!AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_RC, 0);
+
+ if (AR_SREV_9100(ah))
+ udelay(50);
+
+ return true;
+}
+
+static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
+{
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ AR_RTC_FORCE_WAKE_ON_INT);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB);
+
+ REG_WRITE(ah, AR_RTC_RESET, 0);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ udelay(2);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, 0);
+
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+
+ if (!ath9k_hw_wait(ah,
+ AR_RTC_STATUS,
+ AR_RTC_STATUS_M,
+ AR_RTC_STATUS_ON,
+ AH_WAIT_TIMEOUT)) {
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
+ return false;
+ }
+
+ return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
+}
+
+static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
+{
+ bool ret = false;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
+
+ if (!ah->reset_power_on)
+ type = ATH9K_RESET_POWER_ON;
+
+ switch (type) {
+ case ATH9K_RESET_POWER_ON:
+ ret = ath9k_hw_set_reset_power_on(ah);
+ if (ret)
+ ah->reset_power_on = true;
+ break;
+ case ATH9K_RESET_WARM:
+ case ATH9K_RESET_COLD:
+ ret = ath9k_hw_set_reset(ah, type);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static bool ath9k_hw_chip_reset(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int reset_type = ATH9K_RESET_WARM;
+
+ if (AR_SREV_9280(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ reset_type = ATH9K_RESET_POWER_ON;
+ else
+ reset_type = ATH9K_RESET_COLD;
+ } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
+ (REG_READ(ah, AR_CR) & AR_CR_RXE))
+ reset_type = ATH9K_RESET_COLD;
+
+ if (!ath9k_hw_set_reset_reg(ah, reset_type))
+ return false;
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return false;
+
+ ah->chip_fullsleep = false;
+
+ if (AR_SREV_9330(ah))
+ ar9003_hw_internal_regulator_apply(ah);
+ ath9k_hw_init_pll(ah, chan);
+
+ return true;
+}
+
+static bool ath9k_hw_channel_change(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ bool band_switch = false, mode_diff = false;
+ u8 ini_reloaded = 0;
+ u32 qnum;
+ int r;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
+ u32 flags_diff = chan->channelFlags ^ ah->curchan->channelFlags;
+ band_switch = !!(flags_diff & CHANNEL_5GHZ);
+ mode_diff = !!(flags_diff & ~CHANNEL_HT);
+ }
+
+ for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
+ if (ath9k_hw_numtxpending(ah, qnum)) {
+ ath_dbg(common, QUEUE,
+ "Transmit frames pending on queue %d\n", qnum);
+ return false;
+ }
+ }
+
+ if (!ath9k_hw_rfbus_req(ah)) {
+ ath_err(common, "Could not kill baseband RX\n");
+ return false;
+ }
+
+ if (band_switch || mode_diff) {
+ ath9k_hw_mark_phy_inactive(ah);
+ udelay(5);
+
+ if (band_switch)
+ ath9k_hw_init_pll(ah, chan);
+
+ if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
+ ath_err(common, "Failed to do fast channel change\n");
+ return false;
+ }
+ }
+
+ ath9k_hw_set_channel_regs(ah, chan);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
+ if (r) {
+ ath_err(common, "Failed to set channel\n");
+ return false;
+ }
+ ath9k_hw_set_clockrate(ah);
+ ath9k_hw_apply_txpower(ah, chan, false);
+
+ ath9k_hw_set_delta_slope(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
+
+ if (band_switch || ini_reloaded)
+ ah->eep_ops->set_board_values(ah, chan);
+
+ ath9k_hw_init_bb(ah, chan);
+ ath9k_hw_rfbus_done(ah);
+
+ if (band_switch || ini_reloaded) {
+ ah->ah_flags |= AH_FASTCC;
+ ath9k_hw_init_cal(ah, chan);
+ ah->ah_flags &= ~AH_FASTCC;
+ }
+
+ return true;
+}
+
+static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
+{
+ u32 gpio_mask = ah->gpio_mask;
+ int i;
+
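+ /* Drive each GPIO selected in gpio_mask as an output with the value from gpio_val */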
+ for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
+ if (!(gpio_mask & 1))
+ continue;
+
+ ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+ }
+}
+
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ val = REG_READ(ah, AR_NAV);
+ if (val != 0xdeadbeef && val > 0x7fff) {
+ ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+ REG_WRITE(ah, AR_NAV, 0);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
+
+bool ath9k_hw_check_alive(struct ath_hw *ah)
+{
+ int count = 50;
+ u32 reg, last_val;
+
+ if (AR_SREV_9300(ah))
+ return !ath9k_hw_detect_mac_hang(ah);
+
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ return true;
+
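+ /*
+ * Poll the MAC observation bus: a changing value, or a state outside
+ * the known stuck signatures below, means the MAC is still alive.
+ */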
+ last_val = REG_READ(ah, AR_OBS_BUS_1);
+ do {
+ reg = REG_READ(ah, AR_OBS_BUS_1);
+ if (reg != last_val)
+ return true;
+
+ udelay(1);
+ last_val = reg;
+ if ((reg & 0x7E7FFFEF) == 0x00702400)
+ continue;
+
+ switch (reg & 0x7E000B00) {
+ case 0x1E000000:
+ case 0x52000B00:
+ case 0x18000B00:
+ continue;
+ default:
+ return true;
+ }
+ } while (count-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_check_alive);
+
+static void ath9k_hw_init_mfp(struct ath_hw *ah)
+{
+ /* Setup MFP options for CCMP */
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
+ * frames when constructing CCMP AAD. */
+ REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
+ 0xc7ff);
+ if (AR_SREV_9271(ah) || AR_DEVID_7010(ah))
+ ah->sw_mgmt_crypto_tx = true;
+ else
+ ah->sw_mgmt_crypto_tx = false;
+ ah->sw_mgmt_crypto_rx = false;
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ /* Disable hardware crypto for management frames */
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
+ } else {
+ ah->sw_mgmt_crypto_tx = true;
+ ah->sw_mgmt_crypto_rx = true;
+ }
+}
+
+static void ath9k_hw_reset_opmode(struct ath_hw *ah,
+ u32 macStaId1, u32 saveDefAntenna)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_RMW(ah, AR_STA_ID1, macStaId1
+ | AR_STA_ID1_RTS_USE_DEF
+ | ah->sta_id1_defaults,
+ ~AR_STA_ID1_SADH_MASK);
+ ath_hw_setbssidmask(common);
+ REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
+ ath9k_hw_write_associd(ah);
+ REG_WRITE(ah, AR_ISR, ~0);
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+}
+
+static void ath9k_hw_init_queues(struct ath_hw *ah)
+{
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < AR_NUM_DCU; i++)
+ REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ah->intr_txqs = 0;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ ath9k_hw_resettxqueue(ah, i);
+}
+
+/*
+ * For big endian systems turn on swapping for descriptors
+ */
+static void ath9k_hw_init_desc(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9100(ah)) {
+ u32 mask;
+ mask = REG_READ(ah, AR_CFG);
+ if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
+ ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+ mask);
+ } else {
+ mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
+ REG_WRITE(ah, AR_CFG, mask);
+ ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+ REG_READ(ah, AR_CFG));
+ }
+ } else {
+ if (common->bus_ops->ath_bus_type == ATH_USB) {
+ /* Configure AR9271 target WLAN */
+ if (AR_SREV_9271(ah))
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+ else
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+ }
+#ifdef __BIG_ENDIAN
+ else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
+ REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
+ else
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+#endif
+ }
+}
+
+/*
+ * Fast channel change:
+ * (Change synthesizer based on channel freq without resetting chip)
+ */
+static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ int ret;
+
+ if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
+ goto fail;
+
+ if (ah->chip_fullsleep)
+ goto fail;
+
+ if (!ah->curchan)
+ goto fail;
+
+ if (chan->channel == ah->curchan->channel)
+ goto fail;
+
+ if ((ah->curchan->channelFlags | chan->channelFlags) &
+ (CHANNEL_HALF | CHANNEL_QUARTER))
+ goto fail;
+
+ /*
+ * If cross-band fcc (fast channel change) is not supported, bail out if the channelFlags differ.
+ */
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+ ((chan->channelFlags ^ ah->curchan->channelFlags) & ~CHANNEL_HT))
+ goto fail;
+
+ if (!ath9k_hw_check_alive(ah))
+ goto fail;
+
+ /*
+ * For AR9462, make sure that the calibration data to be
+ * re-used is present.
+ */
+ if (AR_SREV_9462(ah) && (ah->caldata &&
+ (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+ !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
+ goto fail;
+
+ ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
+ ah->curchan->channel, chan->channel);
+
+ ret = ath9k_hw_channel_change(ah, chan);
+ if (!ret)
+ goto fail;
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_2g5g_switch(ah, false);
+
+ ath9k_hw_loadnf(ah, ah->curchan);
+ ath9k_hw_start_nfcal(ah, true);
+
+ if (AR_SREV_9271(ah))
+ ar9002_hw_load_ani_reg(ah, chan);
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
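+/*
+ * Return the time elapsed between @last and @cur (or between @last and now
+ * when @cur is NULL), in microseconds.
+ */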
+u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+{
+ struct timespec ts;
+ s64 usec;
+
+ if (!cur) {
+ getrawmonotonic(&ts);
+ cur = &ts;
+ }
+
+ usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000;
+ usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000;
+
+ return (u32) usec;
+}
+EXPORT_SYMBOL(ath9k_hw_get_tsf_offset);
+
+int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata, bool fastcc)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 saveLedState;
+ u32 saveDefAntenna;
+ u32 macStaId1;
+ u64 tsf = 0;
+ s64 usec = 0;
+ int r;
+ bool start_mci_reset = false;
+ bool save_fullsleep = ah->chip_fullsleep;
+
+ if (ath9k_hw_mci_is_enabled(ah)) {
+ start_mci_reset = ar9003_mci_start_reset(ah, chan);
+ if (start_mci_reset)
+ return 0;
+ }
+
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return -EIO;
+
+ if (ah->curchan && !ah->chip_fullsleep)
+ ath9k_hw_getnf(ah, ah->curchan);
+
+ ah->caldata = caldata;
+ if (caldata && (chan->channel != caldata->channel ||
+ chan->channelFlags != caldata->channelFlags)) {
+ /* Operating channel changed, reset channel calibration data */
+ memset(caldata, 0, sizeof(*caldata));
+ ath9k_init_nfcal_hist_buffer(ah, chan);
+ } else if (caldata) {
+ clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
+ }
+ ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
+
+ if (fastcc) {
+ r = ath9k_hw_do_fastcc(ah, chan);
+ if (!r)
+ return r;
+ }
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_stop_bt(ah, save_fullsleep);
+
+ saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
+ if (saveDefAntenna == 0)
+ saveDefAntenna = 1;
+
+ macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
+
+ /* Save TSF before chip reset, a cold reset clears it */
+ tsf = ath9k_hw_gettsf64(ah);
+ usec = ktime_to_us(ktime_get_raw());
+
+ saveLedState = REG_READ(ah, AR_CFG_LED) &
+ (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
+ AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
+
+ ath9k_hw_mark_phy_inactive(ah);
+
+ ah->paprd_table_write_done = false;
+
+ /* Only required on the first reset */
+ if (AR_SREV_9271(ah) && ah->htc_reset_init) {
+ REG_WRITE(ah,
+ AR9271_RESET_POWER_DOWN_CONTROL,
+ AR9271_RADIO_RF_RST);
+ udelay(50);
+ }
+
+ if (!ath9k_hw_chip_reset(ah, chan)) {
+ ath_err(common, "Chip reset failed\n");
+ return -EINVAL;
+ }
+
+ /* Only required on the first reset */
+ if (AR_SREV_9271(ah) && ah->htc_reset_init) {
+ ah->htc_reset_init = false;
+ REG_WRITE(ah,
+ AR9271_RESET_POWER_DOWN_CONTROL,
+ AR9271_GATE_MAC_CTL);
+ udelay(50);
+ }
+
+ /* Restore TSF */
+ usec = ktime_to_us(ktime_get_raw()) - usec;
+ ath9k_hw_settsf64(ah, tsf + usec);
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_enable_async_fifo(ah);
+
+ r = ath9k_hw_process_ini(ah, chan);
+ if (r)
+ return r;
+
+ ath9k_hw_set_rfmode(ah, chan);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
+ /*
+ * Some AR91xx SoC devices frequently fail to accept TSF writes
+ * right after the chip reset. When that happens, write a new
+ * value after the initvals have been applied, with an offset
+ * based on measured time difference
+ */
+ if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
+ tsf += 1500;
+ ath9k_hw_settsf64(ah, tsf);
+ }
+
+ ath9k_hw_init_mfp(ah);
+
+ ath9k_hw_set_delta_slope(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
+ ah->eep_ops->set_board_values(ah, chan);
+
+ ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
+ if (r)
+ return r;
+
+ ath9k_hw_set_clockrate(ah);
+
+ ath9k_hw_init_queues(ah);
+ ath9k_hw_init_interrupt_masks(ah, ah->opmode);
+ ath9k_hw_ani_cache_ini_regs(ah);
+ ath9k_hw_init_qos(ah);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+
+ ath9k_hw_init_global_settings(ah);
+
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ ath9k_hw_set_dma(ah);
+
+ if (!ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_OBS, 8);
+
+ ENABLE_REG_RMW_BUFFER(ah);
+ if (ah->config.rx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
+ REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
+ }
+
+ if (ah->config.tx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
+ }
+ REG_RMW_BUFFER_FLUSH(ah);
+
+ ath9k_hw_init_bb(ah, chan);
+
+ if (caldata) {
+ clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+ clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+ }
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
+ return -EIO;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ ath9k_hw_restore_chainmask(ah);
+ REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ath9k_hw_gen_timer_start_tsf2(ah);
+
+ ath9k_hw_init_desc(ah);
+
+ if (ath9k_hw_btcoex_is_enabled(ah))
+ ath9k_hw_btcoex_enable(ah);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_check_bt(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_bb_watchdog_config(ah);
+
+ if (ah->config.hw_hang_checks & HW_PHYRESTART_CLC_WAR)
+ ar9003_hw_disable_phy_restart(ah);
+
+ ath9k_hw_apply_gpio_override(ah);
+
+ if (AR_SREV_9565(ah) && common->bt_ant_diversity)
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+ if (ah->hw->conf.radar_enabled) {
+ /* set HW specific DFS configuration */
+ ah->radar_conf.ext_channel = IS_CHAN_HT40(chan);
+ ath9k_hw_set_radar_params(ah);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_reset);
+
+/******************************/
+/* Power Management (Chipset) */
+/******************************/
+
+/*
+ * Notify that Power Mgt is disabled in self-generated frames.
+ * If requested, force the chip to sleep.
+ */
+static void ath9k_set_power_sleep(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
+ /* xxx Required for WLAN only case ? */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+ udelay(100);
+ }
+
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(100);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
+ REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ udelay(2);
+ }
+
+ /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+}
+
+/*
+ * Notify that Power Management is enabled in self-generated
+ * frames. If requested, set the power mode of the chip to
+ * auto/normal. Duration is in units of 128us (1/8 TU).
+ */
+static void ath9k_set_power_network_sleep(struct ath_hw *ah)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_ON_INT);
+ } else {
+
+ /* When the chip goes into network sleep, it could be woken
+ * up by an MCI_INT interrupt caused by BT's HW messages
+ * (LNA_xxx, CONT_xxx), which can arrive at a very fast
+ * rate (~100us). This causes the chip to leave and
+ * re-enter network sleep mode frequently, which in
+ * consequence makes the WLAN MCI HW generate lots of
+ * SYS_WAKING and SYS_SLEEPING messages and keeps the
+ * BT CPU too busy to process them.
+ */
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(30);
+ }
+
+ /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+}
+
+static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
+ if ((REG_READ(ah, AR_RTC_STATUS) &
+ AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ return false;
+ }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
+ }
+ if (AR_SREV_9100(ah))
+ REG_SET_BIT(ah, AR_RTC_RESET,
+ AR_RTC_RESET_EN);
+
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ if (AR_SREV_9100(ah))
+ mdelay(10);
+ else
+ udelay(50);
+
+ for (i = POWER_UP_TIME / 50; i > 0; i--) {
+ val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ if (val == AR_RTC_STATUS_ON)
+ break;
+ udelay(50);
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ }
+ if (i == 0) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to wakeup in %uus\n",
+ POWER_UP_TIME / 20);
+ return false;
+ }
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_power_awake(ah);
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ return true;
+}
+
+bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int status = true;
+ static const char *modes[] = {
+ "AWAKE",
+ "FULL-SLEEP",
+ "NETWORK SLEEP",
+ "UNDEFINED"
+ };
+
+ if (ah->power_mode == mode)
+ return status;
+
+ ath_dbg(common, RESET, "%s -> %s\n",
+ modes[ah->power_mode], modes[mode]);
+
+ switch (mode) {
+ case ATH9K_PM_AWAKE:
+ status = ath9k_hw_set_power_awake(ah);
+ break;
+ case ATH9K_PM_FULL_SLEEP:
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_full_sleep(ah);
+
+ ath9k_set_power_sleep(ah);
+ ah->chip_fullsleep = true;
+ break;
+ case ATH9K_PM_NETWORK_SLEEP:
+ ath9k_set_power_network_sleep(ah);
+ break;
+ default:
+ ath_err(common, "Unknown power mode %u\n", mode);
+ return false;
+ }
+ ah->power_mode = mode;
+
+ /*
+ * XXX: If this warning never comes up after a while then
+ * simply keep the ATH_DBG_WARN_ON_ONCE() but make
+ * ath9k_hw_setpower() return type void.
+ */
+
+ if (!(ah->ah_flags & AH_UNPLUGGED))
+ ATH_DBG_WARN_ON_ONCE(!status);
+
+ return status;
+}
+EXPORT_SYMBOL(ath9k_hw_setpower);
+
+/*******************/
+/* Beacon Handling */
+/*******************/
+
+void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
+{
+ int flags = 0;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ switch (ah->opmode) {
+ case NL80211_IFTYPE_ADHOC:
+ REG_SET_BIT(ah, AR_TXCFG,
+ AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
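+ /* fall through */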
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
+ TU_TO_USEC(ah->config.dma_beacon_response_time));
+ REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
+ TU_TO_USEC(ah->config.sw_beacon_response_time));
+ flags |=
+ AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
+ break;
+ default:
+ ath_dbg(ath9k_hw_common(ah), BEACON,
+ "%s: unsupported opmode: %d\n", __func__, ah->opmode);
+ return;
+ }
+
+ REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
+ REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ REG_SET_BIT(ah, AR_TIMER_MODE, flags);
+}
+EXPORT_SYMBOL(ath9k_hw_beaconinit);
+
+void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
+ const struct ath9k_beacon_state *bs)
+{
+ u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, bs->bs_nexttbtt);
+ REG_WRITE(ah, AR_BEACON_PERIOD, bs->bs_intval);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, bs->bs_intval);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ REG_RMW_FIELD(ah, AR_RSSI_THR,
+ AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
+
+ beaconintval = bs->bs_intval;
+
+ if (bs->bs_sleepduration > beaconintval)
+ beaconintval = bs->bs_sleepduration;
+
+ dtimperiod = bs->bs_dtimperiod;
+ if (bs->bs_sleepduration > dtimperiod)
+ dtimperiod = bs->bs_sleepduration;
+
+ if (beaconintval == dtimperiod)
+ nextTbtt = bs->bs_nextdtim;
+ else
+ nextTbtt = bs->bs_nexttbtt;
+
+ ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+ ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
+ ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
+ ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_NEXT_DTIM, bs->bs_nextdtim - SLEEP_SLOP);
+ REG_WRITE(ah, AR_NEXT_TIM, nextTbtt - SLEEP_SLOP);
+
+ REG_WRITE(ah, AR_SLEEP1,
+ SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
+ | AR_SLEEP1_ASSUME_DTIM);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
+ beacontimeout = (BEACON_TIMEOUT_VAL << 3);
+ else
+ beacontimeout = MIN_BEACON_TIMEOUT_VAL;
+
+ REG_WRITE(ah, AR_SLEEP2,
+ SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
+
+ REG_WRITE(ah, AR_TIM_PERIOD, beaconintval);
+ REG_WRITE(ah, AR_DTIM_PERIOD, dtimperiod);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ REG_SET_BIT(ah, AR_TIMER_MODE,
+ AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
+ AR_DTIM_TIMER_EN);
+
+ /* TSF Out of Range Threshold */
+ REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
+}
+EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
+
+/*******************/
+/* HW Capabilities */
+/*******************/
+
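+/*
+ * Restrict the EEPROM chainmask to chains the chip actually has and fall
+ * back to the full chip chainmask if the EEPROM value is empty.
+ */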
+static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
+{
+ eeprom_chainmask &= chip_chainmask;
+ if (eeprom_chainmask)
+ return eeprom_chainmask;
+ else
+ return chip_chainmask;
+}
+
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream on chipsets which have passed a series
+ * of tests. The desired test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset gets properly tested an individual commit can be used
+ * to document the testing for DFS for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+ switch (ah->hw_version.macVersion) {
+ /* for temporary testing DFS with 9280 */
+ case AR_SREV_VERSION_9280:
+ /* AR9580 will likely be our first target to get testing on */
+ case AR_SREV_VERSION_9580:
+ return true;
+ default:
+ return false;
+ }
+}
+
+int ath9k_hw_fill_cap_info(struct ath_hw *ah)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u16 eeval;
+ u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
+
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+ regulatory->current_rd = eeval;
+
+ if (ah->opmode != NL80211_IFTYPE_AP &&
+ ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
+ if (regulatory->current_rd == 0x64 ||
+ regulatory->current_rd == 0x65)
+ regulatory->current_rd += 5;
+ else if (regulatory->current_rd == 0x41)
+ regulatory->current_rd = 0x43;
+ ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
+ regulatory->current_rd);
+ }
+
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
+
+ if (eeval & AR5416_OPFLAGS_11A) {
+ if (ah->disable_5ghz)
+ ath_warn(common, "disabling 5GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
+ }
+
+ if (eeval & AR5416_OPFLAGS_11G) {
+ if (ah->disable_2ghz)
+ ath_warn(common, "disabling 2GHz band\n");
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ }
+
+ if ((pCap->hw_caps & (ATH9K_HW_CAP_2GHZ | ATH9K_HW_CAP_5GHZ)) == 0) {
+ ath_err(common, "both bands are disabled\n");
+ return -EINVAL;
+ }
+
+ if (AR_SREV_9485(ah) ||
+ AR_SREV_9285(ah) ||
+ AR_SREV_9330(ah) ||
+ AR_SREV_9565(ah))
+ pCap->chip_chainmask = 1;
+ else if (!AR_SREV_9280_20_OR_LATER(ah))
+ pCap->chip_chainmask = 7;
+ else if (!AR_SREV_9300_20_OR_LATER(ah) ||
+ AR_SREV_9340(ah) ||
+ AR_SREV_9462(ah) ||
+ AR_SREV_9531(ah))
+ pCap->chip_chainmask = 3;
+ else
+ pCap->chip_chainmask = 7;
+
+ pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
+ /*
+ * For AR9271 we will temporarily use the rx chainmask as read from
+ * the EEPROM.
+ */
+ if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
+ !(eeval & AR5416_OPFLAGS_11A) &&
+ !(AR_SREV_9271(ah)))
+ /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
+ pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
+ else if (AR_SREV_9100(ah))
+ pCap->rx_chainmask = 0x7;
+ else
+ /* Use rx_chainmask from EEPROM. */
+ pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
+
+ pCap->tx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->tx_chainmask);
+ pCap->rx_chainmask = fixup_chainmask(pCap->chip_chainmask, pCap->rx_chainmask);
+ ah->txchainmask = pCap->tx_chainmask;
+ ah->rxchainmask = pCap->rx_chainmask;
+
+ ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
+
+ /* enable key search for every frame in an aggregate */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
+
+ common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
+
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ pCap->hw_caps |= ATH9K_HW_CAP_HT;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
+
+ if (AR_SREV_9271(ah))
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_DEVID_7010(ah))
+ pCap->num_gpio_pins = AR7010_NUM_GPIO;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->num_gpio_pins = AR9300_NUM_GPIO;
+ else if (AR_SREV_9287_11_OR_LATER(ah))
+ pCap->num_gpio_pins = AR9287_NUM_GPIO;
+ else if (AR_SREV_9285_12_OR_LATER(ah))
+ pCap->num_gpio_pins = AR9285_NUM_GPIO;
+ else if (AR_SREV_9280_20_OR_LATER(ah))
+ pCap->num_gpio_pins = AR928X_NUM_GPIO;
+ else
+ pCap->num_gpio_pins = AR_NUM_GPIO;
+
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
+ pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
+ else
+ pCap->rts_aggr_limit = (8 * 1024);
+
+#ifdef CONFIG_ATH9K_RFKILL
+ ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
+ if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
+ ah->rfkill_gpio =
+ MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
+ ah->rfkill_polarity =
+ MS(ah->rfsilent, EEP_RFSILENT_POLARITY);
+
+ pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
+ }
+#endif
+ if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
+ else
+ pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
+ if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) &&
+ !AR_SREV_9561(ah) && !AR_SREV_9565(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
+
+ pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
+ pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
+ pCap->rx_status_len = sizeof(struct ar9003_rxs);
+ pCap->tx_desc_len = sizeof(struct ar9003_txc);
+ pCap->txs_len = sizeof(struct ar9003_txs);
+ } else {
+ pCap->tx_desc_len = sizeof(struct ath_desc);
+ if (AR_SREV_9280_20(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+
+ if (AR_SREV_9561(ah))
+ ah->ent_mode = 0x3BDA000;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
+
+ if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
+
+ if (AR_SREV_9285(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
+ ant_div_ctl1 =
+ ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
+ }
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
+ pCap->hw_caps |= ATH9K_HW_CAP_APM;
+ }
+
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
+ ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ if ((ant_div_ctl1 >> 0x6) == 0x3) {
+ pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
+ }
+
+ if (ath9k_hw_dfs_tested(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
+ tx_chainmask = pCap->tx_chainmask;
+ rx_chainmask = pCap->rx_chainmask;
+ while (tx_chainmask || rx_chainmask) {
+ if (tx_chainmask & BIT(0))
+ pCap->max_txchains++;
+ if (rx_chainmask & BIT(0))
+ pCap->max_rxchains++;
+
+ tx_chainmask >>= 1;
+ rx_chainmask >>= 1;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
+ pCap->hw_caps |= ATH9K_HW_CAP_MCI;
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+
+#ifdef CONFIG_ATH9K_WOW
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
+ ah->wow.max_patterns = MAX_NUM_PATTERN;
+ else
+ ah->wow.max_patterns = MAX_NUM_PATTERN_LEGACY;
+#endif
+
+ return 0;
+}
+
+/****************************/
+/* GPIO / RFKILL / Antennae */
+/****************************/
+
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
+ u32 gpio, u32 type)
+{
+ int addr;
+ u32 gpio_shift, tmp;
+
+ if (gpio > 11)
+ addr = AR_GPIO_OUTPUT_MUX3;
+ else if (gpio > 5)
+ addr = AR_GPIO_OUTPUT_MUX2;
+ else
+ addr = AR_GPIO_OUTPUT_MUX1;
+
+ gpio_shift = (gpio % 6) * 5;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)
+ || (addr != AR_GPIO_OUTPUT_MUX1)) {
+ REG_RMW(ah, addr, (type << gpio_shift),
+ (0x1f << gpio_shift));
+ } else {
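+ /*
+ * Pre-AR9280 parts need a read-modify-write of the first mux
+ * register that shifts bits [8:4] up by one before the 5-bit
+ * field for this GPIO is updated.
+ */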
+ tmp = REG_READ(ah, addr);
+ tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
+ tmp &= ~(0x1f << gpio_shift);
+ tmp |= (type << gpio_shift);
+ REG_WRITE(ah, addr, tmp);
+ }
+}
+
+void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
+{
+ u32 gpio_shift;
+
+ BUG_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (AR_DEVID_7010(ah)) {
+ gpio_shift = gpio;
+ REG_RMW(ah, AR7010_GPIO_OE,
+ (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
+ (AR7010_GPIO_OE_MASK << gpio_shift));
+ return;
+ }
+
+ gpio_shift = gpio << 1;
+ REG_RMW(ah,
+ AR_GPIO_OE_OUT,
+ (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
+ (AR_GPIO_OE_OUT_DRV << gpio_shift));
+}
+EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
+
+u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+{
+#define MS_REG_READ(x, y) \
+ (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
+
+ if (gpio >= ah->caps.num_gpio_pins)
+ return 0xffffffff;
+
+ if (AR_DEVID_7010(ah)) {
+ u32 val;
+ val = REG_READ(ah, AR7010_GPIO_IN);
+ return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
+ } else if (AR_SREV_9300_20_OR_LATER(ah))
+ return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
+ else if (AR_SREV_9271(ah))
+ return MS_REG_READ(AR9271, gpio) != 0;
+ else if (AR_SREV_9287_11_OR_LATER(ah))
+ return MS_REG_READ(AR9287, gpio) != 0;
+ else if (AR_SREV_9285_12_OR_LATER(ah))
+ return MS_REG_READ(AR9285, gpio) != 0;
+ else if (AR_SREV_9280_20_OR_LATER(ah))
+ return MS_REG_READ(AR928X, gpio) != 0;
+ else
+ return MS_REG_READ(AR, gpio) != 0;
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_get);
+
+void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
+ u32 ah_signal_type)
+{
+ u32 gpio_shift;
+
+ if (AR_DEVID_7010(ah)) {
+ gpio_shift = gpio;
+ REG_RMW(ah, AR7010_GPIO_OE,
+ (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
+ (AR7010_GPIO_OE_MASK << gpio_shift));
+ return;
+ }
+
+ ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+ gpio_shift = 2 * gpio;
+ REG_RMW(ah,
+ AR_GPIO_OE_OUT,
+ (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
+ (AR_GPIO_OE_OUT_DRV << gpio_shift));
+}
+EXPORT_SYMBOL(ath9k_hw_cfg_output);
+
+void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
+{
+ if (AR_DEVID_7010(ah)) {
+ val = val ? 0 : 1;
+ REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
+ AR_GPIO_BIT(gpio));
+ return;
+ }
+
+ if (AR_SREV_9271(ah))
+ val = ~val;
+
+ if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
+ REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
+ AR_GPIO_BIT(gpio));
+ else
+ gpio_set_value(gpio, val & 1);
+}
+EXPORT_SYMBOL(ath9k_hw_set_gpio);
+
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+{
+ if (gpio >= ah->caps.num_gpio_pins)
+ return;
+
+ gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+}
+EXPORT_SYMBOL(ath9k_hw_request_gpio);
+
+void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
+{
+ REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
+}
+EXPORT_SYMBOL(ath9k_hw_setantenna);
+
+/*********************/
+/* General Operation */
+/*********************/
+
+u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
+{
+ u32 bits = REG_READ(ah, AR_RX_FILTER);
+ u32 phybits = REG_READ(ah, AR_PHY_ERR);
+
+ if (phybits & AR_PHY_ERR_RADAR)
+ bits |= ATH9K_RX_FILTER_PHYRADAR;
+ if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
+ bits |= ATH9K_RX_FILTER_PHYERR;
+
+ return bits;
+}
+EXPORT_SYMBOL(ath9k_hw_getrxfilter);
+
+void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
+{
+ u32 phybits;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
+
+ REG_WRITE(ah, AR_RX_FILTER, bits);
+
+ phybits = 0;
+ if (bits & ATH9K_RX_FILTER_PHYRADAR)
+ phybits |= AR_PHY_ERR_RADAR;
+ if (bits & ATH9K_RX_FILTER_PHYERR)
+ phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
+ REG_WRITE(ah, AR_PHY_ERR, phybits);
+
+ if (phybits)
+ REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
+ else
+ REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setrxfilter);
+
+bool ath9k_hw_phy_disable(struct ath_hw *ah)
+{
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_bt_gain_ctrl(ah);
+
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
+ return false;
+
+ ath9k_hw_init_pll(ah, NULL);
+ ah->htc_reset_init = true;
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_phy_disable);
+
+bool ath9k_hw_disable(struct ath_hw *ah)
+{
+ if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
+ return false;
+
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
+ return false;
+
+ ath9k_hw_init_pll(ah, NULL);
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_disable);
+
+static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ enum eeprom_param gain_param;
+
+ if (IS_CHAN_2GHZ(chan))
+ gain_param = EEP_ANTENNA_GAIN_2G;
+ else
+ gain_param = EEP_ANTENNA_GAIN_5G;
+
+ return ah->eep_ops->get_eeprom(ah, gain_param);
+}
+
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+ bool test)
+{
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ struct ieee80211_channel *channel;
+ int chan_pwr, new_pwr, max_gain;
+ int ant_gain, ant_reduction = 0;
+
+ if (!chan)
+ return;
+
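+ /* All power values below are in half-dB steps, hence the "* 2" scaling */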
+ channel = chan->chan;
+ chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
+ new_pwr = min_t(int, chan_pwr, reg->power_limit);
+ max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
+
+ ant_gain = get_antenna_gain(ah, chan);
+ if (ant_gain > max_gain)
+ ant_reduction = ant_gain - max_gain;
+
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(reg, chan),
+ ant_reduction, new_pwr, test);
+}
+
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
+{
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ieee80211_channel *channel = chan->chan;
+
+ reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
+ if (test)
+ channel->max_power = MAX_RATE_POWER / 2;
+
+ ath9k_hw_apply_txpower(ah, chan, test);
+
+ if (test)
+ channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
+}
+EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
+
+void ath9k_hw_setopmode(struct ath_hw *ah)
+{
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+}
+EXPORT_SYMBOL(ath9k_hw_setopmode);
+
+void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
+{
+ REG_WRITE(ah, AR_MCAST_FIL0, filter0);
+ REG_WRITE(ah, AR_MCAST_FIL1, filter1);
+}
+EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
+
+void ath9k_hw_write_associd(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
+ REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
+ ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
+}
+EXPORT_SYMBOL(ath9k_hw_write_associd);
+
+#define ATH9K_MAX_TSF_READ 10
+
+u64 ath9k_hw_gettsf64(struct ath_hw *ah)
+{
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
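+ /* Re-read until the upper 32 bits are stable, so a lower-word
+ * rollover between the two reads cannot yield a torn TSF value.
+ */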
+ tsf_upper1 = REG_READ(ah, AR_TSF_U32);
+ for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
+ tsf_lower = REG_READ(ah, AR_TSF_L32);
+ tsf_upper2 = REG_READ(ah, AR_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ WARN_ON(i == ATH9K_MAX_TSF_READ);
+
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
+}
+EXPORT_SYMBOL(ath9k_hw_gettsf64);
+
+void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
+{
+ REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
+ REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
+}
+EXPORT_SYMBOL(ath9k_hw_settsf64);
+
+void ath9k_hw_reset_tsf(struct ath_hw *ah)
+{
+ if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
+ AH_TSF_WRITE_TIMEOUT))
+ ath_dbg(ath9k_hw_common(ah), RESET,
+ "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
+
+ REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
+}
+EXPORT_SYMBOL(ath9k_hw_reset_tsf);
+
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
+{
+ if (set)
+ ah->misc_mode |= AR_PCU_TX_ADD_TSF;
+ else
+ ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
+}
+EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
+
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 macmode;
+
+ if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
+ macmode = AR_2040_JOINED_RX_CLEAR;
+ else
+ macmode = 0;
+
+ REG_WRITE(ah, AR_2040_MODE, macmode);
+}
+
+/* HW Generic timers configuration */
+
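+/*
+ * Generic timer table: ath_gen_timer_alloc() only hands out indices from
+ * AR_FIRST_NDP_TIMER upwards, so the lower entries are placeholders; the
+ * indices above it map to the per-index NDP2 registers (AR9300 and later).
+ */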
+static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
+{
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
+ {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001},
+ {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4,
+ AR_NDP2_TIMER_MODE, 0x0002},
+ {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4,
+ AR_NDP2_TIMER_MODE, 0x0004},
+ {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4,
+ AR_NDP2_TIMER_MODE, 0x0008},
+ {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4,
+ AR_NDP2_TIMER_MODE, 0x0010},
+ {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4,
+ AR_NDP2_TIMER_MODE, 0x0020},
+ {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4,
+ AR_NDP2_TIMER_MODE, 0x0040},
+ {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4,
+ AR_NDP2_TIMER_MODE, 0x0080}
+};
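+/*
+ * Note: ath_gen_timer_alloc() only hands out indices from
+ * AR_FIRST_NDP_TIMER upwards; the lower entries above appear to be
+ * placeholders so the table can be indexed directly by timer->index.
+ */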
+
+/* HW generic timer primitives */
+
+u32 ath9k_hw_gettsf32(struct ath_hw *ah)
+{
+ return REG_READ(ah, AR_TSF_L32);
+}
+EXPORT_SYMBOL(ath9k_hw_gettsf32);
+
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ if (timer_table->tsf2_enabled) {
+ REG_SET_BIT(ah, AR_DIRECT_CONNECT, AR_DC_AP_STA_EN);
+ REG_SET_BIT(ah, AR_RESET_TSF, AR_RESET_TSF2_ONCE);
+ }
+}
+
+struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
+ void (*trigger)(void *),
+ void (*overflow)(void *),
+ void *arg,
+ u8 timer_index)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+ struct ath_gen_timer *timer;
+
+ if ((timer_index < AR_FIRST_NDP_TIMER) ||
+ (timer_index >= ATH_MAX_GEN_TIMER))
+ return NULL;
+
+ if ((timer_index > AR_FIRST_NDP_TIMER) &&
+ !AR_SREV_9300_20_OR_LATER(ah))
+ return NULL;
+
+ timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
+ if (timer == NULL)
+ return NULL;
+
+ /* allocate a hardware generic timer slot */
+ timer_table->timers[timer_index] = timer;
+ timer->index = timer_index;
+ timer->trigger = trigger;
+ timer->overflow = overflow;
+ timer->arg = arg;
+
+ if ((timer_index > AR_FIRST_NDP_TIMER) && !timer_table->tsf2_enabled) {
+ timer_table->tsf2_enabled = true;
+ ath9k_hw_gen_timer_start_tsf2(ah);
+ }
+
+ return timer;
+}
+EXPORT_SYMBOL(ath_gen_timer_alloc);
+
+void ath9k_hw_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+ u32 mask = 0;
+
+ timer_table->timer_mask |= BIT(timer->index);
+
+ /*
+ * Program generic timer registers
+ */
+ REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr,
+ timer_next);
+ REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr,
+ timer_period);
+ REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
+ gen_tmr_configuration[timer->index].mode_mask);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ /*
+ * Starting with AR9462, each generic timer can select which TSF
+ * to use. We still follow the old rule: timers 0 - 7 use TSF and
+ * 8 - 15 use TSF2.
+ */
+ if (timer->index < AR_GEN_TIMER_BANK_1_LEN)
+ REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ else
+ REG_SET_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ }
+
+ if (timer->trigger)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_TRIG);
+ if (timer->overflow)
+ mask |= SM(AR_GENTMR_BIT(timer->index),
+ AR_IMR_S5_GENTIMER_THRESH);
+
+ REG_SET_BIT(ah, AR_IMR_S5, mask);
+
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
+
+void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ /* Clear generic timer enable bits. */
+ REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
+ gen_tmr_configuration[timer->index].mode_mask);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ /*
+ * Need to switch back to TSF if it was using TSF2.
+ */
+ if (timer->index >= AR_GEN_TIMER_BANK_1_LEN) {
+ REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ }
+ }
+
+ /* Disable both trigger and thresh interrupt masks */
+ REG_CLR_BIT(ah, AR_IMR_S5,
+ (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
+ SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
+
+ timer_table->timer_mask &= ~BIT(timer->index);
+
+ if (timer_table->timer_mask == 0) {
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
+
+void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ /* free the hardware generic timer slot */
+ timer_table->timers[timer->index] = NULL;
+ kfree(timer);
+}
+EXPORT_SYMBOL(ath_gen_timer_free);
+
+/*
+ * Generic Timer Interrupts handling
+ */
+void ath_gen_timer_isr(struct ath_hw *ah)
+{
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+ struct ath_gen_timer *timer;
+ unsigned long trigger_mask, thresh_mask;
+ unsigned int index;
+
+ /* get hardware generic timer interrupt status */
+ trigger_mask = ah->intr_gen_timer_trigger;
+ thresh_mask = ah->intr_gen_timer_thresh;
+ trigger_mask &= timer_table->timer_mask;
+ thresh_mask &= timer_table->timer_mask;
+
+ for_each_set_bit(index, &thresh_mask, ARRAY_SIZE(timer_table->timers)) {
+ timer = timer_table->timers[index];
+ if (!timer)
+ continue;
+ if (!timer->overflow)
+ continue;
+
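+ /*
+ * The overflow handler takes precedence: suppress a
+ * simultaneous trigger event for the same timer below.
+ */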
+ trigger_mask &= ~BIT(index);
+ timer->overflow(timer->arg);
+ }
+
+ for_each_set_bit(index, &trigger_mask, ARRAY_SIZE(timer_table->timers)) {
+ timer = timer_table->timers[index];
+ if (!timer)
+ continue;
+ if (!timer->trigger)
+ continue;
+ timer->trigger(timer->arg);
+ }
+}
+EXPORT_SYMBOL(ath_gen_timer_isr);
+
+/***************/
+/* Chip naming */
+/***************/
+
+static struct {
+ u32 version;
+ const char *name;
+} ath_mac_bb_names[] = {
+ /* Devices with external radios */
+ { AR_SREV_VERSION_5416_PCI, "5416" },
+ { AR_SREV_VERSION_5416_PCIE, "5418" },
+ { AR_SREV_VERSION_9100, "9100" },
+ { AR_SREV_VERSION_9160, "9160" },
+ /* Single-chip solutions */
+ { AR_SREV_VERSION_9280, "9280" },
+ { AR_SREV_VERSION_9285, "9285" },
+ { AR_SREV_VERSION_9287, "9287" },
+ { AR_SREV_VERSION_9271, "9271" },
+ { AR_SREV_VERSION_9300, "9300" },
+ { AR_SREV_VERSION_9330, "9330" },
+ { AR_SREV_VERSION_9340, "9340" },
+ { AR_SREV_VERSION_9485, "9485" },
+ { AR_SREV_VERSION_9462, "9462" },
+ { AR_SREV_VERSION_9550, "9550" },
+ { AR_SREV_VERSION_9565, "9565" },
+ { AR_SREV_VERSION_9531, "9531" },
+};
+
+/* For devices with external radios */
+static struct {
+ u16 version;
+ const char *name;
+} ath_rf_names[] = {
+ { 0, "5133" },
+ { AR_RAD5133_SREV_MAJOR, "5133" },
+ { AR_RAD5122_SREV_MAJOR, "5122" },
+ { AR_RAD2133_SREV_MAJOR, "2133" },
+ { AR_RAD2122_SREV_MAJOR, "2122" }
+};
+
+/*
+ * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
+ */
+static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
+ if (ath_mac_bb_names[i].version == mac_bb_version)
+ return ath_mac_bb_names[i].name;
+ }
+
+ return "????";
+}
+
+/*
+ * Return the RF name. "????" is returned if the RF is unknown.
+ * Used for devices with external radios.
+ */
+static const char *ath9k_hw_rf_name(u16 rf_version)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
+ if (ath_rf_names[i].version == rf_version)
+ return ath_rf_names[i].name;
+ }
+
+ return "????";
+}
+
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
+{
+ int used;
+
+ /* chipsets >= AR9280 are single-chip */
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev);
+ } else {
+ used = scnprintf(hw_name, len,
+ "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+ ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+ ah->hw_version.macRev,
+ ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+ & AR_RADIO_SREV_MAJOR)),
+ ah->hw_version.phyRev);
+ }
+
+ hw_name[used] = '\0';
+}
+EXPORT_SYMBOL(ath9k_hw_name);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
new file mode 100644
index 000000000..c1d2d0340
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -0,0 +1,1215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HW_H
+#define HW_H
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+
+#include "mac.h"
+#include "ani.h"
+#include "eeprom.h"
+#include "calib.h"
+#include "reg.h"
+#include "reg_mci.h"
+#include "phy.h"
+#include "btcoex.h"
+#include "dynack.h"
+
+#include "../regd.h"
+
+#define ATHEROS_VENDOR_ID 0x168c
+
+#define AR5416_DEVID_PCI 0x0023
+#define AR5416_DEVID_PCIE 0x0024
+#define AR9160_DEVID_PCI 0x0027
+#define AR9280_DEVID_PCI 0x0029
+#define AR9280_DEVID_PCIE 0x002a
+#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
+#define AR9287_DEVID_PCI 0x002d
+#define AR9287_DEVID_PCIE 0x002e
+#define AR9300_DEVID_PCIE 0x0030
+#define AR9300_DEVID_AR9340 0x0031
+#define AR9300_DEVID_AR9485_PCIE 0x0032
+#define AR9300_DEVID_AR9580 0x0033
+#define AR9300_DEVID_AR9462 0x0034
+#define AR9300_DEVID_AR9330 0x0035
+#define AR9300_DEVID_QCA955X 0x0038
+#define AR9485_DEVID_AR1111 0x0037
+#define AR9300_DEVID_AR9565 0x0036
+#define AR9300_DEVID_AR953X 0x003d
+#define AR9300_DEVID_QCA956X 0x003f
+
+#define AR5416_AR9100_DEVID 0x000b
+
+#define AR_SUBVENDOR_ID_NOG 0x0e11
+#define AR_SUBVENDOR_ID_NEW_A 0x7065
+#define AR5416_MAGIC 0x19641014
+
+#define AR9280_COEX2WIRE_SUBSYSID 0x309b
+#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
+#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
+
+#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
+
+#define ATH_DEFAULT_NOISE_FLOOR -95
+
+#define ATH9K_RSSI_BAD -128
+
+#define ATH9K_NUM_CHANNELS 38
+
+/* Register read/write primitives */
+#define REG_WRITE(_ah, _reg, _val) \
+ (_ah)->reg_ops.write((_ah), (_val), (_reg))
+
+#define REG_READ(_ah, _reg) \
+ (_ah)->reg_ops.read((_ah), (_reg))
+
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
+ (_ah)->reg_ops.multi_read((_ah), (_addr), (_val), (_cnt))
+
+#define REG_RMW(_ah, _reg, _set, _clr) \
+ (_ah)->reg_ops.rmw((_ah), (_reg), (_set), (_clr))
+
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if ((_ah)->reg_ops.enable_write_buffer) \
+ (_ah)->reg_ops.enable_write_buffer((_ah)); \
+ } while (0)
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ do { \
+ if ((_ah)->reg_ops.write_flush) \
+ (_ah)->reg_ops.write_flush((_ah)); \
+ } while (0)
+
+#define ENABLE_REG_RMW_BUFFER(_ah) \
+ do { \
+ if ((_ah)->reg_ops.enable_rmw_buffer) \
+ (_ah)->reg_ops.enable_rmw_buffer((_ah)); \
+ } while (0)
+
+#define REG_RMW_BUFFER_FLUSH(_ah) \
+ do { \
+ if ((_ah)->reg_ops.rmw_flush) \
+ (_ah)->reg_ops.rmw_flush((_ah)); \
+ } while (0)
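+/*
+ * Typical usage of the buffered register-write helpers (a sketch;
+ * buffering is only active when the bus ops implement it, e.g. the
+ * USB/HTC target, and is a no-op otherwise):
+ *
+ *   ENABLE_REGWRITE_BUFFER(ah);
+ *   REG_WRITE(ah, reg1, val1);
+ *   REG_WRITE(ah, reg2, val2);
+ *   REGWRITE_BUFFER_FLUSH(ah);
+ */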
+
+#define PR_EEP(_s, _val) \
+ do { \
+ len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+ _s, (_val)); \
+ } while (0)
+
+#define SM(_v, _f) (((_v) << _f##_S) & _f)
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+#define REG_RMW_FIELD(_a, _r, _f, _v) \
+ REG_RMW(_a, _r, (((_v) << _f##_S) & _f), (_f))
+#define REG_READ_FIELD(_a, _r, _f) \
+ (((REG_READ(_a, _r) & _f) >> _f##_S))
+#define REG_SET_BIT(_a, _r, _f) \
+ REG_RMW(_a, _r, (_f), 0)
+#define REG_CLR_BIT(_a, _r, _f) \
+ REG_RMW(_a, _r, 0, (_f))
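+/*
+ * Illustrative example: for a field _F with mask 0x0000ff00 and shift
+ * _F_S == 8, SM(0x12, _F) yields 0x1200 and MS(0x1200, _F) returns
+ * 0x12, so REG_RMW_FIELD(ah, reg, _F, 0x12) rewrites only that field.
+ */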
+
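+/*
+ * DO_DELAY() inserts a 1 us pause after every 64th write on non-USB
+ * buses, presumably to pace long initval bursts issued through
+ * ath9k_hw_write_array() / REG_WRITE_ARRAY().
+ */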
+#define DO_DELAY(x) do { \
+ if (((++(x) % 64) == 0) && \
+ (ath9k_hw_common(ah)->bus_ops->ath_bus_type \
+ != ATH_USB)) \
+ udelay(1); \
+ } while (0)
+
+#define REG_WRITE_ARRAY(iniarray, column, regWr) \
+ ath9k_hw_write_array(ah, iniarray, column, &(regWr))
+#define REG_READ_ARRAY(ah, array, size) \
+ ath9k_hw_read_array(ah, array, size)
+
+#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
+#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
+#define AR_GPIO_OUTPUT_MUX_AS_TX_FRAME 3
+#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
+#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
+
+#define AR_GPIOD_MASK 0x00001FFF
+#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
+
+#define BASE_ACTIVATE_DELAY 100
+#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100)
+#define COEF_SCALE_S 24
+#define HT40_CHANNEL_CENTER_SHIFT 10
+
+#define ATH9K_ANTENNA0_CHAINMASK 0x1
+#define ATH9K_ANTENNA1_CHAINMASK 0x2
+
+#define ATH9K_NUM_DMA_DEBUG_REGS 8
+#define ATH9K_NUM_QUEUES 10
+
+#define MAX_RATE_POWER 63
+#define AH_WAIT_TIMEOUT 100000 /* (us) */
+#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */
+#define AH_TIME_QUANTUM 10
+#define AR_KEYTABLE_SIZE 128
+#define POWER_UP_TIME 10000
+#define SPUR_RSSI_THRESH 40
+#define UPPER_5G_SUB_BAND_START 5700
+#define MID_5G_SUB_BAND_START 5400
+
+#define CAB_TIMEOUT_VAL 10
+#define BEACON_TIMEOUT_VAL 10
+#define MIN_BEACON_TIMEOUT_VAL 1
+#define SLEEP_SLOP TU_TO_USEC(3)
+
+#define INIT_CONFIG_STATUS 0x00000000
+#define INIT_RSSI_THR 0x00000700
+#define INIT_BCON_CNTRL_REG 0x00000000
+
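+/* One TU (802.11 time unit) is 1024 microseconds, hence the shift by 10. */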
+#define TU_TO_USEC(_tu) ((_tu) << 10)
+
+#define ATH9K_HW_RX_HP_QDEPTH 16
+#define ATH9K_HW_RX_LP_QDEPTH 128
+
+#define PAPRD_GAIN_TABLE_ENTRIES 32
+#define PAPRD_TABLE_SZ 24
+#define PAPRD_IDEAL_AGC2_PWR_RANGE 0xe0
+
+/*
+ * Wake on Wireless
+ */
+
+/* Keep Alive Frame */
+#define KAL_FRAME_LEN 28
+#define KAL_FRAME_TYPE 0x2 /* data frame */
+#define KAL_FRAME_SUB_TYPE 0x4 /* null data frame */
+#define KAL_DURATION_ID 0x3d
+#define KAL_NUM_DATA_WORDS 6
+#define KAL_NUM_DESC_WORDS 12
+#define KAL_ANTENNA_MODE 1
+#define KAL_TO_DS 1
+#define KAL_DELAY 4 /* delay of 4ms between 2 KAL frames */
+#define KAL_TIMEOUT 900
+
+#define MAX_PATTERN_SIZE 256
+#define MAX_PATTERN_MASK_SIZE 32
+#define MAX_NUM_PATTERN 16
+#define MAX_NUM_PATTERN_LEGACY 8
+#define MAX_NUM_USER_PATTERN 6 /* excluding the disassociation and
+ deauthentication patterns */
+
+/*
+ * WoW trigger mapping to hardware code
+ */
+
+#define AH_WOW_USER_PATTERN_EN BIT(0)
+#define AH_WOW_MAGIC_PATTERN_EN BIT(1)
+#define AH_WOW_LINK_CHANGE BIT(2)
+#define AH_WOW_BEACON_MISS BIT(3)
+
+enum ath_hw_txq_subtype {
+ ATH_TXQ_AC_BK = 0,
+ ATH_TXQ_AC_BE = 1,
+ ATH_TXQ_AC_VI = 2,
+ ATH_TXQ_AC_VO = 3,
+};
+
+enum ath_ini_subsys {
+ ATH_INI_PRE = 0,
+ ATH_INI_CORE,
+ ATH_INI_POST,
+ ATH_INI_NUM_SPLIT,
+};
+
+enum ath9k_hw_caps {
+ ATH9K_HW_CAP_HT = BIT(0),
+ ATH9K_HW_CAP_RFSILENT = BIT(1),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
+ ATH9K_HW_CAP_EDMA = BIT(4),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
+ ATH9K_HW_CAP_LDPC = BIT(6),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(7),
+ ATH9K_HW_CAP_SGI_20 = BIT(8),
+ ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
+ ATH9K_HW_CAP_2GHZ = BIT(11),
+ ATH9K_HW_CAP_5GHZ = BIT(12),
+ ATH9K_HW_CAP_APM = BIT(13),
+#ifdef CONFIG_ATH9K_PCOEM
+ ATH9K_HW_CAP_RTT = BIT(14),
+ ATH9K_HW_CAP_MCI = BIT(15),
+ ATH9K_HW_CAP_BT_ANT_DIV = BIT(17),
+#else
+ ATH9K_HW_CAP_RTT = 0,
+ ATH9K_HW_CAP_MCI = 0,
+ ATH9K_HW_CAP_BT_ANT_DIV = 0,
+#endif
+ ATH9K_HW_CAP_DFS = BIT(18),
+ ATH9K_HW_CAP_PAPRD = BIT(19),
+ ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(20),
+};
+
+/*
+ * WoW device capabilities
+ * @ATH9K_HW_WOW_DEVICE_CAPABLE: device revision is capable of WoW.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_EXACT: device can match an exact
+ * user-defined pattern or a de-authentication/disassociation pattern.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device matches only the first four
+ * bytes of a pattern, for user-defined as well as de-authentication
+ * and disassociation patterns, on all frames of those types that are
+ * received.
+ */
+
+struct ath9k_hw_wow {
+ u32 wow_event_mask;
+ u32 wow_event_mask2;
+ u8 max_patterns;
+};
+
+struct ath9k_hw_capabilities {
+ u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
+ u16 rts_aggr_limit;
+ u8 tx_chainmask;
+ u8 rx_chainmask;
+ u8 chip_chainmask;
+ u8 max_txchains;
+ u8 max_rxchains;
+ u8 num_gpio_pins;
+ u8 rx_hp_qdepth;
+ u8 rx_lp_qdepth;
+ u8 rx_status_len;
+ u8 tx_desc_len;
+ u8 txs_len;
+};
+
+#define AR_NO_SPUR 0x8000
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+
+enum ath9k_hw_hang_checks {
+ HW_BB_WATCHDOG = BIT(0),
+ HW_PHYRESTART_CLC_WAR = BIT(1),
+ HW_BB_RIFS_HANG = BIT(2),
+ HW_BB_DFS_HANG = BIT(3),
+ HW_BB_RX_CLEAR_STUCK_HANG = BIT(4),
+ HW_MAC_HANG = BIT(5),
+};
+
+#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
+#define AR_PCIE_PLL_PWRSAVE_ON_D3 BIT(1)
+#define AR_PCIE_PLL_PWRSAVE_ON_D0 BIT(2)
+#define AR_PCIE_CDR_PWRSAVE_ON_D3 BIT(3)
+#define AR_PCIE_CDR_PWRSAVE_ON_D0 BIT(4)
+
+struct ath9k_ops_config {
+ int dma_beacon_response_time;
+ int sw_beacon_response_time;
+ u32 cwm_ignore_extcca;
+ u32 pcie_waen;
+ u8 analog_shiftreg;
+ u32 ofdm_trig_low;
+ u32 ofdm_trig_high;
+ u32 cck_trig_high;
+ u32 cck_trig_low;
+ u32 enable_paprd;
+ int serialize_regmode;
+ bool rx_intr_mitigation;
+ bool tx_intr_mitigation;
+ u8 max_txtrig_level;
+ u16 ani_poll_interval; /* ANI poll interval in ms */
+ u16 hw_hang_checks;
+ u16 rimt_first;
+ u16 rimt_last;
+
+ /* Platform specific config */
+ u32 aspm_l1_fix;
+ u32 xlna_gpio;
+ u32 ant_ctrl_comm2g_switch_enable;
+ bool xatten_margin_cfg;
+ bool alt_mingainidx;
+ u8 pll_pwrsave;
+ bool tx_gain_buffalo;
+ bool led_active_high;
+};
+
+enum ath9k_int {
+ ATH9K_INT_RX = 0x00000001,
+ ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXHP = 0x00000001,
+ ATH9K_INT_RXLP = 0x00000002,
+ ATH9K_INT_RXNOFRM = 0x00000008,
+ ATH9K_INT_RXEOL = 0x00000010,
+ ATH9K_INT_RXORN = 0x00000020,
+ ATH9K_INT_TX = 0x00000040,
+ ATH9K_INT_TXDESC = 0x00000080,
+ ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_MCI = 0x00000200,
+ ATH9K_INT_BB_WATCHDOG = 0x00000400,
+ ATH9K_INT_TXURN = 0x00000800,
+ ATH9K_INT_MIB = 0x00001000,
+ ATH9K_INT_RXPHY = 0x00004000,
+ ATH9K_INT_RXKCM = 0x00008000,
+ ATH9K_INT_SWBA = 0x00010000,
+ ATH9K_INT_BMISS = 0x00040000,
+ ATH9K_INT_BNR = 0x00100000,
+ ATH9K_INT_TIM = 0x00200000,
+ ATH9K_INT_DTIM = 0x00400000,
+ ATH9K_INT_DTIMSYNC = 0x00800000,
+ ATH9K_INT_GPIO = 0x01000000,
+ ATH9K_INT_CABEND = 0x02000000,
+ ATH9K_INT_TSFOOR = 0x04000000,
+ ATH9K_INT_GENTIMER = 0x08000000,
+ ATH9K_INT_CST = 0x10000000,
+ ATH9K_INT_GTT = 0x20000000,
+ ATH9K_INT_FATAL = 0x40000000,
+ ATH9K_INT_GLOBAL = 0x80000000,
+ ATH9K_INT_BMISC = ATH9K_INT_TIM |
+ ATH9K_INT_DTIM |
+ ATH9K_INT_DTIMSYNC |
+ ATH9K_INT_TSFOOR |
+ ATH9K_INT_CABEND,
+ ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
+ ATH9K_INT_RXDESC |
+ ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN |
+ ATH9K_INT_TXURN |
+ ATH9K_INT_TXDESC |
+ ATH9K_INT_MIB |
+ ATH9K_INT_RXPHY |
+ ATH9K_INT_RXKCM |
+ ATH9K_INT_SWBA |
+ ATH9K_INT_BMISS |
+ ATH9K_INT_GPIO,
+ ATH9K_INT_NOCARD = 0xffffffff
+};
+
+#define MAX_RTT_TABLE_ENTRY 6
+#define MAX_IQCAL_MEASUREMENT 8
+#define MAX_CL_TAB_ENTRY 16
+#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
+
+enum ath9k_cal_flags {
+ RTT_DONE,
+ PAPRD_PACKET_SENT,
+ PAPRD_DONE,
+ NFCAL_PENDING,
+ NFCAL_INTF,
+ TXIQCAL_DONE,
+ TXCLCAL_DONE,
+ SW_PKDET_DONE,
+};
+
+struct ath9k_hw_cal_data {
+ u16 channel;
+ u16 channelFlags;
+ unsigned long cal_flags;
+ int32_t CalValid;
+ int8_t iCoff;
+ int8_t qCoff;
+ u8 caldac[2];
+ u16 small_signal_gain[AR9300_MAX_CHAINS];
+ u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
+ u32 num_measures[AR9300_MAX_CHAINS];
+ int tx_corr_coeff[MAX_IQCAL_MEASUREMENT][AR9300_MAX_CHAINS];
+ u32 tx_clcal[AR9300_MAX_CHAINS][MAX_CL_TAB_ENTRY];
+ u32 rtt_table[AR9300_MAX_CHAINS][MAX_RTT_TABLE_ENTRY];
+ struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+};
+
+struct ath9k_channel {
+ struct ieee80211_channel *chan;
+ u16 channel;
+ u16 channelFlags;
+ s16 noisefloor;
+};
+
+#define CHANNEL_5GHZ BIT(0)
+#define CHANNEL_HALF BIT(1)
+#define CHANNEL_QUARTER BIT(2)
+#define CHANNEL_HT BIT(3)
+#define CHANNEL_HT40PLUS BIT(4)
+#define CHANNEL_HT40MINUS BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
+ (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+ (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
+
+enum ath9k_power_mode {
+ ATH9K_PM_AWAKE = 0,
+ ATH9K_PM_FULL_SLEEP,
+ ATH9K_PM_NETWORK_SLEEP,
+ ATH9K_PM_UNDEFINED
+};
+
+enum ser_reg_mode {
+ SER_REG_MODE_OFF = 0,
+ SER_REG_MODE_ON = 1,
+ SER_REG_MODE_AUTO = 2,
+};
+
+enum ath9k_rx_qtype {
+ ATH9K_RX_QUEUE_HP,
+ ATH9K_RX_QUEUE_LP,
+ ATH9K_RX_QUEUE_MAX,
+};
+
+struct ath9k_beacon_state {
+ u32 bs_nexttbtt;
+ u32 bs_nextdtim;
+ u32 bs_intval;
+#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
+ u32 bs_dtimperiod;
+ u16 bs_bmissthreshold;
+ u32 bs_sleepduration;
+ u32 bs_tsfoor_threshold;
+};
+
+struct chan_centers {
+ u16 synth_center;
+ u16 ctl_center;
+ u16 ext_center;
+};
+
+enum {
+ ATH9K_RESET_POWER_ON,
+ ATH9K_RESET_WARM,
+ ATH9K_RESET_COLD,
+};
+
+struct ath9k_hw_version {
+ u32 magic;
+ u16 devid;
+ u16 subvendorid;
+ u32 macVersion;
+ u16 macRev;
+ u16 phyRev;
+ u16 analog5GhzRev;
+ u16 analog2GhzRev;
+ enum ath_usb_dev usbdev;
+};
+
+/* Generic TSF timer definitions */
+
+#define ATH_MAX_GEN_TIMER 16
+
+#define AR_GENTMR_BIT(_index) (1 << (_index))
+
+struct ath_gen_timer_configuration {
+ u32 next_addr;
+ u32 period_addr;
+ u32 mode_addr;
+ u32 mode_mask;
+};
+
+struct ath_gen_timer {
+ void (*trigger)(void *arg);
+ void (*overflow)(void *arg);
+ void *arg;
+ u8 index;
+};
+
+struct ath_gen_timer_table {
+ struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
+ u16 timer_mask;
+ bool tsf2_enabled;
+};
+
+struct ath_hw_antcomb_conf {
+ u8 main_lna_conf;
+ u8 alt_lna_conf;
+ u8 fast_div_bias;
+ u8 main_gaintb;
+ u8 alt_gaintb;
+ int lna1_lna2_delta;
+ int lna1_lna2_switch_delta;
+ u8 div_group;
+};
+
+/**
+ * struct ath_hw_radar_conf - radar detection initialization parameters
+ *
+ * @pulse_inband: threshold for checking the ratio of in-band power
+ * to total power for short radar pulses (half dB steps)
+ * @pulse_inband_step: threshold for checking an in-band power to total
+ * power ratio increase for short radar pulses (half dB steps)
+ * @pulse_height: threshold for detecting the beginning of a short
+ * radar pulse (dB step)
+ * @pulse_rssi: threshold for detecting if a short radar pulse is
+ * gone (dB step)
+ * @pulse_maxlen: maximum pulse length (0.8 us steps)
+ *
+ * @radar_rssi: RSSI threshold for starting long radar detection (dB steps)
+ * @radar_inband: threshold for checking the ratio of in-band power
+ * to total power for long radar pulses (half dB steps)
+ * @fir_power: threshold for detecting the end of a long radar pulse (dB)
+ *
+ * @ext_channel: enable extension channel radar detection
+ */
+struct ath_hw_radar_conf {
+ unsigned int pulse_inband;
+ unsigned int pulse_inband_step;
+ unsigned int pulse_height;
+ unsigned int pulse_rssi;
+ unsigned int pulse_maxlen;
+
+ unsigned int radar_rssi;
+ unsigned int radar_inband;
+ int fir_power;
+
+ bool ext_channel;
+};
+
+/**
+ * struct ath_hw_private_ops - callbacks used internally by hardware code
+ *
+ * This structure contains private callbacks designed to only be used internally
+ * by the hardware core.
+ *
+ * @init_cal_settings: setup types of calibrations supported
+ * @init_cal: starts actual calibration
+ *
+ * @init_mode_gain_regs: Initialize TX/RX gain registers
+ *
+ * @rf_set_freq: change frequency
+ * @spur_mitigate_freq: spur mitigation
+ * @set_rf_regs:
+ * @compute_pll_control: compute the PLL control value to use for
+ * AR_RTC_PLL_CONTROL for a given channel
+ * @setup_calibration: set up calibration
+ * @iscal_supported: used to query if a type of calibration is supported
+ *
+ * @ani_cache_ini_regs: cache the values for ANI from the initial
+ * register settings through the register initialization.
+ */
+struct ath_hw_private_ops {
+ void (*init_hang_checks)(struct ath_hw *ah);
+ bool (*detect_mac_hang)(struct ath_hw *ah);
+ bool (*detect_bb_hang)(struct ath_hw *ah);
+
+ /* Calibration ops */
+ void (*init_cal_settings)(struct ath_hw *ah);
+ bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ void (*init_mode_gain_regs)(struct ath_hw *ah);
+ void (*setup_calibration)(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+
+ /* PHY ops */
+ int (*rf_set_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ void (*spur_mitigate_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*set_rf_regs)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+ void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*init_bb)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*olc_init)(struct ath_hw *ah);
+ void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*mark_phy_inactive)(struct ath_hw *ah);
+ void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
+ bool (*rfbus_req)(struct ath_hw *ah);
+ void (*rfbus_done)(struct ath_hw *ah);
+ void (*restore_chainmask)(struct ath_hw *ah);
+ u32 (*compute_pll_control)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
+ int param);
+ void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*set_radar_params)(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf);
+ int (*fast_chan_change)(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 *ini_reloaded);
+
+ /* ANI */
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ bool (*is_aic_enabled)(struct ath_hw *ah);
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+};
+
+/**
+ * struct ath_spec_scan - parameters for Atheros spectral scan
+ *
+ * @enabled: enable/disable spectral scan
+ * @short_repeat: controls whether the chip is in spectral scan mode
+ * for 4 usec (enabled) or 204 usec (disabled)
+ * @count: number of scan results requested. There are special meanings
+ * in some chip revisions:
+ * AR92xx: highest bit set (>=128) for endless mode
+ * (spectral scan won't stop until explicitly disabled)
+ * AR9300 and newer: 0 for endless mode
+ * @endless: true if endless mode is intended. Otherwise, count value is
+ * corrected to the next possible value.
+ * @period: time duration between successive spectral scan entry points
+ * (period*256*Tclk). Tclk = ath_common->clockrate
+ * @fft_period: PHY passes FFT frames to MAC every (fft_period+1)*4uS
+ *
+ * Note: Tclk = 40MHz or 44MHz depending upon operating mode.
+ * Typically it's 44MHz in 2/5GHz on later chips, but there's
+ * a "fast clock" check for this in 5GHz.
+ *
+ */
+struct ath_spec_scan {
+ bool enabled;
+ bool short_repeat;
+ bool endless;
+ u8 count;
+ u8 period;
+ u8 fft_period;
+};
+
+/**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+ *
+ * This structure contains callbacks designed to be used internally by
+ * hardware code and also by the lower level driver.
+ *
+ * @config_pci_powersave:
+ * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @spectral_scan_config: set parameters for spectral scan and enable/disable it
+ * @spectral_scan_trigger: trigger a spectral scan run
+ * @spectral_scan_wait: wait for a spectral scan run to finish
+ */
+struct ath_hw_ops {
+ void (*config_pci_powersave)(struct ath_hw *ah,
+ bool power_off);
+ void (*rx_enable)(struct ath_hw *ah);
+ void (*set_desc_link)(void *ds, u32 link);
+ int (*calibrate)(struct ath_hw *ah, struct ath9k_channel *chan,
+ u8 rxchainmask, bool longcal);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
+ u32 *sync_cause_p);
+ void (*set_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_info *i);
+ int (*proc_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts);
+ int (*get_duration)(struct ath_hw *ah, const void *ds, int index);
+ void (*antdiv_comb_conf_get)(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf);
+ void (*antdiv_comb_conf_set)(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf);
+ void (*spectral_scan_config)(struct ath_hw *ah,
+ struct ath_spec_scan *param);
+ void (*spectral_scan_trigger)(struct ath_hw *ah);
+ void (*spectral_scan_wait)(struct ath_hw *ah);
+
+ void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+ void (*tx99_stop)(struct ath_hw *ah);
+ void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
+#endif
+};
+
+struct ath_nf_limits {
+ s16 max;
+ s16 min;
+ s16 nominal;
+};
+
+enum ath_cal_list {
+ TX_IQ_CAL = BIT(0),
+ TX_IQ_ON_AGC_CAL = BIT(1),
+ TX_CL_CAL = BIT(2),
+};
+
+/* ah_flags */
+#define AH_USE_EEPROM 0x1
+#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
+#define AH_FASTCC 0x4
+#define AH_NO_EEP_SWAP 0x8 /* Do not swap EEPROM data */
+
+struct ath_hw {
+ struct ath_ops reg_ops;
+
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_common common;
+ struct ath9k_hw_version hw_version;
+ struct ath9k_ops_config config;
+ struct ath9k_hw_capabilities caps;
+ struct ath9k_channel channels[ATH9K_NUM_CHANNELS];
+ struct ath9k_channel *curchan;
+
+ union {
+ struct ar5416_eeprom_def def;
+ struct ar5416_eeprom_4k map4k;
+ struct ar9287_eeprom map9287;
+ struct ar9300_eeprom ar9300_eep;
+ } eeprom;
+ const struct eeprom_ops *eep_ops;
+
+ bool sw_mgmt_crypto_tx;
+ bool sw_mgmt_crypto_rx;
+ bool is_pciexpress;
+ bool aspm_enabled;
+ bool is_monitoring;
+ bool need_an_top2_fixup;
+ u16 tx_trig_level;
+
+ u32 nf_regs[6];
+ struct ath_nf_limits nf_2g;
+ struct ath_nf_limits nf_5g;
+ u16 rfsilent;
+ u32 rfkill_gpio;
+ u32 rfkill_polarity;
+ u32 ah_flags;
+
+ bool reset_power_on;
+ bool htc_reset_init;
+
+ enum nl80211_iftype opmode;
+ enum ath9k_power_mode power_mode;
+
+ s8 noise;
+ struct ath9k_hw_cal_data *caldata;
+ struct ath9k_pacal_info pacal_info;
+ struct ar5416Stats stats;
+ struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
+
+ enum ath9k_int imask;
+ u32 imrs2_reg;
+ u32 txok_interrupt_mask;
+ u32 txerr_interrupt_mask;
+ u32 txdesc_interrupt_mask;
+ u32 txeol_interrupt_mask;
+ u32 txurn_interrupt_mask;
+ atomic_t intr_ref_cnt;
+ bool chip_fullsleep;
+ u32 modes_index;
+
+ /* Calibration */
+ u32 supp_cals;
+ struct ath9k_cal_list iq_caldata;
+ struct ath9k_cal_list adcgain_caldata;
+ struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list *cal_list;
+ struct ath9k_cal_list *cal_list_last;
+ struct ath9k_cal_list *cal_list_curr;
+#define totalPowerMeasI meas0.unsign
+#define totalPowerMeasQ meas1.unsign
+#define totalIqCorrMeas meas2.sign
+#define totalAdcIOddPhase meas0.unsign
+#define totalAdcIEvenPhase meas1.unsign
+#define totalAdcQOddPhase meas2.unsign
+#define totalAdcQEvenPhase meas3.unsign
+#define totalAdcDcOffsetIOddPhase meas0.sign
+#define totalAdcDcOffsetIEvenPhase meas1.sign
+#define totalAdcDcOffsetQOddPhase meas2.sign
+#define totalAdcDcOffsetQEvenPhase meas3.sign
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } meas0;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } meas1;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } meas2;
+ union {
+ u32 unsign[AR5416_MAX_CHAINS];
+ int32_t sign[AR5416_MAX_CHAINS];
+ } meas3;
+ u16 cal_samples;
+ u8 enabled_cals;
+
+ u32 sta_id1_defaults;
+ u32 misc_mode;
+
+ /* Private to hardware code */
+ struct ath_hw_private_ops private_ops;
+ /* Accessed by the lower level driver */
+ struct ath_hw_ops ops;
+
+ /* Used to program the radio on non single-chip devices */
+ u32 *analogBank6Data;
+
+ int coverage_class;
+ u32 slottime;
+ u32 globaltxtimeout;
+
+ /* ANI */
+ u32 aniperiod;
+ enum ath9k_ani_cmd ani_function;
+ u32 ani_skip_count;
+ struct ar5416AniState ani;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ struct ath_btcoex_hw btcoex_hw;
+#endif
+
+ u32 intr_txqs;
+ u8 txchainmask;
+ u8 rxchainmask;
+
+ struct ath_hw_radar_conf radar_conf;
+
+ u32 originalGain[22];
+ int initPDADC;
+ int PDADCdelta;
+ int led_pin;
+ u32 gpio_mask;
+ u32 gpio_val;
+
+ struct ar5416IniArray ini_dfs;
+ struct ar5416IniArray iniModes;
+ struct ar5416IniArray iniCommon;
+ struct ar5416IniArray iniBB_RfGain;
+ struct ar5416IniArray iniBank6;
+ struct ar5416IniArray iniAddac;
+ struct ar5416IniArray iniPcieSerdes;
+ struct ar5416IniArray iniPcieSerdesLowPower;
+ struct ar5416IniArray iniModesFastClock;
+ struct ar5416IniArray iniAdditional;
+ struct ar5416IniArray iniModesRxGain;
+ struct ar5416IniArray ini_modes_rx_gain_bounds;
+ struct ar5416IniArray iniModesTxGain;
+ struct ar5416IniArray iniCckfirNormal;
+ struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray iniModes_9271_ANI_reg;
+ struct ar5416IniArray ini_radio_post_sys2ant;
+ struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+ struct ar5416IniArray ini_modes_rxgain_bb_core;
+ struct ar5416IniArray ini_modes_rxgain_bb_postamble;
+
+ struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
+
+ u32 intr_gen_timer_trigger;
+ u32 intr_gen_timer_thresh;
+ struct ath_gen_timer_table hw_gen_timers;
+
+ struct ar9003_txs *ts_ring;
+ u32 ts_paddr_start;
+ u32 ts_paddr_end;
+ u16 ts_tail;
+ u16 ts_size;
+
+ u32 bb_watchdog_last_status;
+ u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+ u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
+
+ unsigned int paprd_target_power;
+ unsigned int paprd_training_power;
+ unsigned int paprd_ratemask;
+ unsigned int paprd_ratemask_ht40;
+ bool paprd_table_write_done;
+ u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
+ u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
+ /*
+ * Store the permanent value of Reg 0x4004 in WARegVal
+ * so we don't have to R/M/W. We should not be reading
+ * this register when in sleep states.
+ */
+ u32 WARegVal;
+
+ /* Enterprise mode cap */
+ u32 ent_mode;
+
+#ifdef CONFIG_ATH9K_WOW
+ struct ath9k_hw_wow wow;
+#endif
+ bool is_clk_25mhz;
+ int (*get_mac_revision)(void);
+ int (*external_reset)(void);
+ bool disable_2ghz;
+ bool disable_5ghz;
+
+ const struct firmware *eeprom_blob;
+
+ struct ath_dynack dynack;
+
+ bool tpc_enabled;
+ u8 tx_power[Ar5416RateSize];
+ u8 tx_power_stbc[Ar5416RateSize];
+};
+
+struct ath_bus_ops {
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ void (*bt_coex_prep)(struct ath_common *common);
+ void (*aspm_init)(struct ath_common *common);
+};
+
+static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
+{
+ return &ah->common;
+}
+
+static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
+{
+ return &(ath9k_hw_common(ah)->regulatory);
+}
+
+static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
+{
+ return &ah->private_ops;
+}
+
+static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
+{
+ return &ah->ops;
+}
+
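+/*
+ * Count the active chains in the low three bits of a chainmask,
+ * e.g. a mask of 0x5 (chains 0 and 2) yields 2 streams.
+ */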
+static inline u8 get_streams(int mask)
+{
+ return !!(mask & BIT(0)) + !!(mask & BIT(1)) + !!(mask & BIT(2));
+}
+
+/* Initialization, Detach, Reset */
+void ath9k_hw_deinit(struct ath_hw *ah);
+int ath9k_hw_init(struct ath_hw *ah);
+int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ struct ath9k_hw_cal_data *caldata, bool fastcc);
+int ath9k_hw_fill_cap_info(struct ath_hw *ah);
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
+
+/* GPIO / RFKILL / Antennae */
+void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
+u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
+void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
+ u32 ah_signal_type);
+void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
+void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
+void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
+
+/* General Operation */
+void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
+ int hw_delay);
+bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
+ int column, unsigned int *writecnt);
+void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
+u32 ath9k_hw_reverse_bits(u32 val, u32 n);
+u16 ath9k_hw_computetxtime(struct ath_hw *ah,
+ u8 phy, int kbps,
+ u32 frameLen, u16 rateix, bool shortPreamble);
+void ath9k_hw_get_channel_centers(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct chan_centers *centers);
+u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
+void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
+bool ath9k_hw_phy_disable(struct ath_hw *ah);
+bool ath9k_hw_disable(struct ath_hw *ah);
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
+void ath9k_hw_setopmode(struct ath_hw *ah);
+void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
+void ath9k_hw_write_associd(struct ath_hw *ah);
+u32 ath9k_hw_gettsf32(struct ath_hw *ah);
+u64 ath9k_hw_gettsf64(struct ath_hw *ah);
+void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
+void ath9k_hw_reset_tsf(struct ath_hw *ah);
+u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
+void ath9k_hw_init_global_settings(struct ath_hw *ah);
+u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
+void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
+ const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
+bool ath9k_hw_check_alive(struct ath_hw *ah);
+
+bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
+
+/* Generic hw timer primitives */
+struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
+ void (*trigger)(void *),
+ void (*overflow)(void *),
+ void *arg,
+ u8 timer_index);
+void ath9k_hw_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period);
+void ath9k_hw_gen_timer_start_tsf2(struct ath_hw *ah);
+void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
+
+void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
+void ath_gen_timer_isr(struct ath_hw *hw);
+
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+
+/* PHY */
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent);
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+ bool test);
+
+/*
+ * Code Specific to AR5008, AR9001 or AR9002,
+ * we stuff these here to avoid callbacks for AR9003.
+ */
+int ar9002_hw_rf_claim(struct ath_hw *ah);
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+
+/*
+ * Code specific to AR9003, we stuff these here to avoid callbacks
+ * for older families
+ */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
+void ar9003_paprd_enable(struct ath_hw *ah, bool val);
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *caldata,
+ int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+ struct ath9k_hw_cal_data *caldata, int chain);
+void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+int ar9003_paprd_init_table(struct ath_hw *ah);
+bool ar9003_paprd_is_done(struct ath_hw *ah);
+bool ar9003_is_paprd_enabled(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
+ struct ath9k_channel *chan);
+void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
+ struct ath9k_channel *chan, int ht40_delta);
+
+/* Hardware family op attach helpers */
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
+
+int ar9002_hw_attach_ops(struct ath_hw *ah);
+void ar9003_hw_attach_ops(struct ath_hw *ah);
+
+void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
+
+void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
+void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
+
+void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us);
+void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
+void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
+static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.enabled;
+}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return ah->common.btcoex_enabled &&
+ (ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+
+}
+void ath9k_hw_btcoex_enable(struct ath_hw *ah);
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.scheme;
+}
+#else
+static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
+{
+}
+static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
+{
+ return false;
+}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return false;
+}
+static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
+{
+}
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ATH_BTCOEX_CFG_NONE;
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+
+#ifdef CONFIG_ATH9K_WOW
+int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len);
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah);
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable);
+#else
+static inline int ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
+ u8 *user_pattern,
+ u8 *user_mask,
+ int pattern_count,
+ int pattern_len)
+{
+ return 0;
+}
+static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ return 0;
+}
+static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+}
+#endif
+
+#define ATH9K_CLOCK_RATE_CCK 22
+#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
+#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000..f8d11efa7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,1061 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/ath9k_platform.h>
+#include <linux/module.h>
+#include <linux/relay.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "ath9k.h"
+
+struct ath9k_eeprom_ctx {
+ struct completion complete;
+ struct ath_hw *ah;
+};
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int ath9k_modparam_nohwcrypt;
+module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+int ath9k_led_blink;
+module_param_named(blink, ath9k_led_blink, int, 0444);
+MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+
+static int ath9k_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
+static int ath9k_bt_ant_diversity;
+module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
+MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+
+int ath9k_use_chanctx;
+module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
+MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
+
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
+bool is_ath9k_unloaded;
+
+#ifdef CONFIG_MAC80211_LEDS
+static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
+
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+static void ath9k_op_ps_wakeup(struct ath_common *common)
+{
+ ath9k_ps_wakeup((struct ath_softc *) common->priv);
+}
+
+static void ath9k_op_ps_restore(struct ath_common *common)
+{
+ ath9k_ps_restore((struct ath_softc *) common->priv);
+}
+
+static struct ath_ps_ops ath9k_ps_ops = {
+ .wakeup = ath9k_op_ps_wakeup,
+ .restore = ath9k_op_ps_restore,
+};
+
+/*
+ * Reads and writes share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only, as the FIFO
+ * on these devices can only sanely accept two requests at a time.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ val[i] = ath9k_ioread32(hw_priv, addr[i]);
+}
+
+
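+/*
+ * Unlocked read-modify-write helper: clears the bits in 'clr', then
+ * sets the bits in 'set', and returns the value written back. Callers
+ * that need serialization take sc->sc_serial_rw around it.
+ */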
+static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
+ u32 set, u32 clr)
+{
+ u32 val;
+
+ val = ioread32(sc->mem + reg_offset);
+ val &= ~clr;
+ val |= set;
+ iowrite32(val, sc->mem + reg_offset);
+
+ return val;
+}
+
+static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ unsigned long uninitialized_var(flags);
+ u32 val;
+
+ if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
+
+ return val;
+}
+
+/**************************/
+/* Initialization */
+/**************************/
+
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+ ath_reg_notifier_apply(wiphy, request, reg);
+
+ /* Set tx power */
+ if (!ah->curchan)
+ return;
+
+ sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
+ /* synchronize DFS detector if regulatory domain changed */
+ if (sc->dfs_detector != NULL)
+ sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
+ request->dfs_region);
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * This function allocates both the DMA descriptor memory and the
+ * driver buffer structures that reference it. These hold the
+ * descriptors used by the hardware.
+ */
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc, bool is_tx)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 *ds;
+ int i, bsize, desc_len;
+
+ ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+
+ if (is_tx)
+ desc_len = sc->sc_ah->caps.tx_desc_len;
+ else
+ desc_len = sizeof(struct ath_desc);
+
+ /* ath_desc must be a multiple of DWORDs */
+ if ((desc_len % 4) != 0) {
+ ath_err(common, "ath_desc not DWORD aligned\n");
+ BUG_ON((desc_len % 4) != 0);
+ return -ENOMEM;
+ }
+
+ dd->dd_desc_len = desc_len * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * desc_len;
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ }
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
+ ds = (u8 *) dd->dd_desc;
+ ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ if (is_tx) {
+ struct ath_buf *bf;
+
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ } else {
+ struct ath_rxbuf *bf;
+
+ bsize = sizeof(struct ath_rxbuf) * nbuf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += (desc_len * ndesc);
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ }
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ int i = 0;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ ath_cabq_update(sc);
+
+ sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
+ sc->tx.txq_map[i]->mac80211_qnum = i;
+ sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
+ }
+ return 0;
+}
+
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
+ sc->beacon.bslot[i] = NULL;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
+ sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
+
+ sc->spec_priv.ah = sc->sc_ah;
+ sc->spec_priv.spec_config.enabled = 0;
+ sc->spec_priv.spec_config.short_repeat = true;
+ sc->spec_priv.spec_config.count = 8;
+ sc->spec_priv.spec_config.endless = false;
+ sc->spec_priv.spec_config.period = 0xFF;
+ sc->spec_priv.spec_config.fft_period = 0xF;
+}
+
+static void ath9k_init_pcoem_platform(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!IS_ENABLED(CONFIG_ATH9K_PCOEM))
+ return;
+
+ if (common->bus_ops->ath_bus_type != ATH_PCI)
+ return;
+
+ if (sc->driver_data & (ATH9K_PCI_CUS198 |
+ ATH9K_PCI_CUS230)) {
+ ah->config.xlna_gpio = 9;
+ ah->config.xatten_margin_cfg = true;
+ ah->config.alt_mingainidx = true;
+ ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
+ sc->ant_comb.low_rssi_thresh = 20;
+ sc->ant_comb.fast_div_bias = 3;
+
+ ath_info(common, "Set parameters for %s\n",
+ (sc->driver_data & ATH9K_PCI_CUS198) ?
+ "CUS198" : "CUS230");
+ }
+
+ if (sc->driver_data & ATH9K_PCI_CUS217)
+ ath_info(common, "CUS217 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_CUS252)
+ ath_info(common, "CUS252 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+ ath_info(common, "WB335 1-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+ ath_info(common, "WB335 2-ANT card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_KILLER)
+ ath_info(common, "Killer Wireless card detected\n");
+
+ /*
+ * Some WB335 cards do not support antenna diversity. Since
+ * we use a hardcoded value for AR9565 instead of using the
+ * EEPROM/OTP data, remove the combining feature from
+ * the HW capabilities bitmap.
+ */
+ if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+ if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+ pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+ }
+
+ if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
+ pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
+ ath_info(common, "Set BT/WLAN RX diversity capability\n");
+ }
+
+ if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
+ ah->config.pcie_waen = 0x0040473b;
+ ath_info(common, "Enable WAR for ASPM D3/L1\n");
+ }
+
+ /*
+ * The default value of pll_pwrsave is 1.
+ * For certain AR9485 cards, it is set to 0.
+ * For AR9462, AR9565 it's set to 7.
+ */
+ ah->config.pll_pwrsave = 1;
+
+ if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
+ ah->config.pll_pwrsave = 0;
+ ath_info(common, "Disable PLL PowerSave\n");
+ }
+
+ if (sc->driver_data & ATH9K_PCI_LED_ACT_HI)
+ ah->config.led_active_high = true;
+}
+
+static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
+ void *ctx)
+{
+ struct ath9k_eeprom_ctx *ec = ctx;
+
+ if (eeprom_blob)
+ ec->ah->eeprom_blob = eeprom_blob;
+
+ complete(&ec->complete);
+}
+
+static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
+{
+ struct ath9k_eeprom_ctx ec;
+	struct ath_hw *ah = sc->sc_ah;
+ int err;
+
+ /* try to load the EEPROM content asynchronously */
+ init_completion(&ec.complete);
+ ec.ah = sc->sc_ah;
+
+ err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
+ &ec, ath9k_eeprom_request_cb);
+ if (err < 0) {
+ ath_err(ath9k_hw_common(ah),
+ "EEPROM request failed\n");
+ return err;
+ }
+
+ wait_for_completion(&ec.complete);
+
+ if (!ah->eeprom_blob) {
+ ath_err(ath9k_hw_common(ah),
+ "Unable to load EEPROM file %s\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath9k_eeprom_release(struct ath_softc *sc)
+{
+ release_firmware(sc->sc_ah->eeprom_blob);
+}
+
+static int ath9k_init_soc_platform(struct ath_softc *sc)
+{
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+ struct ath_hw *ah = sc->sc_ah;
+ int ret = 0;
+
+ if (!pdata)
+ return 0;
+
+ if (pdata->eeprom_name) {
+ ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->tx_gain_buffalo)
+ ah->config.tx_gain_buffalo = true;
+
+ return ret;
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+ struct ath_hw *ah = NULL;
+ struct ath9k_hw_capabilities *pCap;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->dev = sc->dev;
+ ah->hw = sc->hw;
+ ah->hw_version.devid = devid;
+ ah->reg_ops.read = ath9k_ioread32;
+ ah->reg_ops.multi_read = ath9k_multi_ioread32;
+ ah->reg_ops.write = ath9k_iowrite32;
+ ah->reg_ops.rmw = ath9k_reg_rmw;
+ pCap = &ah->caps;
+
+ common = ath9k_hw_common(ah);
+
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ sc->sc_ah = ah;
+ sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+ sc->tx99_power = MAX_RATE_POWER + 1;
+ init_waitqueue_head(&sc->tx_wait);
+ sc->cur_chan = &sc->chanctx[0];
+ if (!ath9k_is_chanctx_enabled())
+ sc->cur_chan->hw_queue_base = 0;
+
+ if (!pdata || pdata->use_eeprom) {
+ ah->ah_flags |= AH_USE_EEPROM;
+ sc->sc_ah->led_pin = -1;
+ } else {
+ sc->sc_ah->gpio_mask = pdata->gpio_mask;
+ sc->sc_ah->gpio_val = pdata->gpio_val;
+ sc->sc_ah->led_pin = pdata->led_pin;
+ ah->is_clk_25mhz = pdata->is_clk_25mhz;
+ ah->get_mac_revision = pdata->get_mac_revision;
+ ah->external_reset = pdata->external_reset;
+ ah->disable_2ghz = pdata->disable_2ghz;
+ ah->disable_5ghz = pdata->disable_5ghz;
+ if (!pdata->endian_check)
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+ }
+
+ common->ops = &ah->reg_ops;
+ common->bus_ops = bus_ops;
+ common->ps_ops = &ath9k_ps_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+ common->btcoex_enabled = ath9k_btcoex_enable == 1;
+ common->disable_ani = false;
+
+ /*
+ * Platform quirks.
+ */
+ ath9k_init_pcoem_platform(sc);
+
+ ret = ath9k_init_soc_platform(sc);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable WLAN/BT RX Antenna diversity only when:
+ *
+ * - BTCOEX is disabled.
+ * - the user manually requests the feature.
+ * - the HW cap is set using the platform data.
+ */
+ if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
+ (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
+ common->bt_ant_diversity = 1;
+
+ spin_lock_init(&common->cc_lock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ spin_lock_init(&sc->chan_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
+ (unsigned long)sc);
+
+ setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+
+ ath9k_init_channel_context(sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ /* Initializes the hardware for all supported chipsets */
+ ret = ath9k_hw_init(ah);
+ if (ret)
+ goto err_hw;
+
+ if (pdata && pdata->macaddr)
+ memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ret = ath9k_cmn_init_channels_rates(common);
+ if (ret)
+ goto err_btcoex;
+
+ ret = ath9k_init_p2p(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_cmn_init_crypto(sc->sc_ah);
+ ath9k_init_misc(sc);
+ ath_fill_led_pin(sc);
+ ath_chanctx_init(sc);
+ ath9k_offchannel_init(sc);
+
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_hw_deinit(ah);
+err_hw:
+ ath9k_eeprom_release(sc);
+ dev_kfree_skb_any(sc->tx99_skb);
+ return ret;
+}
+
+static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct cfg80211_chan_def chandef;
+ int i;
+
+ sband = &common->sbands[band];
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ ah->curchan = &ah->channels[chan->hw_value];
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ ath9k_cmn_get_channel(sc->hw, ah, &chandef);
+ ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
+ }
+}
+
+static void ath9k_init_txpower_limits(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *curchan = ah->curchan;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+
+ ah->curchan = curchan;
+}
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 8, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_limit wds_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
+};
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+
+static const struct ieee80211_iface_limit if_limits_multi[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+};
+
+static const struct ieee80211_iface_combination if_comb_multi[] = {
+ {
+ .limits = if_limits_multi,
+ .n_limits = ARRAY_SIZE(if_limits_multi),
+ .max_interfaces = 2,
+ .num_different_channels = 2,
+ .beacon_int_infra_match = true,
+ },
+};
+
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
+static const struct ieee80211_iface_limit if_dfs_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC) },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+ {
+ .limits = wds_limits,
+ .n_limits = ARRAY_SIZE(wds_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+#ifdef CONFIG_ATH9K_DFS_CERTIFIED
+ {
+ .limits = if_dfs_limits,
+ .n_limits = ARRAY_SIZE(if_dfs_limits),
+ .max_interfaces = 1,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40),
+ }
+#endif
+};
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_is_chanctx_enabled())
+ return;
+
+ hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+ hw->queues = ATH9K_NUM_TX_QUEUES;
+ hw->offchannel_tx_hw_queue = hw->queues - 1;
+	hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_WDS);
+ hw->wiphy->iface_combinations = if_comb_multi;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
+ hw->wiphy->max_scan_ssids = 255;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->max_remain_on_channel_duration = 10000;
+ hw->chanctx_data_size = sizeof(void *);
+ hw->extra_beacon_tailroom =
+ sizeof(struct ieee80211_p2p_noa_attr) + 9;
+
+ ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
+}
+#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+
+static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_RC_TABLE |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ hw->radiotap_mcs_details |=
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_P2P_GO_CTWIN;
+
+ if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_WDS);
+
+ hw->wiphy->iface_combinations = if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ }
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
+ hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
+
+ /* single chain devices with rx diversity */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
+ hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);
+
+ sc->ant_rx = hw->wiphy->available_antennas_rx;
+ sc->ant_tx = hw->wiphy->available_antennas_tx;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &common->sbands[IEEE80211_BAND_2GHZ];
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &common->sbands[IEEE80211_BAND_5GHZ];
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ ath9k_set_mcc_capab(sc, hw);
+#endif
+ ath9k_init_wow(hw);
+ ath9k_cmn_reload_chainmask(ah);
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, bus_ops);
+ if (error)
+ return error;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto deinit;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto deinit;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto deinit;
+
+ ath9k_init_txpower_limits(sc);
+
+#ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
+ ARRAY_SIZE(ath9k_tpt_blink));
+#endif
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto rx_cleanup;
+
+ error = ath9k_init_debug(ah);
+ if (error) {
+ ath_err(common, "Unable to create debugfs files\n");
+ goto unregister;
+ }
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto debug_cleanup;
+ }
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+debug_cleanup:
+ ath9k_deinit_debug(sc);
+unregister:
+ ieee80211_unregister_hw(hw);
+rx_cleanup:
+ ath_rx_cleanup(sc);
+deinit:
+ ath9k_deinit_softc(sc);
+ return error;
+}
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ ath9k_deinit_p2p(sc);
+ ath9k_deinit_btcoex(sc);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ del_timer_sync(&sc->sleep_timer);
+ ath9k_hw_deinit(sc->sc_ah);
+ if (sc->dfs_detector != NULL)
+ sc->dfs_detector->exit(sc->dfs_detector);
+
+ ath9k_eeprom_release(sc);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ ath9k_ps_restore(sc);
+
+ ath9k_deinit_debug(sc);
+ ath9k_deinit_wow(hw);
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ error = ath_pci_init();
+ if (error < 0) {
+ pr_err("No PCI devices found, driver not installed\n");
+ error = -ENODEV;
+ goto err_out;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ is_ath9k_unloaded = true;
+ ath_ahb_exit();
+ ath_pci_exit();
+ pr_info("%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
new file mode 100644
index 000000000..90631d768
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*
+ * TX polling - checks if the TX engine is stuck somewhere
+ * and issues a chip reset if so.
+ */
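+/*
+ * The detection is a simple two-pass scheme: on one poll a non-empty
+ * queue is only marked via axq_tx_inprogress; if the same queue is
+ * still non-empty and still marked on the next poll, no completion
+ * happened in between and a chip reset is queued.
+ */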
+void ath_tx_complete_poll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ tx_complete_work.work);
+ struct ath_txq *txq;
+ int i;
+ bool needreset = false;
+
+
+ if (sc->tx99_state) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "skip tx hung detection on tx99\n");
+ return;
+ }
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+
+ ath_txq_lock(sc, txq);
+ if (txq->axq_depth) {
+ if (txq->axq_tx_inprogress) {
+ needreset = true;
+ ath_txq_unlock(sc, txq);
+ break;
+ } else {
+ txq->axq_tx_inprogress = true;
+ }
+ }
+ ath_txq_unlock(sc, txq);
+ }
+
+ if (needreset) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "tx hung, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+ return;
+ }
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+ msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
+/*
+ * Checks if the BB/MAC is hung.
+ */
+bool ath_hw_check(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ enum ath_reset_type type;
+ bool is_alive;
+
+ ath9k_ps_wakeup(sc);
+
+ is_alive = ath9k_hw_check_alive(sc->sc_ah);
+
+ if (!is_alive) {
+ ath_dbg(common, RESET,
+ "HW hang detected, schedule chip reset\n");
+ type = RESET_TYPE_MAC_HANG;
+ ath9k_queue_reset(sc, type);
+ }
+
+ ath9k_ps_restore(sc);
+
+ return is_alive;
+}
+
+/*
+ * PLL-WAR for AR9485/AR9340
+ */
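+/*
+ * The WAR counts consecutive PLL sqsum samples at or above 0x40000;
+ * three bad samples in a row (roughly three ATH_PLL_WORK_INTERVAL
+ * periods, see ath_hw_pll_work below) are treated as an RX hang and
+ * trigger a chip reset.
+ */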
+static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
+{
+ static int count;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (pll_sqsum >= 0x40000) {
+ count++;
+ if (count == 3) {
+ ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_PLL_HANG);
+ count = 0;
+ return true;
+ }
+ } else {
+ count = 0;
+ }
+
+ return false;
+}
+
+void ath_hw_pll_work(struct work_struct *work)
+{
+ u32 pll_sqsum;
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ /*
+	 * Ensure that the PLL WAR is executed only after the STA is
+	 * associated, or after beaconing has started on interfaces
+	 * that use beacons.
+ */
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
+ return;
+
+ if (sc->tx99_state)
+ return;
+
+ ath9k_ps_wakeup(sc);
+ pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
+ ath9k_ps_restore(sc);
+ if (ath_hw_pll_rx_hang_check(sc, pll_sqsum))
+ return;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+}
+
+/*
+ * PA Pre-distortion.
+ */
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ int chain;
+
+ if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
+ ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
+ return;
+ }
+
+ ar9003_paprd_enable(ah, false);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ ar9003_paprd_populate_single_table(ah, caldata, chain);
+ }
+
+ ath_dbg(common, CALIBRATE, "Activating PAPRD\n");
+ ar9003_paprd_enable(ah, true);
+}
+
+static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int time_left;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
+
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = sc->cur_chandef.chan->band;
+ tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.rates[0].idx = 0;
+ tx_info->control.rates[0].count = 1;
+ tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ tx_info->control.rates[1].idx = -1;
+
+ init_completion(&sc->paprd_complete);
+ txctl.paprd = BIT(chain);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+
+ if (!time_left)
+ ath_dbg(common, CALIBRATE,
+ "Timeout waiting for paprd training on TX chain %d\n",
+ chain);
+
+ return !!time_left;
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ftype;
+ int chain_ok = 0;
+ int chain;
+ int len = 1800;
+ int ret;
+
+ if (!caldata ||
+ !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+ test_bit(PAPRD_DONE, &caldata->cal_flags)) {
+ ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
+ return;
+ }
+
+ ath9k_ps_wakeup(sc);
+
+ if (ar9003_paprd_init_table(ah) < 0)
+ goto fail_paprd;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail_paprd;
+
+ skb_put(skb, len);
+ memset(skb->data, 0, len);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+ hdr->frame_control = cpu_to_le16(ftype);
+ hdr->duration_id = cpu_to_le16(10);
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ chain_ok = 0;
+ ar9003_paprd_setup_gain_table(ah, chain);
+
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD training frame on chain %d\n", chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
+
+ if (!ar9003_paprd_is_done(ah)) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD not yet done on chain %d\n", chain);
+ break;
+ }
+
+ ret = ar9003_paprd_create_curve(ah, caldata, chain);
+ if (ret == -EINPROGRESS) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD curve on chain %d needs to be re-trained\n",
+ chain);
+ break;
+ } else if (ret) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD create curve failed on chain %d\n",
+ chain);
+ break;
+ }
+
+ chain_ok = 1;
+ }
+ kfree_skb(skb);
+
+ if (chain_ok) {
+ set_bit(PAPRD_DONE, &caldata->cal_flags);
+ ath_paprd_activate(sc);
+ }
+
+fail_paprd:
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * ANI performs periodic noise floor calibration that is used to adjust
+ * and optimize chip performance, taking environmental changes
+ * (location, temperature) into account. When the task completes, it
+ * reschedules itself at the appropriate interval calculated from the
+ * calibrations that are still pending.
+ */
+void ath_ani_calibrate(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval, long_cal_interval;
+ unsigned long flags;
+
+ if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
+ long_cal_interval = ATH_LONG_CALINTERVAL_INT;
+ else
+ long_cal_interval = ATH_LONG_CALINTERVAL;
+
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
+ if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
+ goto set_timer;
+ }
+ ah->ani_skip_count = 0;
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags &= ~PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ ath9k_ps_wakeup(sc);
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
+ longcal = true;
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
+ shortcal = true;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Call ANI routine if necessary */
+ if (aniflag) {
+ spin_lock(&common->cc_lock);
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+ ath_update_survey_stats(sc);
+ spin_unlock(&common->cc_lock);
+ }
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ int ret = ath9k_hw_calibrate(ah, ah->curchan, ah->rxchainmask,
+ longcal);
+ if (ret < 0) {
+ common->ani.caldone = 0;
+ ath9k_queue_reset(sc, RESET_TYPE_CALIBRATION);
+ return;
+ }
+
+ common->ani.caldone = ret;
+ }
+
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
+ ath9k_ps_restore(sc);
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
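+	/*
+	 * Illustrative numbers (not taken from the headers): with an ANI
+	 * poll interval of 100 ms and a short calibration interval of
+	 * 100 ms, an uncalibrated station re-arms the timer every
+	 * min(ATH_LONG_CALINTERVAL, 100, 100) = 100 ms, while a fully
+	 * calibrated one falls back to the ANI poll interval alone.
+	 */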
+ cal_interval = ATH_LONG_CALINTERVAL;
+ cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+
+ if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
+ if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
+ ieee80211_queue_work(sc->hw, &sc->paprd_work);
+ } else if (!ah->paprd_table_write_done) {
+ ath9k_ps_wakeup(sc);
+ ath_paprd_activate(sc);
+ ath9k_ps_restore(sc);
+ }
+ }
+}
+
+void ath_start_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ if (common->disable_ani ||
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
+ sc->cur_chan->offchannel)
+ return;
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ath_dbg(common, ANI, "Starting ANI\n");
+ mod_timer(&common->ani.timer,
+ jiffies + msecs_to_jiffies((u32)ah->config.ani_poll_interval));
+}
+
+void ath_stop_ani(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, ANI, "Stopping ANI\n");
+ del_timer_sync(&common->ani.timer);
+}
+
+void ath_check_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+
+ /*
+ * Check for the various conditions in which ANI has to
+ * be stopped.
+ */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ if (!cur_conf->enable_beacon)
+ goto stop_ani;
+ } else if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (!cur_conf->enable_beacon) {
+ /*
+ * Disable ANI only when there are no
+ * associated stations.
+ */
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
+ goto stop_ani;
+ }
+ } else if (ah->opmode == NL80211_IFTYPE_STATION) {
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
+ goto stop_ani;
+ }
+
+ if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
+ ath_start_ani(sc);
+ }
+
+ return;
+
+stop_ani:
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
+ ath_stop_ani(sc);
+}
+
+void ath_update_survey_nf(struct ath_softc *sc, int channel)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *chan = &ah->channels[channel];
+ struct survey_info *survey = &sc->survey[channel];
+
+ if (chan->noisefloor) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = ath9k_hw_getchan_noise(ah, chan,
+ chan->noisefloor);
+ }
+}
+
+/*
+ * Updates the survey statistics and returns the busy time since the
+ * last update, in percent, if the measurement duration was long enough
+ * for the result to be useful; -1 otherwise.
+ */
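+/*
+ * A sketch of the math, assuming common->clockrate is in MHz: with
+ * clockrate = 44 the divider is 44000, so the cycle counters are
+ * converted to milliseconds, and the busy percentage is simply
+ * rx_busy * 100 / cycles once at least one millisecond worth of
+ * cycles (cycles >= div) has accumulated.
+ */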
+int ath_update_survey_stats(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int pos = ah->curchan - &ah->channels[0];
+ struct survey_info *survey = &sc->survey[pos];
+ struct ath_cycle_counters *cc = &common->cc_survey;
+ unsigned int div = common->clockrate * 1000;
+ int ret = 0;
+
+ if (!ah->curchan)
+ return -1;
+
+ if (ah->power_mode == ATH9K_PM_AWAKE)
+ ath_hw_cycle_counters_update(common);
+
+ if (cc->cycles > 0) {
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time += cc->cycles / div;
+ survey->time_busy += cc->rx_busy / div;
+ survey->time_rx += cc->rx_frame / div;
+ survey->time_tx += cc->tx_frame / div;
+ }
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
+ memset(cc, 0, sizeof(*cc));
+
+ ath_update_survey_nf(sc, pos);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
new file mode 100644
index 000000000..bba85d1a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -0,0 +1,976 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include <linux/export.h>
+
+static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
+ struct ath9k_tx_queue_info *qi)
+{
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
+ ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
+ ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
+ ah->txurn_interrupt_mask);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_IMR_S0,
+ SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
+ | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
+ REG_WRITE(ah, AR_IMR_S1,
+ SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
+ | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
+
+ ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
+ ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+}
+
+u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
+{
+ return REG_READ(ah, AR_QTXDP(q));
+}
+EXPORT_SYMBOL(ath9k_hw_gettxbuf);
+
+void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
+{
+ REG_WRITE(ah, AR_QTXDP(q), txdp);
+}
+EXPORT_SYMBOL(ath9k_hw_puttxbuf);
+
+void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
+{
+ ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
+ REG_WRITE(ah, AR_Q_TXE, 1 << q);
+}
+EXPORT_SYMBOL(ath9k_hw_txstart);
+
+u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
+{
+ u32 npend;
+
+ npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
+ if (npend == 0) {
+
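+		/*
+		 * The pending frame count can momentarily read zero
+		 * while TXE is still asserted for this queue; report
+		 * one frame pending in that window so callers do not
+		 * declare the queue idle too early.
+		 */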
+ if (REG_READ(ah, AR_Q_TXE) & (1 << q))
+ npend = 1;
+ }
+
+ return npend;
+}
+EXPORT_SYMBOL(ath9k_hw_numtxpending);
+
+/**
+ * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
+ *
+ * @ah: atheros hardware struct
+ * @bIncTrigLevel: whether the frame trigger level should be increased
+ *	(true) or decreased (false)
+ *
+ * The frame trigger level specifies the minimum number of bytes,
+ * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
+ * before the PCU will initiate sending the frame on the air. This can
+ * mean we initiate transmit before a full frame is on the PCU TX FIFO.
+ * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
+ * first)
+ *
+ * Caution must be taken to ensure to set the frame trigger level based
+ * on the DMA request size. For example if the DMA request size is set to
+ * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
+ * there needs to be enough space in the tx FIFO for the requested transfer
+ * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set
+ * the threshold to a value beyond 6, then the transmit will hang.
+ *
+ * Current dual stream devices have a PCU TX FIFO size of 8 KB.
+ * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
+ * there is a hardware issue which forces us to use 2 KB instead so the
+ * frame trigger level must not exceed 2 KB for these chipsets.
+ */
+bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
+{
+ u32 txcfg, curLevel, newLevel;
+
+ if (ah->tx_trig_level >= ah->config.max_txtrig_level)
+ return false;
+
+ ath9k_hw_disable_interrupts(ah);
+
+ txcfg = REG_READ(ah, AR_TXCFG);
+ curLevel = MS(txcfg, AR_FTRIG);
+ newLevel = curLevel;
+ if (bIncTrigLevel) {
+ if (curLevel < ah->config.max_txtrig_level)
+ newLevel++;
+ } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
+ newLevel--;
+ if (newLevel != curLevel)
+ REG_WRITE(ah, AR_TXCFG,
+ (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
+
+ ath9k_hw_enable_interrupts(ah);
+
+ ah->tx_trig_level = newLevel;
+
+ return newLevel != curLevel;
+}
+EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
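+/*
+ * Typical usage (sketch only, not part of this file): the interrupt
+ * path bumps the trigger level when the hardware reports a TX
+ * underrun, trading a little latency for fewer underruns, e.g.:
+ *
+ *	if (status & ATH9K_INT_TXURN)
+ *		ath9k_hw_updatetxtriglevel(ah, true);
+ */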
+
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
+{
+ int maxdelay = 1000;
+ int i, q;
+
+ if (ah->curchan) {
+ if (IS_CHAN_HALF_RATE(ah->curchan))
+ maxdelay *= 2;
+ else if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ maxdelay *= 4;
+ }
+
+ REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
+
+ REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
+
+ for (q = 0; q < AR_NUM_QCU; q++) {
+ for (i = 0; i < maxdelay; i++) {
+ if (i)
+ udelay(5);
+
+ if (!ath9k_hw_numtxpending(ah, q))
+ break;
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
+
+ REG_WRITE(ah, AR_Q_TXD, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
+
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
+{
+#define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */
+#define ATH9K_TIME_QUANTUM 100 /* usec */
+ int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+ int wait;
+
+ REG_WRITE(ah, AR_Q_TXD, 1 << q);
+
+ for (wait = wait_time; wait != 0; wait--) {
+ if (wait != wait_time)
+ udelay(ATH9K_TIME_QUANTUM);
+
+ if (ath9k_hw_numtxpending(ah, q) == 0)
+ break;
+ }
+
+ REG_WRITE(ah, AR_Q_TXD, 0);
+
+ return wait != 0;
+
+#undef ATH9K_TX_STOP_DMA_TIMEOUT
+#undef ATH9K_TIME_QUANTUM
+}
+EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
+
+bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
+ const struct ath9k_tx_queue_info *qinfo)
+{
+ u32 cw;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info *qi;
+
+ qi = &ah->txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ ath_dbg(common, QUEUE,
+ "Set TXQ properties, inactive queue: %u\n", q);
+ return false;
+ }
+
+ ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
+
+ qi->tqi_ver = qinfo->tqi_ver;
+ qi->tqi_subtype = qinfo->tqi_subtype;
+ qi->tqi_qflags = qinfo->tqi_qflags;
+ qi->tqi_priority = qinfo->tqi_priority;
+ if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
+ qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
+ else
+ qi->tqi_aifs = INIT_AIFS;
+ if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
+ cw = min(qinfo->tqi_cwmin, 1024U);
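+		/*
+		 * Round up to the next (2^n - 1) so the value remains a
+		 * valid contention window.
+		 */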
+ qi->tqi_cwmin = 1;
+ while (qi->tqi_cwmin < cw)
+ qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
+ } else
+ qi->tqi_cwmin = qinfo->tqi_cwmin;
+ if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
+ cw = min(qinfo->tqi_cwmax, 1024U);
+ qi->tqi_cwmax = 1;
+ while (qi->tqi_cwmax < cw)
+ qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
+ } else
+ qi->tqi_cwmax = INIT_CWMAX;
+
+ if (qinfo->tqi_shretry != 0)
+ qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
+ else
+ qi->tqi_shretry = INIT_SH_RETRY;
+ if (qinfo->tqi_lgretry != 0)
+ qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
+ else
+ qi->tqi_lgretry = INIT_LG_RETRY;
+ qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
+ qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
+ qi->tqi_burstTime = qinfo->tqi_burstTime;
+ qi->tqi_readyTime = qinfo->tqi_readyTime;
+
+ switch (qinfo->tqi_subtype) {
+ case ATH9K_WME_UPSD:
+ if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
+ qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_set_txq_props);
+
+bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info *qi;
+
+ qi = &ah->txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ ath_dbg(common, QUEUE,
+ "Get TXQ properties, inactive queue: %u\n", q);
+ return false;
+ }
+
+ qinfo->tqi_qflags = qi->tqi_qflags;
+ qinfo->tqi_ver = qi->tqi_ver;
+ qinfo->tqi_subtype = qi->tqi_subtype;
+ qinfo->tqi_priority = qi->tqi_priority;
+ qinfo->tqi_aifs = qi->tqi_aifs;
+ qinfo->tqi_cwmin = qi->tqi_cwmin;
+ qinfo->tqi_cwmax = qi->tqi_cwmax;
+ qinfo->tqi_shretry = qi->tqi_shretry;
+ qinfo->tqi_lgretry = qi->tqi_lgretry;
+ qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
+ qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
+ qinfo->tqi_burstTime = qi->tqi_burstTime;
+ qinfo->tqi_readyTime = qi->tqi_readyTime;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_get_txq_props);
+
+int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
+ const struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info *qi;
+ int q;
+
+ switch (type) {
+ case ATH9K_TX_QUEUE_BEACON:
+ q = ATH9K_NUM_TX_QUEUES - 1;
+ break;
+ case ATH9K_TX_QUEUE_CAB:
+ q = ATH9K_NUM_TX_QUEUES - 2;
+ break;
+ case ATH9K_TX_QUEUE_PSPOLL:
+ q = 1;
+ break;
+ case ATH9K_TX_QUEUE_UAPSD:
+ q = ATH9K_NUM_TX_QUEUES - 3;
+ break;
+ case ATH9K_TX_QUEUE_DATA:
+ q = qinfo->tqi_subtype;
+ break;
+ default:
+ ath_err(common, "Invalid TX queue type: %u\n", type);
+ return -1;
+ }
+
+ ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
+
+ qi = &ah->txq[q];
+ if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
+ ath_err(common, "TX queue: %u already active\n", q);
+ return -1;
+ }
+ memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
+ qi->tqi_type = type;
+ qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
+ (void) ath9k_hw_set_txq_props(ah, q, qinfo);
+
+ return q;
+}
+EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
+
+static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
+{
+ ah->txok_interrupt_mask &= ~(1 << q);
+ ah->txerr_interrupt_mask &= ~(1 << q);
+ ah->txdesc_interrupt_mask &= ~(1 << q);
+ ah->txeol_interrupt_mask &= ~(1 << q);
+ ah->txurn_interrupt_mask &= ~(1 << q);
+}
+
+bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info *qi;
+
+ qi = &ah->txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
+ return false;
+ }
+
+ ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
+
+ qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
+ ath9k_hw_clear_queue_interrupts(ah, q);
+ ath9k_hw_set_txq_interrupts(ah, qi);
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
+
+bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info *qi;
+ u32 cwMin, chanCwMin, value;
+
+ qi = &ah->txq[q];
+ if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
+ ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
+ return true;
+ }
+
+ ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
+
+ if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
+ chanCwMin = INIT_CWMIN;
+
+ for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
+ } else
+ cwMin = qi->tqi_cwmin;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_DLCL_IFS(q),
+ SM(cwMin, AR_D_LCL_IFS_CWMIN) |
+ SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
+ SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+
+ REG_WRITE(ah, AR_DRETRY_LIMIT(q),
+ SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
+ SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
+ SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
+
+ REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+
+ if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
+ REG_WRITE(ah, AR_DMISC(q),
+ AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
+ else
+ REG_WRITE(ah, AR_DMISC(q),
+ AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+
+ if (qi->tqi_cbrPeriod) {
+ REG_WRITE(ah, AR_QCBRCFG(q),
+ SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
+ SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
+ (qi->tqi_cbrOverflowLimit ?
+ AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
+ }
+ if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
+ REG_WRITE(ah, AR_QRDYTIMECFG(q),
+ SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
+ AR_Q_RDYTIMECFG_EN);
+ }
+
+ REG_WRITE(ah, AR_DCHNTIME(q),
+ SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
+ (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
+
+ if (qi->tqi_burstTime
+ && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
+
+ if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
+
+ switch (qi->tqi_type) {
+ case ATH9K_TX_QUEUE_BEACON:
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_SET_BIT(ah, AR_QMISC(q),
+ AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_BEACON_USE
+ | AR_Q_MISC_CBR_INCR_DIS1);
+
+ REG_SET_BIT(ah, AR_DMISC(q),
+ (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
+ | AR_D_MISC_BEACON_USE
+ | AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ /*
+		 * cwmin and cwmax should be 0 for the beacon queue,
+		 * but not for IBSS, where that would create an
+		 * imbalance in beaconing fairness among the
+		 * participating nodes.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_ADHOC) {
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
+ | SM(0, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+ }
+ break;
+ case ATH9K_TX_QUEUE_CAB:
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_SET_BIT(ah, AR_QMISC(q),
+ AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_CBR_INCR_DIS1
+ | AR_Q_MISC_CBR_INCR_DIS0);
+ value = (qi->tqi_readyTime -
+ (ah->config.sw_beacon_response_time -
+ ah->config.dma_beacon_response_time)) * 1024;
+ REG_WRITE(ah, AR_QRDYTIMECFG(q),
+ value | AR_Q_RDYTIMECFG_EN);
+ REG_SET_BIT(ah, AR_DMISC(q),
+ (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ break;
+ case ATH9K_TX_QUEUE_PSPOLL:
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
+ break;
+ case ATH9K_TX_QUEUE_UAPSD:
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
+ break;
+ default:
+ break;
+ }
+
+ if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
+ REG_SET_BIT(ah, AR_DMISC(q),
+ SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
+ AR_D_MISC_ARB_LOCKOUT_CNTRL) |
+ AR_D_MISC_POST_FR_BKOFF_DIS);
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
+
+ ath9k_hw_clear_queue_interrupts(ah, q);
+ if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
+ ah->txok_interrupt_mask |= 1 << q;
+ ah->txerr_interrupt_mask |= 1 << q;
+ }
+ if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
+ ah->txdesc_interrupt_mask |= 1 << q;
+ if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
+ ah->txeol_interrupt_mask |= 1 << q;
+ if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
+ ah->txurn_interrupt_mask |= 1 << q;
+ ath9k_hw_set_txq_interrupts(ah, qi);
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_resettxqueue);
+
+int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
+ struct ath_rx_status *rs)
+{
+ struct ar5416_desc ads;
+ struct ar5416_desc *adsp = AR5416DESC(ds);
+ u32 phyerr;
+
+ if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
+ ads.u.rx = adsp->u.rx;
+
+ rs->rs_status = 0;
+ rs->rs_flags = 0;
+ rs->flag = 0;
+
+ rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ rs->rs_tstamp = ads.AR_RcvTimestamp;
+
+ if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
+ rs->rs_rssi = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
+ } else {
+ rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
+ AR_RxRSSIAnt00);
+ rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
+ AR_RxRSSIAnt01);
+ rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
+ AR_RxRSSIAnt02);
+ rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
+ AR_RxRSSIAnt10);
+ rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
+ AR_RxRSSIAnt11);
+ rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
+ AR_RxRSSIAnt12);
+ }
+ if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
+ rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ else
+ rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
+ rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+
+ rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
+ rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
+ rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+
+ /* directly mapped flags for ieee80211_rx_status */
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ rs->flag |=
+ (ads.ds_rxstatus3 & AR_STBC) ?
+			/* only Nss=1 STBC is supported */
+ (1 << RX_FLAG_STBC_SHIFT) : 0;
+
+ if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
+ rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+ /*
+ * Treat these errors as mutually exclusive to avoid spurious
+ * extra error reports from the hardware. If a CRC error is
+ * reported, then decryption and MIC errors are irrelevant,
+ * the frame is going to be dropped either way
+ */
+ if (ads.ds_rxstatus8 & AR_PHYErr) {
+ rs->rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
+ rs->rs_phyerr = phyerr;
+ } else if (ads.ds_rxstatus8 & AR_CRCErr)
+ rs->rs_status |= ATH9K_RXERR_CRC;
+ else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
+ rs->rs_status |= ATH9K_RXERR_MIC;
+ } else {
+ if (ads.ds_rxstatus8 &
+ (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr))
+ rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
+
+ /* Only up to MCS16 supported, everything above is invalid */
+ if (rs->rs_rate >= 0x90)
+ rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
+ }
+
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
+
+/*
+ * This can stop or re-enable RX.
+ *
+ * If 'set' is true, this kills any frame currently being transferred
+ * between the MAC and baseband and also prevents any new frames from
+ * getting started.
+ */
+bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
+{
+ u32 reg;
+
+ if (set) {
+ REG_SET_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
+ 0, AH_WAIT_TIMEOUT)) {
+ REG_CLR_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS |
+ AR_DIAG_RX_ABORT));
+
+ reg = REG_READ(ah, AR_OBS_BUS_1);
+ ath_err(ath9k_hw_common(ah),
+ "RX failed to go idle in 10 ms RXSM=0x%x\n",
+ reg);
+
+ return false;
+ }
+ } else {
+ REG_CLR_BIT(ah, AR_DIAG_SW,
+ (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_hw_setrxabort);
+
+void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
+{
+ REG_WRITE(ah, AR_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_putrxbuf);
+
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
+{
+ ath9k_enable_mib_counters(ah);
+
+ ath9k_ani_reset(ah, is_scanning);
+
+ REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+}
+EXPORT_SYMBOL(ath9k_hw_startpcureceive);
+
+void ath9k_hw_abortpcurecv(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
+
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
+{
+#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 mac_status, last_mac_status = 0;
+ int i;
+
+ /* Enable access to the DMA observation bus */
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ /* Wait for rx enable bit to go low */
+ for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
+ if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
+ break;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
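+			/*
+			 * On pre-AR9300 parts the RX DMA engine can get
+			 * stuck; observing debug state 0x1c0 on two
+			 * consecutive polls is treated as such a hang
+			 * and reported back through *reset.
+			 */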
+ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+ *reset = true;
+ break;
+ }
+
+ last_mac_status = mac_status;
+ }
+
+ udelay(AH_TIME_QUANTUM);
+ }
+
+ if (i == 0) {
+ ath_err(common,
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
+ AH_RX_STOP_DMA_TIMEOUT / 1000,
+ REG_READ(ah, AR_CR),
+ REG_READ(ah, AR_DIAG_SW),
+ REG_READ(ah, AR_DMADBG_7));
+ return false;
+ } else {
+ return true;
+ }
+
+#undef AH_RX_STOP_DMA_TIMEOUT
+}
+EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
+
+int ath9k_hw_beaconq_setup(struct ath_hw *ah)
+{
+ struct ath9k_tx_queue_info qi;
+
+ memset(&qi, 0, sizeof(qi));
+ qi.tqi_aifs = 1;
+ qi.tqi_cwmin = 0;
+ qi.tqi_cwmax = 0;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
+
+ return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
+}
+EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
+
+bool ath9k_hw_intrpend(struct ath_hw *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (((host_isr & AR_INTR_MAC_IRQ) ||
+ (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+ (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_intrpend);
+
+void ath9k_hw_kill_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_dbg(common, INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
+
+void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+{
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ atomic_set(&ah->intr_ref_cnt, -1);
+ else
+ atomic_dec(&ah->intr_ref_cnt);
+
+ ath9k_hw_kill_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
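+/*
+ * Enabling is reference counted against ath9k_hw_disable_interrupts():
+ * the IER is only re-armed once every earlier disable has been matched,
+ * i.e. when intr_ref_cnt increments back up to zero.
+ */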
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 async_mask;
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+ atomic_read(&ah->intr_ref_cnt));
+ return;
+ }
+
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+ AR_SREV_9561(ah))
+ sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
+
+ async_mask = AR_INTR_MAC_IRQ;
+
+ if (ah->imask & ATH9K_INT_MCI)
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+ ath_dbg(common, INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
+ }
+ ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+}
+EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
+
+void ath9k_hw_set_interrupts(struct ath_hw *ah)
+{
+ enum ath9k_int ints = ah->imask;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ints & ATH9K_INT_GLOBAL))
+ ath9k_hw_disable_interrupts(ah);
+
+ ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
+
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ah->config.tx_intr_mitigation)
+ mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
+ else {
+ if (ah->txok_interrupt_mask)
+ mask |= AR_IMR_TXOK;
+ if (ah->txdesc_interrupt_mask)
+ mask |= AR_IMR_TXDESC;
+ }
+ if (ah->txerr_interrupt_mask)
+ mask |= AR_IMR_TXERR;
+ if (ah->txeol_interrupt_mask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation) {
+ mask &= ~AR_IMR_RXOK_LP;
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ } else {
+ mask |= AR_IMR_RXOK_LP;
+ }
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & ATH9K_INT_GENTIMER)
+ mask |= AR_IMR_GENTMR;
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= AR_IMR_S2_CABEND;
+ if (ints & ATH9K_INT_TSFOOR)
+ mask2 |= AR_IMR_S2_TSFOOR;
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG) {
+ mask |= AR_IMR_BCNMISC;
+ mask2 |= AR_IMR_S2_BB_WATCHDOG;
+ }
+ }
+
+ ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
+ AR_IMR_S2_DTIM |
+ AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND |
+ AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR |
+ AR_IMR_S2_GTT |
+ AR_IMR_S2_CST);
+
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+ if (ints & ATH9K_INT_BB_WATCHDOG)
+ ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
+ }
+
+ ah->imrs2_reg |= mask2;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ return;
+}
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
+
+#define ATH9K_HW_MAX_DCU 10
+#define ATH9K_HW_SLICE_PER_DCU 16
+#define ATH9K_HW_BIT_IN_SLICE 16
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
+{
+ int dcu_idx;
+ u32 filter;
+
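+	/*
+	 * Each DCU keeps its own TX-filter bitmap: destidx selects the
+	 * 16-bit slice (destidx / ATH9K_HW_SLICE_PER_DCU) and the bit
+	 * within that slice, and the same set/clear command is written
+	 * to every DCU.
+	 */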
+ for (dcu_idx = 0; dcu_idx < ATH9K_HW_MAX_DCU; dcu_idx++) {
+ filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
+ filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
+ filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
+ AR_D_TXBLK_WRITE_SLICE);
+ filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
+ ath_dbg(ath9k_hw_common(ah), PS,
+ "DCU%d staid %d set %d txfilter %08x\n",
+ dcu_idx, destidx, set, filter);
+ REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
new file mode 100644
index 000000000..e55fa1189
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -0,0 +1,750 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MAC_H
+#define MAC_H
+
+#define set11nTries(_series, _index) \
+ (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
+
+#define set11nRate(_series, _index) \
+ (SM((_series)[_index].Rate, AR_XmitRate##_index))
+
+#define set11nPktDurRTSCTS(_series, _index) \
+ (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
+ ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
+ AR_RTSCTSQual##_index : 0))
+
+#define set11nRateFlags(_series, _index) \
+ (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
+ AR_2040_##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
+ AR_GI##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
+ AR_STBC##_index : 0) \
+ |SM((_series)[_index].ChSel, AR_ChainSel##_index))
+
+#define CCK_SIFS_TIME 10
+#define CCK_PREAMBLE_BITS 144
+#define CCK_PLCP_BITS 48
+
+#define OFDM_SIFS_TIME 16
+#define OFDM_PREAMBLE_TIME 20
+#define OFDM_PLCP_BITS 22
+#define OFDM_SYMBOL_TIME 4
+
+#define OFDM_SIFS_TIME_HALF 32
+#define OFDM_PREAMBLE_TIME_HALF 40
+#define OFDM_PLCP_BITS_HALF 22
+#define OFDM_SYMBOL_TIME_HALF 8
+
+#define OFDM_SIFS_TIME_QUARTER 64
+#define OFDM_PREAMBLE_TIME_QUARTER 80
+#define OFDM_PLCP_BITS_QUARTER 22
+#define OFDM_SYMBOL_TIME_QUARTER 16
+
+#define INIT_AIFS 2
+#define INIT_CWMIN 15
+#define INIT_CWMIN_11B 31
+#define INIT_CWMAX 1023
+#define INIT_SH_RETRY 10
+#define INIT_LG_RETRY 10
+#define INIT_SSH_RETRY 32
+#define INIT_SLG_RETRY 32
+
+#define ATH9K_SLOT_TIME_6 6
+#define ATH9K_SLOT_TIME_9 9
+#define ATH9K_SLOT_TIME_20 20
+
+#define ATH9K_TXERR_XRETRY 0x01
+#define ATH9K_TXERR_FILT 0x02
+#define ATH9K_TXERR_FIFO 0x04
+#define ATH9K_TXERR_XTXOP 0x08
+#define ATH9K_TXERR_TIMER_EXPIRED 0x10
+#define ATH9K_TX_ACKED 0x20
+#define ATH9K_TX_FLUSH 0x40
+#define ATH9K_TXERR_MASK \
+ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
+ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED | ATH9K_TX_FLUSH)
+
+#define ATH9K_TX_BA 0x01
+#define ATH9K_TX_PWRMGMT 0x02
+#define ATH9K_TX_DESC_CFG_ERR 0x04
+#define ATH9K_TX_DATA_UNDERRUN 0x08
+#define ATH9K_TX_DELIM_UNDERRUN 0x10
+#define ATH9K_TX_SW_FILTERED 0x80
+
+/* 64 bytes */
+#define MIN_TX_FIFO_THRESHOLD 0x1
+
+/*
+ * Single stream devices AR9285 and AR9271 require 2 KB
+ * to work around a hardware issue; all other devices
+ * can use the max 4 KB limit.
+ */
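+/* The trigger level is programmed in units of 64 bytes, hence 4096 / 64. */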
+#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
+
+struct ath_tx_status {
+ u32 ts_tstamp;
+ u16 ts_seqnum;
+ u8 ts_status;
+ u8 ts_rateindex;
+ int8_t ts_rssi;
+ u8 ts_shortretry;
+ u8 ts_longretry;
+ u8 ts_virtcol;
+ u8 ts_flags;
+ int8_t ts_rssi_ctl0;
+ int8_t ts_rssi_ctl1;
+ int8_t ts_rssi_ctl2;
+ int8_t ts_rssi_ext0;
+ int8_t ts_rssi_ext1;
+ int8_t ts_rssi_ext2;
+ u8 qid;
+ u16 desc_id;
+ u8 tid;
+ u32 ba_low;
+ u32 ba_high;
+ u32 evm0;
+ u32 evm1;
+ u32 evm2;
+ u32 duration;
+};
+
+struct ath_rx_status {
+ u32 rs_tstamp;
+ u16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
+ u8 rs_isaggr;
+ u8 rs_firstaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ bool is_mybeacon;
+ u32 evm0;
+ u32 evm1;
+ u32 evm2;
+ u32 evm3;
+ u32 evm4;
+ u32 flag; /* see enum mac80211_rx_flags */
+};
+
+struct ath_htc_rx_status {
+ __be64 rs_tstamp;
+ __be16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u8 rs_dummy;
+ /* FIXME: evm* never used? */
+ __be32 evm0;
+ __be32 evm1;
+ __be32 evm2;
+};
+
+#define ATH9K_RXERR_CRC 0x01
+#define ATH9K_RXERR_PHY 0x02
+#define ATH9K_RXERR_FIFO 0x04
+#define ATH9K_RXERR_DECRYPT 0x08
+#define ATH9K_RXERR_MIC 0x10
+#define ATH9K_RXERR_KEYMISS 0x20
+#define ATH9K_RXERR_CORRUPT_DESC 0x40
+
+#define ATH9K_RX_MORE 0x01
+#define ATH9K_RX_MORE_AGGR 0x02
+#define ATH9K_RX_GI 0x04
+#define ATH9K_RX_2040 0x08
+#define ATH9K_RX_DELIM_CRC_PRE 0x10
+#define ATH9K_RX_DELIM_CRC_POST 0x20
+#define ATH9K_RX_DECRYPT_BUSY 0x40
+
+#define ATH9K_RXKEYIX_INVALID ((u8)-1)
+#define ATH9K_TXKEYIX_INVALID ((u8)-1)
+
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_SPECTRAL = 38,
+ ATH9K_PHYERR_MAX = 39,
+};
+
+struct ath_desc {
+ u32 ds_link;
+ u32 ds_data;
+ u32 ds_ctl0;
+ u32 ds_ctl1;
+ u32 ds_hw[20];
+ void *ds_vdata;
+} __packed __aligned(4);
+
+#define ATH9K_TXDESC_NOACK 0x0002
+#define ATH9K_TXDESC_RTSENA 0x0004
+#define ATH9K_TXDESC_CTSENA 0x0008
+/* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for
+ * the descriptor it is marked on. We take a tx interrupt to reap
+ * descriptors when the h/w hits an EOL condition or
+ * when the descriptor is specifically marked to generate
+ * an interrupt with this flag. Descriptors should be
+ * marked periodically to ensure timely replenishing of the
+ * supply needed for sending frames. Deferring interrupts
+ * reduces system load and potentially allows more concurrent
+ * work to be done, but if done too aggressively it can cause
+ * senders to back up. When the hardware queue is left too
+ * large, rate control information may also be too out of
+ * date. An alternative to this is TX interrupt mitigation,
+ * but this needs more testing. */
+#define ATH9K_TXDESC_INTREQ 0x0010
+#define ATH9K_TXDESC_VEOL 0x0020
+#define ATH9K_TXDESC_EXT_ONLY 0x0040
+#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
+#define ATH9K_TXDESC_VMF 0x0100
+#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
+#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
+#define ATH9K_TXDESC_LDPC 0x0800
+#define ATH9K_TXDESC_CLRDMASK 0x1000
+
+#define ATH9K_TXDESC_PAPRD 0x70000
+#define ATH9K_TXDESC_PAPRD_S 16
+
+#define ATH9K_RXDESC_INTREQ 0x0020
+
+struct ar5416_desc {
+ u32 ds_link;
+ u32 ds_data;
+ u32 ds_ctl0;
+ u32 ds_ctl1;
+ union {
+ struct {
+ u32 ctl2;
+ u32 ctl3;
+ u32 ctl4;
+ u32 ctl5;
+ u32 ctl6;
+ u32 ctl7;
+ u32 ctl8;
+ u32 ctl9;
+ u32 ctl10;
+ u32 ctl11;
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ } tx;
+ struct {
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ } rx;
+ } u;
+} __packed __aligned(4);
+
+#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
+#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
+
+#define ds_ctl2 u.tx.ctl2
+#define ds_ctl3 u.tx.ctl3
+#define ds_ctl4 u.tx.ctl4
+#define ds_ctl5 u.tx.ctl5
+#define ds_ctl6 u.tx.ctl6
+#define ds_ctl7 u.tx.ctl7
+#define ds_ctl8 u.tx.ctl8
+#define ds_ctl9 u.tx.ctl9
+#define ds_ctl10 u.tx.ctl10
+#define ds_ctl11 u.tx.ctl11
+
+#define ds_txstatus0 u.tx.status0
+#define ds_txstatus1 u.tx.status1
+#define ds_txstatus2 u.tx.status2
+#define ds_txstatus3 u.tx.status3
+#define ds_txstatus4 u.tx.status4
+#define ds_txstatus5 u.tx.status5
+#define ds_txstatus6 u.tx.status6
+#define ds_txstatus7 u.tx.status7
+#define ds_txstatus8 u.tx.status8
+#define ds_txstatus9 u.tx.status9
+
+#define ds_rxstatus0 u.rx.status0
+#define ds_rxstatus1 u.rx.status1
+#define ds_rxstatus2 u.rx.status2
+#define ds_rxstatus3 u.rx.status3
+#define ds_rxstatus4 u.rx.status4
+#define ds_rxstatus5 u.rx.status5
+#define ds_rxstatus6 u.rx.status6
+#define ds_rxstatus7 u.rx.status7
+#define ds_rxstatus8 u.rx.status8
+
+#define AR_FrameLen 0x00000fff
+#define AR_VirtMoreFrag 0x00001000
+#define AR_TxCtlRsvd00 0x0000e000
+#define AR_XmitPower0 0x003f0000
+#define AR_XmitPower0_S 16
+#define AR_XmitPower1 0x3f000000
+#define AR_XmitPower1_S 24
+#define AR_XmitPower2 0x3f000000
+#define AR_XmitPower2_S 24
+#define AR_XmitPower3 0x3f000000
+#define AR_XmitPower3_S 24
+#define AR_RTSEnable 0x00400000
+#define AR_VEOL 0x00800000
+#define AR_ClrDestMask 0x01000000
+#define AR_TxCtlRsvd01 0x1e000000
+#define AR_TxIntrReq 0x20000000
+#define AR_DestIdxValid 0x40000000
+#define AR_CTSEnable 0x80000000
+
+#define AR_TxMore 0x00001000
+#define AR_DestIdx 0x000fe000
+#define AR_DestIdx_S 13
+#define AR_FrameType 0x00f00000
+#define AR_FrameType_S 20
+#define AR_NoAck 0x01000000
+#define AR_InsertTS 0x02000000
+#define AR_CorruptFCS 0x04000000
+#define AR_ExtOnly 0x08000000
+#define AR_ExtAndCtl 0x10000000
+#define AR_MoreAggr 0x20000000
+#define AR_IsAggr 0x40000000
+
+#define AR_BurstDur 0x00007fff
+#define AR_BurstDur_S 0
+#define AR_DurUpdateEna 0x00008000
+#define AR_XmitDataTries0 0x000f0000
+#define AR_XmitDataTries0_S 16
+#define AR_XmitDataTries1 0x00f00000
+#define AR_XmitDataTries1_S 20
+#define AR_XmitDataTries2 0x0f000000
+#define AR_XmitDataTries2_S 24
+#define AR_XmitDataTries3 0xf0000000
+#define AR_XmitDataTries3_S 28
+
+#define AR_XmitRate0 0x000000ff
+#define AR_XmitRate0_S 0
+#define AR_XmitRate1 0x0000ff00
+#define AR_XmitRate1_S 8
+#define AR_XmitRate2 0x00ff0000
+#define AR_XmitRate2_S 16
+#define AR_XmitRate3 0xff000000
+#define AR_XmitRate3_S 24
+
+#define AR_PacketDur0 0x00007fff
+#define AR_PacketDur0_S 0
+#define AR_RTSCTSQual0 0x00008000
+#define AR_PacketDur1 0x7fff0000
+#define AR_PacketDur1_S 16
+#define AR_RTSCTSQual1 0x80000000
+
+#define AR_PacketDur2 0x00007fff
+#define AR_PacketDur2_S 0
+#define AR_RTSCTSQual2 0x00008000
+#define AR_PacketDur3 0x7fff0000
+#define AR_PacketDur3_S 16
+#define AR_RTSCTSQual3 0x80000000
+
+#define AR_AggrLen 0x0000ffff
+#define AR_AggrLen_S 0
+#define AR_TxCtlRsvd60 0x00030000
+#define AR_PadDelim 0x03fc0000
+#define AR_PadDelim_S 18
+#define AR_EncrType 0x0c000000
+#define AR_EncrType_S 26
+#define AR_TxCtlRsvd61 0xf0000000
+#define AR_LDPC 0x80000000
+
+#define AR_2040_0 0x00000001
+#define AR_GI0 0x00000002
+#define AR_ChainSel0 0x0000001c
+#define AR_ChainSel0_S 2
+#define AR_2040_1 0x00000020
+#define AR_GI1 0x00000040
+#define AR_ChainSel1 0x00000380
+#define AR_ChainSel1_S 7
+#define AR_2040_2 0x00000400
+#define AR_GI2 0x00000800
+#define AR_ChainSel2 0x00007000
+#define AR_ChainSel2_S 12
+#define AR_2040_3 0x00008000
+#define AR_GI3 0x00010000
+#define AR_ChainSel3 0x000e0000
+#define AR_ChainSel3_S 17
+#define AR_RTSCTSRate 0x0ff00000
+#define AR_RTSCTSRate_S 20
+#define AR_STBC0 0x10000000
+#define AR_STBC1 0x20000000
+#define AR_STBC2 0x40000000
+#define AR_STBC3 0x80000000
+
+#define AR_TxRSSIAnt00 0x000000ff
+#define AR_TxRSSIAnt00_S 0
+#define AR_TxRSSIAnt01 0x0000ff00
+#define AR_TxRSSIAnt01_S 8
+#define AR_TxRSSIAnt02 0x00ff0000
+#define AR_TxRSSIAnt02_S 16
+#define AR_TxStatusRsvd00 0x3f000000
+#define AR_TxBaStatus 0x40000000
+#define AR_TxStatusRsvd01 0x80000000
+
+/*
+ * AR_FrmXmitOK - Frame transmission success flag. If set, the frame was
+ * transmitted successfully. If clear, no ACK or BA was received to indicate
+ * successful transmission when we were expecting an ACK or BA.
+ */
+#define AR_FrmXmitOK 0x00000001
+#define AR_ExcessiveRetries 0x00000002
+#define AR_FIFOUnderrun 0x00000004
+#define AR_Filtered 0x00000008
+#define AR_RTSFailCnt 0x000000f0
+#define AR_RTSFailCnt_S 4
+#define AR_DataFailCnt 0x00000f00
+#define AR_DataFailCnt_S 8
+#define AR_VirtRetryCnt 0x0000f000
+#define AR_VirtRetryCnt_S 12
+#define AR_TxDelimUnderrun 0x00010000
+#define AR_TxDataUnderrun 0x00020000
+#define AR_DescCfgErr 0x00040000
+#define AR_TxTimerExpired 0x00080000
+#define AR_TxStatusRsvd10 0xfff00000
+
+#define AR_SendTimestamp ds_txstatus2
+#define AR_BaBitmapLow ds_txstatus3
+#define AR_BaBitmapHigh ds_txstatus4
+
+#define AR_TxRSSIAnt10 0x000000ff
+#define AR_TxRSSIAnt10_S 0
+#define AR_TxRSSIAnt11 0x0000ff00
+#define AR_TxRSSIAnt11_S 8
+#define AR_TxRSSIAnt12 0x00ff0000
+#define AR_TxRSSIAnt12_S 16
+#define AR_TxRSSICombined 0xff000000
+#define AR_TxRSSICombined_S 24
+
+#define AR_TxTid 0xf0000000
+#define AR_TxTid_S 28
+
+#define AR_TxEVM0 ds_txstatus5
+#define AR_TxEVM1 ds_txstatus6
+#define AR_TxEVM2 ds_txstatus7
+
+#define AR_TxDone 0x00000001
+#define AR_SeqNum 0x00001ffe
+#define AR_SeqNum_S 1
+#define AR_TxStatusRsvd80 0x0001e000
+#define AR_TxOpExceeded 0x00020000
+#define AR_TxStatusRsvd81 0x001c0000
+#define AR_FinalTxIdx 0x00600000
+#define AR_FinalTxIdx_S 21
+#define AR_TxStatusRsvd82 0x01800000
+#define AR_PowerMgmt 0x02000000
+#define AR_TxStatusRsvd83 0xfc000000
+
+#define AR_RxCTLRsvd00 0xffffffff
+
+#define AR_RxCtlRsvd00 0x00001000
+#define AR_RxIntrReq 0x00002000
+#define AR_RxCtlRsvd01 0xffffc000
+
+#define AR_RxRSSIAnt00 0x000000ff
+#define AR_RxRSSIAnt00_S 0
+#define AR_RxRSSIAnt01 0x0000ff00
+#define AR_RxRSSIAnt01_S 8
+#define AR_RxRSSIAnt02 0x00ff0000
+#define AR_RxRSSIAnt02_S 16
+#define AR_RxRate 0xff000000
+#define AR_RxRate_S 24
+#define AR_RxStatusRsvd00 0xff000000
+
+#define AR_DataLen 0x00000fff
+#define AR_RxMore 0x00001000
+#define AR_NumDelim 0x003fc000
+#define AR_NumDelim_S 14
+#define AR_RxStatusRsvd10 0xff800000
+
+#define AR_RcvTimestamp ds_rxstatus2
+
+#define AR_GI 0x00000001
+#define AR_2040 0x00000002
+#define AR_Parallel40 0x00000004
+#define AR_Parallel40_S 2
+#define AR_STBC 0x00000008 /* on ar9280 and later */
+#define AR_RxStatusRsvd30 0x000000f0
+#define AR_RxAntenna 0xffffff00
+#define AR_RxAntenna_S 8
+
+#define AR_RxRSSIAnt10 0x000000ff
+#define AR_RxRSSIAnt10_S 0
+#define AR_RxRSSIAnt11 0x0000ff00
+#define AR_RxRSSIAnt11_S 8
+#define AR_RxRSSIAnt12 0x00ff0000
+#define AR_RxRSSIAnt12_S 16
+#define AR_RxRSSICombined 0xff000000
+#define AR_RxRSSICombined_S 24
+
+#define AR_RxEVM0 ds_rxstatus4
+#define AR_RxEVM1 ds_rxstatus5
+#define AR_RxEVM2 ds_rxstatus6
+
+#define AR_RxDone 0x00000001
+#define AR_RxFrameOK 0x00000002
+#define AR_CRCErr 0x00000004
+#define AR_DecryptCRCErr 0x00000008
+#define AR_PHYErr 0x00000010
+#define AR_MichaelErr 0x00000020
+#define AR_PreDelimCRCErr 0x00000040
+#define AR_RxStatusRsvd70 0x00000080
+#define AR_RxKeyIdxValid 0x00000100
+#define AR_KeyIdx 0x0000fe00
+#define AR_KeyIdx_S 9
+#define AR_PHYErrCode 0x0000ff00
+#define AR_PHYErrCode_S 8
+#define AR_RxMoreAggr 0x00010000
+#define AR_RxAggr 0x00020000
+#define AR_PostDelimCRCErr 0x00040000
+#define AR_RxStatusRsvd71 0x3ff80000
+#define AR_RxFirstAggr 0x20000000
+#define AR_DecryptBusyErr 0x40000000
+#define AR_KeyMiss 0x80000000
+
+enum ath9k_tx_queue {
+ ATH9K_TX_QUEUE_INACTIVE = 0,
+ ATH9K_TX_QUEUE_DATA,
+ ATH9K_TX_QUEUE_BEACON,
+ ATH9K_TX_QUEUE_CAB,
+ ATH9K_TX_QUEUE_UAPSD,
+ ATH9K_TX_QUEUE_PSPOLL
+};
+
+#define ATH9K_NUM_TX_QUEUES 10
+
+/* Used as a queue subtype instead of a WMM AC */
+#define ATH9K_WME_UPSD 4
+
+enum ath9k_tx_queue_flags {
+ TXQ_FLAG_TXINT_ENABLE = 0x0001,
+ TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
+ TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
+ TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
+ TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
+ TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
+ TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
+ TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
+};
+
+#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
+#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
+
+#define ATH9K_DECOMP_MASK_SIZE 128
+
+enum ath9k_pkt_type {
+ ATH9K_PKT_TYPE_NORMAL = 0,
+ ATH9K_PKT_TYPE_ATIM,
+ ATH9K_PKT_TYPE_PSPOLL,
+ ATH9K_PKT_TYPE_BEACON,
+ ATH9K_PKT_TYPE_PROBE_RESP,
+ ATH9K_PKT_TYPE_CHIRP,
+ ATH9K_PKT_TYPE_GRP_POLL,
+};
+
+struct ath9k_tx_queue_info {
+ u32 tqi_ver;
+ enum ath9k_tx_queue tqi_type;
+ int tqi_subtype;
+ enum ath9k_tx_queue_flags tqi_qflags;
+ u32 tqi_priority;
+ u32 tqi_aifs;
+ u32 tqi_cwmin;
+ u32 tqi_cwmax;
+ u16 tqi_shretry;
+ u16 tqi_lgretry;
+ u32 tqi_cbrPeriod;
+ u32 tqi_cbrOverflowLimit;
+ u32 tqi_burstTime;
+ u32 tqi_readyTime;
+ u32 tqi_physCompBuf;
+ u32 tqi_intFlags;
+};
+
+enum ath9k_rx_filter {
+ ATH9K_RX_FILTER_UCAST = 0x00000001,
+ ATH9K_RX_FILTER_MCAST = 0x00000002,
+ ATH9K_RX_FILTER_BCAST = 0x00000004,
+ ATH9K_RX_FILTER_CONTROL = 0x00000008,
+ ATH9K_RX_FILTER_BEACON = 0x00000010,
+ ATH9K_RX_FILTER_PROM = 0x00000020,
+ ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
+ ATH9K_RX_FILTER_PHYERR = 0x00000100,
+ ATH9K_RX_FILTER_MYBEACON = 0x00000200,
+ ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+ ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+ ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
+ ATH9K_RX_FILTER_PSPOLL = 0x00004000,
+ ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
+ ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
+ ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
+ ATH9K_RX_FILTER_4ADDRESS = 0x00100000,
+};
+
+#define ATH9K_RATESERIES_RTS_CTS 0x0001
+#define ATH9K_RATESERIES_2040 0x0002
+#define ATH9K_RATESERIES_HALFGI 0x0004
+#define ATH9K_RATESERIES_STBC 0x0008
+
+struct ath9k_11n_rate_series {
+ u32 Tries;
+ u32 Rate;
+ u32 PktDuration;
+ u32 ChSel;
+ u32 RateFlags;
+};
+
+enum aggr_type {
+ AGGR_BUF_NONE,
+ AGGR_BUF_FIRST,
+ AGGR_BUF_MIDDLE,
+ AGGR_BUF_LAST,
+};
+
+enum ath9k_key_type {
+ ATH9K_KEY_TYPE_CLEAR,
+ ATH9K_KEY_TYPE_WEP,
+ ATH9K_KEY_TYPE_AES,
+ ATH9K_KEY_TYPE_TKIP,
+};
+
+struct ath_tx_info {
+ u8 qcu;
+
+ bool is_first;
+ bool is_last;
+
+ enum aggr_type aggr;
+ u8 ndelim;
+ u16 aggr_len;
+
+ dma_addr_t link;
+ int pkt_len;
+ u32 flags;
+
+ dma_addr_t buf_addr[4];
+ int buf_len[4];
+
+ struct ath9k_11n_rate_series rates[4];
+ u8 rtscts_rate;
+ bool dur_update;
+
+ enum ath9k_pkt_type type;
+ enum ath9k_key_type keytype;
+ u8 keyix;
+ u8 txpower[4];
+};
+
+struct ath_hw;
+struct ath9k_channel;
+enum ath9k_int;
+
+u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
+void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
+void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
+u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
+bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
+bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
+ const struct ath9k_tx_queue_info *qinfo);
+bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
+ struct ath9k_tx_queue_info *qinfo);
+int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
+ const struct ath9k_tx_queue_info *qinfo);
+bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
+bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
+int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
+ struct ath_rx_status *rs);
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags);
+bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
+void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
+int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
+
+/* Interrupt Handling */
+bool ath9k_hw_intrpend(struct ath_hw *ah);
+void ath9k_hw_set_interrupts(struct ath_hw *ah);
+void ath9k_hw_enable_interrupts(struct ath_hw *ah);
+void ath9k_hw_disable_interrupts(struct ath_hw *ah);
+void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
+
+#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
new file mode 100644
index 000000000..d5f2fbf62
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -0,0 +1,2657 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/delay.h>
+#include "ath9k.h"
+#include "btcoex.h"
+
+u8 ath9k_parse_mpdudensity(u8 mpdudensity)
+{
+ /*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ 1 microsecond */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
+ bool sw_pending)
+{
+ bool pending = false;
+
+ spin_lock_bh(&txq->axq_lock);
+
+ if (txq->axq_depth) {
+ pending = true;
+ goto out;
+ }
+
+ if (!sw_pending)
+ goto out;
+
+ if (txq->mac80211_qnum >= 0) {
+ struct list_head *list;
+
+ list = &sc->cur_chan->acq[txq->mac80211_qnum];
+ if (!list_empty(list))
+ pending = true;
+ }
+out:
+ spin_unlock_bh(&txq->axq_lock);
+ return pending;
+}
+
+static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ ret = ath9k_hw_setpower(sc->sc_ah, mode);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ return ret;
+}
+
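+/*
+ * Sleep timer callback: once the driver has stayed idle, stop RX and
+ * put the chip into full sleep.
+ */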
+void ath_ps_full_sleep(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool reset;
+
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock(&common->cc_lock);
+
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
+
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+}
+
+void ath9k_ps_wakeup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ enum ath9k_power_mode power_mode;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (++sc->ps_usecount != 1)
+ goto unlock;
+
+ del_timer_sync(&sc->sleep_timer);
+ power_mode = sc->sc_ah->power_mode;
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
+
+ /*
+ * While the hardware is asleep, the cycle counters contain no
+ * useful data. Better clear them now so that they don't mess up
+ * survey data results.
+ */
+ if (power_mode != ATH9K_PM_AWAKE) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ spin_unlock(&common->cc_lock);
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
+void ath9k_ps_restore(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ enum ath9k_power_mode mode;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (--sc->ps_usecount != 0)
+ goto unlock;
+
+ if (sc->ps_idle) {
+ mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
+ goto unlock;
+ }
+
+ if (sc->ps_enabled &&
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK |
+ PS_WAIT_FOR_ANI))) {
+ mode = ATH9K_PM_NETWORK_SLEEP;
+ if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
+ ath9k_btcoex_stop_gen_timer(sc);
+ } else {
+ goto unlock;
+ }
+
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ spin_unlock(&common->cc_lock);
+
+ ath9k_hw_setpower(sc->sc_ah, mode);
+
+ unlock:
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
+static void __ath_cancel_work(struct ath_softc *sc)
+{
+ cancel_work_sync(&sc->paprd_work);
+ cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ cancel_work_sync(&sc->mci_work);
+#endif
+}
+
+void ath_cancel_work(struct ath_softc *sc)
+{
+ __ath_cancel_work(sc);
+ cancel_work_sync(&sc->hw_reset_work);
+}
+
+void ath_restart_work(struct ath_softc *sc)
+{
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+
+ if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+
+ ath_start_ani(sc);
+}
+
+static bool ath_prepare_reset(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ bool ret = true;
+
+ ieee80211_stop_queues(sc->hw);
+ ath_stop_ani(sc);
+ ath9k_hw_disable_interrupts(ah);
+
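+	/*
+	 * Stop the DMA engines; EDMA chips (AR9300 and later) stop RX
+	 * before draining TX, older chips drain TX first.
+	 */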
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ret &= ath_stoprecv(sc);
+ ret &= ath_drain_all_txq(sc);
+ } else {
+ ret &= ath_drain_all_txq(sc);
+ ret &= ath_stoprecv(sc);
+ }
+
+ return ret;
+}
+
+static bool ath_complete_reset(struct ath_softc *sc, bool start)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long flags;
+
+ ath9k_calculate_summary_state(sc, sc->cur_chan);
+ ath_startrecv(sc);
+ ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
+ sc->cur_chan->txpower,
+ &sc->cur_chan->cur_txpower);
+ clear_bit(ATH_OP_HW_RESET, &common->op_flags);
+
+ if (!sc->cur_chan->offchannel && start) {
+ /* restore per chanctx TSF timer */
+ if (sc->cur_chan->tsf_val) {
+ u32 offset;
+
+ offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
+ NULL);
+ ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
+ }
+
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
+ goto work;
+
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ } else {
+ ath9k_set_beacon(sc);
+ }
+ work:
+ ath_restart_work(sc);
+ ath_txq_schedule_all(sc);
+ }
+
+ sc->gtt_cnt = 0;
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+ ieee80211_wake_queues(sc->hw);
+ ath9k_p2p_ps_timer(sc);
+
+ return true;
+}
+
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_cal_data *caldata = NULL;
+ bool fastcc = true;
+ int r;
+
+ __ath_cancel_work(sc);
+
+ disable_irq(sc->irq);
+ tasklet_disable(&sc->intr_tq);
+ tasklet_disable(&sc->bcon_tasklet);
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (!sc->cur_chan->offchannel) {
+ fastcc = false;
+ caldata = &sc->cur_chan->caldata;
+ }
+
+ if (!hchan) {
+ fastcc = false;
+ hchan = ah->curchan;
+ }
+
+ if (!ath_prepare_reset(sc))
+ fastcc = false;
+
+ if (ath9k_is_chanctx_enabled())
+ fastcc = false;
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->cur_chandef = sc->cur_chan->chandef;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
+ hchan->channel, IS_CHAN_HT40(hchan), fastcc);
+
+ r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
+ if (r) {
+ ath_err(common,
+ "Unable to reset channel, reset status %d\n", r);
+
+ ath9k_hw_enable_interrupts(ah);
+ ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
+
+ goto out;
+ }
+
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
+ sc->cur_chan->offchannel)
+ ath9k_mci_set_txpower(sc, true, false);
+
+ if (!ath_complete_reset(sc, true))
+ r = -EIO;
+
+out:
+ enable_irq(sc->irq);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->bcon_tasklet);
+ tasklet_enable(&sc->intr_tq);
+
+ return r;
+}
+
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct ath_node *an;
+ an = (struct ath_node *)sta->drv_priv;
+
+ an->sc = sc;
+ an->sta = sta;
+ an->vif = vif;
+ memset(&an->key_idx, 0, sizeof(an->key_idx));
+
+ ath_tx_node_init(sc, an);
+
+ ath_dynack_node_init(sc->sc_ah, an);
+}
+
+static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ ath_tx_node_cleanup(sc, an);
+
+ ath_dynack_node_deinit(sc->sc_ah, an);
+}
+
+void ath9k_tasklet(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ enum ath_reset_type type;
+ unsigned long flags;
+ u32 status = sc->intrstatus;
+ u32 rxmask;
+
+ ath9k_ps_wakeup(sc);
+ spin_lock(&sc->sc_pcu_lock);
+
+ if (status & ATH9K_INT_FATAL) {
+ type = RESET_TYPE_FATAL_INT;
+ ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
+ goto out;
+ }
+
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG)) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ ar9003_hw_bb_watchdog_dbg_info(ah);
+ spin_unlock(&common->cc_lock);
+
+ if (ar9003_hw_bb_watchdog_check(ah)) {
+ type = RESET_TYPE_BB_WATCHDOG;
+ ath9k_queue_reset(sc, type);
+
+ /*
+ * Increment the ref. counter here so that
+ * interrupts are enabled in the reset routine.
+ */
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, RESET,
+ "BB_WATCHDOG: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
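+	/*
+	 * GTT (global TX timeout): if the counter keeps growing and the
+	 * hardware no longer looks alive, queue a chip reset.
+	 */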
+ if (status & ATH9K_INT_GTT) {
+ sc->gtt_cnt++;
+
+ if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
+ type = RESET_TYPE_TX_GTT;
+ ath9k_queue_reset(sc, type);
+ atomic_inc(&ah->intr_ref_cnt);
+ ath_dbg(common, RESET,
+ "GTT: Skipping interrupts\n");
+ goto out;
+ }
+ }
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
+ /*
+ * TSF sync does not look correct; remain awake to sync with
+ * the next Beacon.
+ */
+ ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN);
+ else
+ rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+
+ if (status & rxmask) {
+ /* Check for high priority Rx first */
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ (status & ATH9K_INT_RXHP))
+ ath_rx_tasklet(sc, 0, true);
+
+ ath_rx_tasklet(sc, 0, false);
+ }
+
+ if (status & ATH9K_INT_TX) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ /*
+ * For EDMA chips, TX completion is enabled for the
+ * beacon queue, so if a beacon has been transmitted
+ * successfully after a GTT interrupt, the GTT counter
+ * gets reset to zero here.
+ */
+ sc->gtt_cnt = 0;
+
+ ath_tx_edma_tasklet(sc);
+ } else {
+ ath_tx_tasklet(sc);
+ }
+
+ wake_up(&sc->tx_wait);
+ }
+
+ if (status & ATH9K_INT_GENTIMER)
+ ath_gen_timer_isr(sc->sc_ah);
+
+ ath9k_btcoex_handle_interrupt(sc, status);
+
+ /* re-enable hardware interrupt */
+ ath9k_hw_enable_interrupts(ah);
+out:
+ spin_unlock(&sc->sc_pcu_lock);
+ ath9k_ps_restore(sc);
+}
+
+irqreturn_t ath_isr(int irq, void *dev)
+{
+#define SCHED_INTR ( \
+ ATH9K_INT_FATAL | \
+ ATH9K_INT_BB_WATCHDOG | \
+ ATH9K_INT_RXORN | \
+ ATH9K_INT_RXEOL | \
+ ATH9K_INT_RX | \
+ ATH9K_INT_RXLP | \
+ ATH9K_INT_RXHP | \
+ ATH9K_INT_TX | \
+ ATH9K_INT_BMISS | \
+ ATH9K_INT_CST | \
+ ATH9K_INT_GTT | \
+ ATH9K_INT_TSFOOR | \
+ ATH9K_INT_GENTIMER | \
+ ATH9K_INT_MCI)
+
+ struct ath_softc *sc = dev;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ enum ath9k_int status;
+ u32 sync_cause = 0;
+ bool sched = false;
+
+ /*
+ * The hardware is not ready/present, don't
+ * touch anything. Note this can happen early
+ * on if the IRQ is shared.
+ */
+ if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
+ return IRQ_NONE;
+
+ /* shared irq, not for us */
+ if (!ath9k_hw_intrpend(ah))
+ return IRQ_NONE;
+
+ /*
+ * Figure out the reason(s) for the interrupt. Note
+ * that the hal returns a pseudo-ISR that may include
+ * bits we haven't explicitly enabled, so we mask the
+ * value to ensure we only process bits we requested.
+ */
+ ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+ ath9k_debug_sync_cause(sc, sync_cause);
+ status &= ah->imask; /* discard unasked-for bits */
+
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return IRQ_HANDLED;
+
+ /*
+ * If there are no status bits set, then this interrupt was not
+ * for me (should have been caught above).
+ */
+ if (!status)
+ return IRQ_NONE;
+
+ /* Cache the status */
+ sc->intrstatus = status;
+
+ if (status & SCHED_INTR)
+ sched = true;
+
+ /*
+ * If a FATAL interrupt is received, we have to reset the chip
+ * immediately.
+ */
+ if (status & ATH9K_INT_FATAL)
+ goto chip_reset;
+
+ if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+ (status & ATH9K_INT_BB_WATCHDOG))
+ goto chip_reset;
+
+ if (status & ATH9K_INT_SWBA)
+ tasklet_schedule(&sc->bcon_tasklet);
+
+ if (status & ATH9K_INT_TXURN)
+ ath9k_hw_updatetxtriglevel(ah, true);
+
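+	/*
+	 * RXEOL means the RX descriptor list is exhausted; mask it and
+	 * RXORN so they do not keep re-asserting until RX is replenished.
+	 */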
+ if (status & ATH9K_INT_RXEOL) {
+ ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah);
+ }
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ if (status & ATH9K_INT_TIM_TIMER) {
+ if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
+ goto chip_reset;
+ /* Clear RxAbort bit so that we can
+ * receive frames */
+ ath9k_setpower(sc, ATH9K_PM_AWAKE);
+ spin_lock(&sc->sc_pm_lock);
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
+ spin_unlock(&sc->sc_pm_lock);
+ }
+
+chip_reset:
+
+ ath_debug_stat_interrupt(sc, status);
+
+ if (sched) {
+ /* turn off every interrupt */
+ ath9k_hw_disable_interrupts(ah);
+ tasklet_schedule(&sc->intr_tq);
+ }
+
+ return IRQ_HANDLED;
+
+#undef SCHED_INTR
+}
+
+/*
+ * This function is called when a HW reset cannot be deferred
+ * and has to be immediate.
+ */
+int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int r;
+
+ ath9k_hw_kill_interrupts(sc->sc_ah);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
+
+ ath9k_ps_wakeup(sc);
+ r = ath_reset_internal(sc, hchan);
+ ath9k_ps_restore(sc);
+
+ return r;
+}
+
+/*
+ * When a HW reset can be deferred, it is added to the
+ * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before
+ * queueing.
+ */
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+#ifdef CONFIG_ATH9K_DEBUGFS
+ RESET_STAT_INC(sc, type);
+#endif
+ ath9k_hw_kill_interrupts(sc->sc_ah);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+}
+
+void ath_reset_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
+
+ ath9k_ps_wakeup(sc);
+ ath_reset_internal(sc, NULL);
+ ath9k_ps_restore(sc);
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static int ath9k_start(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan;
+ struct ath_chanctx *ctx = sc->cur_chan;
+ struct ath9k_channel *init_channel;
+ int r;
+
+ ath_dbg(common, CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ ath9k_ps_wakeup(sc);
+ mutex_lock(&sc->mutex);
+
+ init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef);
+ sc->cur_chandef = hw->conf.chandef;
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, false);
+
+ /*
+ * The basic interface to setting the hardware in a good
+ * state is ``reset''. On return the hardware is known to
+ * be powered up and with interrupts disabled. This must
+ * be followed by initialization of the appropriate bits
+ * and then setup of the interrupt mask.
+ */
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ atomic_set(&ah->intr_ref_cnt, -1);
+
+ r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
+ if (r) {
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ r, curchan->center_freq);
+ ah->reset_power_on = false;
+ }
+
+ /* Setup our intr mask. */
+ ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN | ATH9K_INT_FATAL |
+ ATH9K_INT_GLOBAL;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ah->imask |= ATH9K_INT_RXHP |
+ ATH9K_INT_RXLP;
+ else
+ ah->imask |= ATH9K_INT_RX;
+
+ if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
+ ah->imask |= ATH9K_INT_BB_WATCHDOG;
+
+ /*
+ * Enable GTT interrupts only for AR9003/AR9004 chips
+ * for now.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->imask |= ATH9K_INT_GTT;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ ah->imask |= ATH9K_INT_CST;
+
+ ath_mci_enable(sc);
+
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
+ sc->sc_ah->is_monitoring = false;
+
+ if (!ath_complete_reset(sc, false))
+ ah->reset_power_on = false;
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 1 : 0);
+ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
+ ath9k_hw_reset_tsf(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ mutex_unlock(&sc->mutex);
+
+ ath9k_ps_restore(sc);
+
+ return 0;
+}
+
+static void ath9k_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_tx_control txctl;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ unsigned long flags;
+
+ if (sc->ps_enabled) {
+ /*
+ * mac80211 does not set PM field for normal data frames, so we
+ * need to update that based on the current PS mode.
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_nullfunc(hdr->frame_control) &&
+ !ieee80211_has_pm(hdr->frame_control)) {
+ ath_dbg(common, PS,
+ "Add PM=1 for a TX frame while in PS mode\n");
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ }
+ }
+
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
+ /*
+ * We are using PS-Poll and mac80211 can request TX while in
+ * power save mode. Need to wake up hardware for the TX to be
+ * completed and if needed, also for RX of buffered frames.
+ */
+ ath9k_ps_wakeup(sc);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ if (ieee80211_is_pspoll(hdr->frame_control)) {
+ ath_dbg(common, PS,
+ "Sending PS-Poll to pick a buffered frame\n");
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
+ } else {
+ ath_dbg(common, PS, "Wake up to complete TX\n");
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
+ }
+ /*
+ * The actual restore operation will happen only after
+ * the ps_flags bit is cleared. We are just dropping
+ * the ps_usecount here.
+ */
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ ath9k_ps_restore(sc);
+ }
+
+ /*
+ * Cannot tx while the hardware is in full sleep; it first needs a full
+ * chip reset to recover from that.
+ */
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
+ ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
+ goto exit;
+ }
+
+ memset(&txctl, 0, sizeof(struct ath_tx_control));
+ txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+ txctl.sta = control->sta;
+
+ ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, XMIT, "TX failed\n");
+ TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
+ goto exit;
+ }
+
+ return;
+exit:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static void ath9k_stop(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool prev_idle;
+
+ ath9k_deinit_channel_context(sc);
+
+ mutex_lock(&sc->mutex);
+
+ ath_cancel_work(sc);
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ mutex_unlock(&sc->mutex);
+ return;
+ }
+
+ /* Ensure HW is awake when we try to shut it down. */
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ /* prevent tasklets from enabling interrupts once we disable them */
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+
+ /* make sure h/w will not generate any interrupt
+ * before setting the invalid flag. */
+ ath9k_hw_disable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /* we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and are not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ prev_idle = sc->ps_idle;
+ sc->ps_idle = true;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 0 : 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ ath_prepare_reset(sc);
+
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
+
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_channel(hw, ah,
+ &sc->cur_chan->chandef);
+
+ ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
+ ath9k_hw_phy_disable(ah);
+
+ ath9k_hw_configpcipowersave(ah, true);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ ath9k_ps_restore(sc);
+
+ sc->ps_idle = prev_idle;
+
+ mutex_unlock(&sc->mutex);
+
+ ath_dbg(common, CONFIG, "Driver halt\n");
+}
+
+static bool ath9k_uses_beacons(int type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
+ u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
+ int i;
+
+ if (iter_data->has_hw_macaddr) {
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ } else {
+ memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
+ iter_data->has_hw_macaddr = true;
+ }
+
+ if (!vif->bss_conf.use_short_slot)
+ iter_data->slottime = ATH9K_SLOT_TIME_20;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ iter_data->naps++;
+ break;
+ case NL80211_IFTYPE_STATION:
+ iter_data->nstations++;
+ if (avp->assoc && !iter_data->primary_sta)
+ iter_data->primary_sta = vif;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iter_data->nadhocs++;
+ if (vif->bss_conf.enable_beacon)
+ iter_data->beacons = true;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ iter_data->nmeshes++;
+ if (vif->bss_conf.enable_beacon)
+ iter_data->beacons = true;
+ break;
+ case NL80211_IFTYPE_WDS:
+ iter_data->nwds++;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_update_bssid_mask(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath9k_vif_iter_data *iter_data)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp;
+ int i;
+
+ if (!ath9k_is_chanctx_enabled())
+ return;
+
+ list_for_each_entry(avp, &ctx->vifs, list) {
+ if (ctx->nvifs_assigned != 1)
+ continue;
+
+ if (!avp->vif->p2p || !iter_data->has_hw_macaddr)
+ continue;
+
+ ether_addr_copy(common->curbssid, avp->bssid);
+
+ /* perm_addr will be used as the p2p device address. */
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^
+ sc->hw->wiphy->perm_addr[i]);
+ }
+}
+
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ath_softc *sc,
+ struct ath_chanctx *ctx,
+ struct ath9k_vif_iter_data *iter_data)
+{
+ struct ath_vif *avp;
+
+ /*
+ * The hardware will use primary station addr together with the
+ * BSSID mask when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ eth_broadcast_addr(iter_data->mask);
+ iter_data->slottime = ATH9K_SLOT_TIME_9;
+
+ list_for_each_entry(avp, &ctx->vifs, list)
+ ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
+
+ ath9k_update_bssid_mask(sc, ctx, iter_data);
+}
+
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+ struct ieee80211_vif *vif, bool changed)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
+ unsigned long flags;
+
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
+ ether_addr_copy(common->curbssid, avp->bssid);
+ common->curaid = avp->aid;
+ ath9k_hw_write_associd(sc->sc_ah);
+
+ if (changed) {
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
+
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, false);
+
+ ath_dbg(common, CONFIG,
+ "Primary Station interface: %pM, BSSID: %pM\n",
+ vif->addr, common->curbssid);
+}
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+static void ath9k_set_offchannel_state(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_vif *vif = NULL;
+
+ ath9k_ps_wakeup(sc);
+
+ if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
+ vif = sc->offchannel.scan_vif;
+ else
+ vif = sc->offchannel.roc_vif;
+
+ if (WARN_ON(!vif))
+ goto exit;
+
+ eth_zero_addr(common->curbssid);
+ eth_broadcast_addr(common->bssidmask);
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
+ common->curaid = 0;
+ ah->opmode = vif->type;
+ ah->imask &= ~ATH9K_INT_SWBA;
+ ah->imask &= ~ATH9K_INT_TSFOOR;
+ ah->slottime = ATH9K_SLOT_TIME_9;
+
+ ath_hw_setbssidmask(common);
+ ath9k_hw_setopmode(ah);
+ ath9k_hw_write_associd(sc->sc_ah);
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_init_global_settings(ah);
+
+exit:
+ ath9k_ps_restore(sc);
+}
+#endif
+
+/* Called with sc->mutex held. */
+void ath9k_calculate_summary_state(struct ath_softc *sc,
+ struct ath_chanctx *ctx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_vif_iter_data iter_data;
+ struct ath_beacon_config *cur_conf;
+
+ ath_chanctx_check_active(sc, ctx);
+
+ if (ctx != sc->cur_chan)
+ return;
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ if (ctx == &sc->offchannel.chan)
+ return ath9k_set_offchannel_state(sc);
+#endif
+
+ ath9k_ps_wakeup(sc);
+ ath9k_calculate_iter_data(sc, ctx, &iter_data);
+
+ if (iter_data.has_hw_macaddr)
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
+
+ if (iter_data.naps > 0) {
+ cur_conf = &ctx->beacon;
+ ath9k_hw_set_tsfadjust(ah, true);
+ ah->opmode = NL80211_IFTYPE_AP;
+ if (cur_conf->enable_beacon)
+ iter_data.beacons = true;
+ } else {
+ ath9k_hw_set_tsfadjust(ah, false);
+
+ if (iter_data.nmeshes)
+ ah->opmode = NL80211_IFTYPE_MESH_POINT;
+ else if (iter_data.nwds)
+ ah->opmode = NL80211_IFTYPE_AP;
+ else if (iter_data.nadhocs)
+ ah->opmode = NL80211_IFTYPE_ADHOC;
+ else
+ ah->opmode = NL80211_IFTYPE_STATION;
+ }
+
+ ath9k_hw_setopmode(ah);
+
+ ctx->switch_after_beacon = false;
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
+ ah->imask |= ATH9K_INT_TSFOOR;
+ else {
+ ah->imask &= ~ATH9K_INT_TSFOOR;
+ if (iter_data.naps == 1 && iter_data.beacons)
+ ctx->switch_after_beacon = true;
+ }
+
+ ah->imask &= ~ATH9K_INT_SWBA;
+ if (ah->opmode == NL80211_IFTYPE_STATION) {
+ bool changed = (iter_data.primary_sta != ctx->primary_sta);
+
+ if (iter_data.primary_sta) {
+ iter_data.beacons = true;
+ ath9k_set_assoc_state(sc, iter_data.primary_sta,
+ changed);
+ ctx->primary_sta = iter_data.primary_sta;
+ } else {
+ ctx->primary_sta = NULL;
+ eth_zero_addr(common->curbssid);
+ common->curaid = 0;
+ ath9k_hw_write_associd(sc->sc_ah);
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, true);
+ }
+ } else if (iter_data.beacons) {
+ ah->imask |= ATH9K_INT_SWBA;
+ }
+ ath9k_hw_set_interrupts(ah);
+
+ if (iter_data.beacons)
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
+ else
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
+
+ if (ah->slottime != iter_data.slottime) {
+ ah->slottime = iter_data.slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+
+ if (iter_data.primary_sta)
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+ else
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
+ ath_dbg(common, CONFIG,
+ "macaddr: %pM, bssid: %pM, bssidmask: %pM\n",
+ common->macaddr, common->curbssid, common->bssidmask);
+
+ ath9k_ps_restore(sc);
+}
+
+static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ int *power = (int *)data;
+
+ if (*power < vif->bss_conf.txpower)
+ *power = vif->bss_conf.txpower;
+}
+
+/* Called with sc->mutex held. */
+void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ int power;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+
+ ath9k_ps_wakeup(sc);
+ if (ah->tpc_enabled) {
+ power = (vif) ? vif->bss_conf.txpower : -1;
+ ieee80211_iterate_active_interfaces_atomic(
+ sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_tpc_vif_iter, &power);
+ if (power == -1)
+ power = sc->hw->conf.power_level;
+ } else {
+ power = sc->hw->conf.power_level;
+ }
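+	/* Driver txpower is kept in 0.5 dBm units, hence the factor of two. */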
+ sc->cur_chan->txpower = 2 * power;
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+ sc->cur_chan->cur_txpower = reg->max_power_level;
+ ath9k_ps_restore(sc);
+}
+
+static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ int i;
+
+ if (!ath9k_is_chanctx_enabled())
+ return;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ vif->hw_queue[i] = i;
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ vif->cab_queue = hw->queues - 2;
+ else
+ vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+}
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_node *an = &avp->mcast_node;
+
+ mutex_lock(&sc->mutex);
+
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (sc->cur_chan->nvifs >= 1) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+ sc->tx99_vif = vif;
+ }
+
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
+ sc->cur_chan->nvifs++;
+
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
+ avp->vif = vif;
+ if (!ath9k_is_chanctx_enabled()) {
+ avp->chanctx = sc->cur_chan;
+ list_add_tail(&avp->list, &avp->chanctx->vifs);
+ }
+
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+
+ ath9k_assign_hw_queues(hw, vif);
+
+ ath9k_set_txpower(sc, vif);
+
+ an->sc = sc;
+ an->sta = NULL;
+ an->vif = vif;
+ an->no_ps_filter = true;
+ ath_tx_node_init(sc, an);
+
+ mutex_unlock(&sc->mutex);
+ return 0;
+}
+
+static int ath9k_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type,
+ bool p2p)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ mutex_lock(&sc->mutex);
+
+ if (config_enabled(CONFIG_ATH9K_TX99)) {
+ mutex_unlock(&sc->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ ath_dbg(common, CONFIG, "Change Interface\n");
+
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_remove_slot(sc, vif);
+
+ vif->type = new_type;
+ vif->p2p = p2p;
+
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
+ ath9k_assign_hw_queues(hw, vif);
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+
+ ath9k_set_txpower(sc, vif);
+
+ mutex_unlock(&sc->mutex);
+ return 0;
+}
+
+static void ath9k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ ath_dbg(common, CONFIG, "Detach Interface\n");
+
+ mutex_lock(&sc->mutex);
+
+ ath9k_p2p_remove_vif(sc, vif);
+
+ sc->cur_chan->nvifs--;
+ sc->tx99_vif = NULL;
+ if (!ath9k_is_chanctx_enabled())
+ list_del(&avp->list);
+
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_remove_slot(sc, vif);
+
+ ath_tx_node_cleanup(sc, &avp->mcast_node);
+
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+
+ ath9k_set_txpower(sc, NULL);
+
+ mutex_unlock(&sc->mutex);
+}
+
+static void ath9k_enable_ps(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ sc->ps_enabled = true;
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ ah->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
+ ath9k_hw_setrxabort(ah, 1);
+ }
+ ath_dbg(common, PS, "PowerSave enabled\n");
+}
+
+static void ath9k_disable_ps(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ sc->ps_enabled = false;
+ ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ ath9k_hw_setrxabort(ah, 0);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
+ if (ah->imask & ATH9K_INT_TIM_TIMER) {
+ ah->imask &= ~ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(ah);
+ }
+ }
+ ath_dbg(common, PS, "PowerSave disabled\n");
+}
+
+static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ath_chanctx *ctx = sc->cur_chan;
+
+ ath9k_ps_wakeup(sc);
+ mutex_lock(&sc->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (sc->ps_idle) {
+ ath_cancel_work(sc);
+ ath9k_stop_btcoex(sc);
+ } else {
+ ath9k_start_btcoex(sc);
+ /*
+ * The chip needs a reset to properly wake up from
+ * full sleep
+ */
+ ath_chanctx_set_channel(sc, ctx, &ctx->chandef);
+ }
+ }
+
+ /*
+ * We just prepare to enable PS. We have to wait until our AP has
+ * ACK'd our null data frame before disabling RX; otherwise we would
+ * ignore those ACKs and end up retransmitting the same null data frames.
+ * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (conf->flags & IEEE80211_CONF_PS)
+ ath9k_enable_ps(sc);
+ else
+ ath9k_disable_ps(sc);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
+ sc->sc_ah->is_monitoring = true;
+ } else {
+ ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
+ sc->sc_ah->is_monitoring = false;
+ }
+ }
+
+ if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL);
+ ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
+ }
+
+ mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+/* FIXME: sc->sc_full_reset ? */
+static void ath9k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath_softc *sc = hw->priv;
+ u32 rfilt;
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->cur_chan->rxfilter = *total_flags;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath9k_ps_wakeup(sc);
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
+ ath9k_ps_restore(sc);
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
+}
+
+static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { };
+ int key;
+
+ ath_node_attach(sc, sta, vif);
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_AP_VLAN)
+ return 0;
+
+ key = ath_key_config(common, vif, sta, &ps_key);
+ if (key > 0) {
+ an->ps_key = key;
+ an->key_idx[0] = key;
+ }
+
+ return 0;
+}
+
+static void ath9k_del_ps_key(struct ath_softc *sc,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
+
+ if (!an->ps_key)
+ return;
+
+ ath_key_delete(common, &ps_key);
+ an->ps_key = 0;
+ an->key_idx[0] = 0;
+}
+
+static int ath9k_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_softc *sc = hw->priv;
+
+ ath9k_del_ps_key(sc, vif, sta);
+ ath_node_detach(sc, sta);
+
+ return 0;
+}
+
+static int ath9k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = ath9k_sta_add(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Add station: %pM\n", sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ ret = ath9k_sta_remove(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Remove station: %pM\n", sta->addr);
+ }
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ath_chanctx_event(sc, vif,
+ ATH_CHANCTX_EVENT_AUTHORIZED);
+ }
+ }
+
+ return ret;
+}
+
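+/*
+ * Set or clear the hardware TX filter on every key cache slot used by
+ * this station; called from ath9k_sta_notify() when the peer enters or
+ * leaves powersave.
+ */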
+static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
+ struct ath_node *an,
+ bool set)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (!an->key_idx[i])
+ continue;
+ ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
+ }
+}
+
+static void ath9k_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ an->sleeping = true;
+ ath_tx_aggr_sleep(sta, sc, an);
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
+ an->sleeping = false;
+ ath_tx_aggr_wakeup(sc, an);
+ break;
+ }
+}
+
+static int ath9k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq;
+ struct ath9k_tx_queue_info qi;
+ int ret = 0;
+
+ if (queue >= IEEE80211_NUM_ACS)
+ return 0;
+
+ txq = sc->tx.txq_map[queue];
+
+ ath9k_ps_wakeup(sc);
+ mutex_lock(&sc->mutex);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
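+ /* txop is specified in units of 32 microseconds */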
+ qi.tqi_burstTime = params->txop * 32;
+
+ ath_dbg(common, CONFIG,
+ "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, txq->axq_qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
+ ret = ath_txq_update(sc, txq->axq_qnum, &qi);
+ if (ret)
+ ath_err(common, "TXQ Update failed\n");
+
+ mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
+
+ return ret;
+}
+
+static int ath9k_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = NULL;
+ int ret = 0, i;
+
+ if (ath9k_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /*
+ * For now, disable hw crypto for the RSN IBSS group keys. This
+ * could be optimized in the future to use a modified key cache
+ * design to support per-STA RX GTK, but until that gets
+ * implemented, use of software crypto for group addressed
+ * frames is acceptable to allow RSN IBSS to be used.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
+ ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
+ if (sta)
+ an = (struct ath_node *)sta->drv_priv;
+
+ switch (cmd) {
+ case SET_KEY:
+ if (sta)
+ ath9k_del_ps_key(sc, vif, sta);
+
+ key->hw_key_idx = 0;
+ ret = ath_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (sc->sc_ah->sw_mgmt_crypto_tx &&
+ key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ ret = 0;
+ }
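+ /*
+ * Remember the hw key index in a free per-station slot; it is
+ * used for the TX filter and cleared again on DISABLE_KEY.
+ */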
+ if (an && key->hw_key_idx) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i])
+ continue;
+ an->key_idx[i] = key->hw_key_idx;
+ break;
+ }
+ WARN_ON(i == ARRAY_SIZE(an->key_idx));
+ }
+ break;
+ case DISABLE_KEY:
+ ath_key_delete(common, key);
+ if (an) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i] != key->hw_key_idx)
+ continue;
+ an->key_idx[i] = 0;
+ break;
+ }
+ }
+ key->hw_key_idx = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+
+ return ret;
+}
+
+static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+#define CHECK_ANI \
+ (BSS_CHANGED_ASSOC | \
+ BSS_CHANGED_IBSS | \
+ BSS_CHANGED_BEACON_ENABLED)
+
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ int slottime;
+
+ ath9k_ps_wakeup(sc);
+ mutex_lock(&sc->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
+ bss_conf->bssid, bss_conf->assoc);
+
+ memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
+ avp->aid = bss_conf->aid;
+ avp->assoc = bss_conf->assoc;
+
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+ }
+
+ if (changed & BSS_CHANGED_IBSS) {
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = bss_conf->aid;
+ ath9k_hw_write_associd(sc->sc_ah);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
+ (changed & BSS_CHANGED_BEACON_INT) ||
+ (changed & BSS_CHANGED_BEACON_INFO)) {
+ ath9k_beacon_config(sc, vif, changed);
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath9k_calculate_summary_state(sc, avp->chanctx);
+ }
+
+ if ((avp->chanctx == sc->cur_chan) &&
+ (changed & BSS_CHANGED_ERP_SLOT)) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ sc->beacon.slottime = slottime;
+ sc->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
+ if (changed & BSS_CHANGED_P2P_PS)
+ ath9k_p2p_bss_info_changed(sc, vif);
+
+ if (changed & CHECK_ANI)
+ ath_check_ani(sc);
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
+ vif->addr, bss_conf->txpower, bss_conf->txpower_type);
+ ath9k_set_txpower(sc, vif);
+ }
+
+ mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
+
+#undef CHECK_ANI
+}
+
+static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+
+ return tsf;
+}
+
+static void ath9k_set_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct ath_softc *sc = hw->priv;
+
+ mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_settsf64(sc->sc_ah, tsf);
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+}
+
+static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+
+ mutex_lock(&sc->mutex);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_reset_tsf(sc->sc_ah);
+ ath9k_ps_restore(sc);
+
+ mutex_unlock(&sc->mutex);
+}
+
+static int ath9k_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn, u8 buf_size)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool flush = false;
+ int ret = 0;
+
+ mutex_lock(&sc->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (ath9k_is_chanctx_enabled()) {
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ ath9k_ps_wakeup(sc);
+ ret = ath_tx_aggr_start(sc, sta, tid, ssn);
+ if (!ret)
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ flush = true;
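+ /* fall through */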
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ath9k_ps_wakeup(sc);
+ ath_tx_aggr_stop(sc, sta, tid);
+ if (!flush)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ath9k_ps_wakeup(sc);
+ ath_tx_aggr_resume(sc, sta, tid);
+ ath9k_ps_restore(sc);
+ break;
+ default:
+ ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
+ }
+
+ mutex_unlock(&sc->mutex);
+
+ return ret;
+}
+
+static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ int pos;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&common->cc_lock);
+ if (idx == 0)
+ ath_update_survey_stats(sc);
+
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ spin_unlock_bh(&common->cc_lock);
+ return -ENOENT;
+ }
+
+ chan = &sband->channels[idx];
+ pos = chan->hw_value;
+ memcpy(survey, &sc->survey[pos], sizeof(*survey));
+ survey->channel = chan;
+ spin_unlock_bh(&common->cc_lock);
+
+ return 0;
+}
+
+static void ath9k_enable_dynack(struct ath_softc *sc)
+{
+#ifdef CONFIG_ATH9K_DYNACK
+ u32 rfilt;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_dynack_reset(ah);
+
+ ah->dynack.enabled = true;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+#endif
+}
+
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return;
+
+ mutex_lock(&sc->mutex);
+
+ if (coverage_class >= 0) {
+ ah->coverage_class = coverage_class;
+ if (ah->dynack.enabled) {
+ u32 rfilt;
+
+ ah->dynack.enabled = false;
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+ }
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_init_global_settings(ah);
+ ath9k_ps_restore(sc);
+ } else if (!ah->dynack.enabled) {
+ ath9k_enable_dynack(sc);
+ }
+
+ mutex_unlock(&sc->mutex);
+}
+
+static bool ath9k_has_tx_pending(struct ath_softc *sc,
+ bool sw_pending)
+{
+ int i, npend = 0;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
+ sw_pending);
+ if (npend)
+ break;
+ }
+
+ return !!npend;
+}
+
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+ goto flush;
+
+ /*
+ * If MCC is active, extend the flush timeout
+ * and wait for the HW/SW queues to become
+ * empty. This needs to be done outside the
+ * sc->mutex lock to allow the channel scheduler
+ * to switch channel contexts.
+ *
+ * The vif queues have been stopped in mac80211,
+ * so there won't be any incoming frames.
+ */
+ __ath9k_flush(hw, queues, drop, true, true);
+ return;
+ }
+flush:
+ mutex_lock(&sc->mutex);
+ __ath9k_flush(hw, queues, drop, true, false);
+ mutex_unlock(&sc->mutex);
+}
+
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
+ bool sw_pending, bool timeout_override)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int timeout;
+ bool drain_txq;
+
+ cancel_delayed_work_sync(&sc->tx_complete_work);
+
+ if (ah->ah_flags & AH_UNPLUGGED) {
+ ath_dbg(common, ANY, "Device has been unplugged!\n");
+ return;
+ }
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ return;
+ }
+
+ spin_lock_bh(&sc->chan_lock);
+ if (timeout_override)
+ timeout = HZ / 5;
+ else
+ timeout = sc->cur_chan->flush_timeout;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_dbg(common, CHAN_CTX,
+ "Flush timeout: %d\n", jiffies_to_msecs(timeout));
+
+ if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
+ timeout) > 0)
+ drop = false;
+
+ if (drop) {
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ drain_txq = ath_drain_all_txq(sc);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ if (!drain_txq)
+ ath_reset(sc, NULL);
+
+ ath9k_ps_restore(sc);
+ }
+
+ ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
+}
+
+static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ return ath9k_has_tx_pending(sc, true);
+}
+
+static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_vif *vif;
+ struct ath_vif *avp;
+ struct ath_buf *bf;
+ struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int status;
+
+ vif = sc->beacon.bslot[0];
+ if (!vif)
+ return 0;
+
+ if (!vif->bss_conf.enable_beacon)
+ return 0;
+
+ avp = (void *)vif->drv_priv;
+
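+ /*
+ * For non-EDMA chips, process the last beacon's TX descriptor once
+ * to record whether it was sent without error.
+ */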
+ if (!sc->beacon.tx_processed && !edma) {
+ tasklet_disable(&sc->bcon_tasklet);
+
+ bf = avp->av_bcbuf;
+ if (!bf || !bf->bf_mpdu)
+ goto skip;
+
+ status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
+ if (status == -EINPROGRESS)
+ goto skip;
+
+ sc->beacon.tx_processed = true;
+ sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+
+skip:
+ tasklet_enable(&sc->bcon_tasklet);
+ }
+
+ return sc->beacon.tx_last;
+}
+
+static int ath9k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;
+
+ stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
+ stats->dot11RTSFailureCount = mib_stats->rts_bad;
+ stats->dot11FCSErrorCount = mib_stats->fcs_bad;
+ stats->dot11RTSSuccessCount = mib_stats->rts_good;
+ return 0;
+}
+
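+/*
+ * Map the bits of the requested antenna mask onto the chains that are
+ * actually present in the capability mask.
+ */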
+static u32 fill_chainmask(u32 cap, u32 new)
+{
+ u32 filled = 0;
+ int i;
+
+ for (i = 0; cap && new; i++, cap >>= 1) {
+ if (!(cap & BIT(0)))
+ continue;
+
+ if (new & BIT(0))
+ filled |= BIT(i);
+
+ new >>= 1;
+ }
+
+ return filled;
+}
+
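+/*
+ * Older (pre-AR9300) chips only support a few RX chain combinations;
+ * chain 1 alone (0x2) is valid only on single-chain devices.
+ */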
+static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
+{
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return true;
+
+ switch (val & 0x7) {
+ case 0x1:
+ case 0x3:
+ case 0x7:
+ return true;
+ case 0x2:
+ return (ah->caps.rx_chainmask == 1);
+ default:
+ return false;
+ }
+}
+
+static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.rx_chainmask != 1)
+ rx_ant |= tx_ant;
+
+ if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
+ return -EINVAL;
+
+ sc->ant_rx = rx_ant;
+ sc->ant_tx = tx_ant;
+
+ if (ah->caps.rx_chainmask == 1)
+ return 0;
+
+ /* AR9100 runs into calibration issues if not all rx chains are enabled */
+ if (AR_SREV_9100(ah))
+ ah->rxchainmask = 0x7;
+ else
+ ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
+
+ ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
+ ath9k_cmn_reload_chainmask(ah);
+
+ return 0;
+}
+
+static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath_softc *sc = hw->priv;
+
+ *tx_ant = sc->ant_tx;
+ *rx_ant = sc->ant_rx;
+ return 0;
+}
+
+static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
+}
+
+static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
+}
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+
+static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (sc->offchannel.roc_vif) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting RoC\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+ ath_roc_complete(sc, true);
+ }
+
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
+ ath_dbg(common, CHAN_CTX,
+ "%s: Aborting HW scan\n", __func__);
+
+ del_timer_sync(&sc->offchannel.timer);
+ ath_scan_complete(sc, true);
+ }
+}
+
+static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+ mutex_lock(&sc->mutex);
+
+ if (WARN_ON(sc->offchannel.scan_req)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ath9k_ps_wakeup(sc);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
+ sc->offchannel.scan_vif = vif;
+ sc->offchannel.scan_req = req;
+ sc->offchannel.scan_idx = 0;
+
+ ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n",
+ vif->addr);
+
+ if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
+ ath_dbg(common, CHAN_CTX, "Starting HW scan\n");
+ ath_offchannel_next(sc);
+ }
+
+out:
+ mutex_unlock(&sc->mutex);
+
+ return ret;
+}
+
+static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr);
+
+ mutex_lock(&sc->mutex);
+ del_timer_sync(&sc->offchannel.timer);
+ ath_scan_complete(sc, true);
+ mutex_unlock(&sc->mutex);
+}
+
+static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+ mutex_lock(&sc->mutex);
+
+ if (WARN_ON(sc->offchannel.roc_vif)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ath9k_ps_wakeup(sc);
+ sc->offchannel.roc_vif = vif;
+ sc->offchannel.roc_chan = chan;
+ sc->offchannel.roc_duration = duration;
+
+ ath_dbg(common, CHAN_CTX,
+ "RoC request on vif: %pM, type: %d duration: %d\n",
+ vif->addr, type, duration);
+
+ if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
+ ath_dbg(common, CHAN_CTX, "Starting RoC period\n");
+ ath_offchannel_next(sc);
+ }
+
+out:
+ mutex_unlock(&sc->mutex);
+
+ return ret;
+}
+
+static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ mutex_lock(&sc->mutex);
+
+ ath_dbg(common, CHAN_CTX, "Cancel RoC\n");
+ del_timer_sync(&sc->offchannel.timer);
+
+ if (sc->offchannel.roc_vif) {
+ if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+ ath_roc_complete(sc, true);
+ }
+
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+static int ath9k_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ctx, **ptr;
+ int pos;
+
+ mutex_lock(&sc->mutex);
+
+ ath_for_each_chanctx(sc, ctx) {
+ if (ctx->assigned)
+ continue;
+
+ ptr = (void *) conf->drv_priv;
+ *ptr = ctx;
+ ctx->assigned = true;
+ pos = ctx - &sc->chanctx[0];
+ ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;
+
+ ath_dbg(common, CHAN_CTX,
+ "Add channel context: %d MHz\n",
+ conf->def.chan->center_freq);
+
+ ath_chanctx_set_channel(sc, ctx, &conf->def);
+
+ mutex_unlock(&sc->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&sc->mutex);
+ return -ENOSPC;
+}
+
+static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ctx = ath_chanctx_get(conf);
+
+ mutex_lock(&sc->mutex);
+
+ ath_dbg(common, CHAN_CTX,
+ "Remove channel context: %d MHz\n",
+ conf->def.chan->center_freq);
+
+ ctx->assigned = false;
+ ctx->hw_queue_base = 0;
+ ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
+
+ mutex_unlock(&sc->mutex);
+}
+
+static void ath9k_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_chanctx *ctx = ath_chanctx_get(conf);
+
+ mutex_lock(&sc->mutex);
+ ath_dbg(common, CHAN_CTX,
+ "Change channel context: %d MHz\n",
+ conf->def.chan->center_freq);
+ ath_chanctx_set_channel(sc, ctx, &conf->def);
+ mutex_unlock(&sc->mutex);
+}
+
+static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_chanctx *ctx = ath_chanctx_get(conf);
+ int i;
+
+ ath9k_cancel_pending_offchannel(sc);
+
+ mutex_lock(&sc->mutex);
+
+ ath_dbg(common, CHAN_CTX,
+ "Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n",
+ vif->addr, vif->type, vif->p2p,
+ conf->def.chan->center_freq);
+
+ avp->chanctx = ctx;
+ ctx->nvifs_assigned++;
+ list_add_tail(&avp->list, &ctx->vifs);
+ ath9k_calculate_summary_state(sc, ctx);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ vif->hw_queue[i] = ctx->hw_queue_base + i;
+
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_chanctx *ctx = ath_chanctx_get(conf);
+ int ac;
+
+ ath9k_cancel_pending_offchannel(sc);
+
+ mutex_lock(&sc->mutex);
+
+ ath_dbg(common, CHAN_CTX,
+ "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n",
+ vif->addr, vif->type, vif->p2p,
+ conf->def.chan->center_freq);
+
+ avp->chanctx = NULL;
+ ctx->nvifs_assigned--;
+ list_del(&avp->list);
+ ath9k_calculate_summary_state(sc, ctx);
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+ mutex_unlock(&sc->mutex);
+}
+
+static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
+ struct ath_beacon_config *cur_conf;
+ struct ath_chanctx *go_ctx;
+ unsigned long timeout;
+ bool changed = false;
+ u32 beacon_int;
+
+ if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+ return;
+
+ if (!avp->chanctx)
+ return;
+
+ mutex_lock(&sc->mutex);
+
+ spin_lock_bh(&sc->chan_lock);
+ if (sc->next_chan || (sc->cur_chan != avp->chanctx))
+ changed = true;
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (!changed)
+ goto out;
+
+ ath9k_cancel_pending_offchannel(sc);
+
+ go_ctx = ath_is_go_chanctx_present(sc);
+
+ if (go_ctx) {
+ /*
+ * Wait till the GO interface gets a chance
+ * to send out an NoA.
+ */
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = true;
+ cur_conf = &go_ctx->beacon;
+ beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
+ spin_unlock_bh(&sc->chan_lock);
+
+ timeout = usecs_to_jiffies(beacon_int * 2);
+ init_completion(&sc->go_beacon);
+
+ mutex_unlock(&sc->mutex);
+
+ if (wait_for_completion_timeout(&sc->go_beacon,
+ timeout) == 0) {
+ ath_dbg(common, CHAN_CTX,
+ "Failed to send new NoA\n");
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->sched.mgd_prepare_tx = false;
+ spin_unlock_bh(&sc->chan_lock);
+ }
+
+ mutex_lock(&sc->mutex);
+ }
+
+ ath_dbg(common, CHAN_CTX,
+ "%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
+ __func__, vif->addr);
+
+ spin_lock_bh(&sc->chan_lock);
+ sc->next_chan = avp->chanctx;
+ sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
+ spin_unlock_bh(&sc->chan_lock);
+
+ ath_chanctx_set_next(sc, true);
+out:
+ mutex_unlock(&sc->mutex);
+}
+
+void ath9k_fill_chanctx_ops(void)
+{
+ if (!ath9k_is_chanctx_enabled())
+ return;
+
+ ath9k_ops.hw_scan = ath9k_hw_scan;
+ ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
+ ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
+ ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
+ ath9k_ops.add_chanctx = ath9k_add_chanctx;
+ ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
+ ath9k_ops.change_chanctx = ath9k_change_chanctx;
+ ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
+ ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
+ ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
+}
+
+#endif
+
+static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ mutex_lock(&sc->mutex);
+ if (avp->chanctx)
+ *dbm = avp->chanctx->cur_txpower;
+ else
+ *dbm = sc->cur_chan->cur_txpower;
+ mutex_unlock(&sc->mutex);
+
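+ /* stored TX power is in half-dBm units */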
+ *dbm /= 2;
+
+ return 0;
+}
+
+struct ieee80211_ops ath9k_ops = {
+ .tx = ath9k_tx,
+ .start = ath9k_start,
+ .stop = ath9k_stop,
+ .add_interface = ath9k_add_interface,
+ .change_interface = ath9k_change_interface,
+ .remove_interface = ath9k_remove_interface,
+ .config = ath9k_config,
+ .configure_filter = ath9k_configure_filter,
+ .sta_state = ath9k_sta_state,
+ .sta_notify = ath9k_sta_notify,
+ .conf_tx = ath9k_conf_tx,
+ .bss_info_changed = ath9k_bss_info_changed,
+ .set_key = ath9k_set_key,
+ .get_tsf = ath9k_get_tsf,
+ .set_tsf = ath9k_set_tsf,
+ .reset_tsf = ath9k_reset_tsf,
+ .ampdu_action = ath9k_ampdu_action,
+ .get_survey = ath9k_get_survey,
+ .rfkill_poll = ath9k_rfkill_poll_state,
+ .set_coverage_class = ath9k_set_coverage_class,
+ .flush = ath9k_flush,
+ .tx_frames_pending = ath9k_tx_frames_pending,
+ .tx_last_beacon = ath9k_tx_last_beacon,
+ .release_buffered_frames = ath9k_release_buffered_frames,
+ .get_stats = ath9k_get_stats,
+ .set_antenna = ath9k_set_antenna,
+ .get_antenna = ath9k_get_antenna,
+
+#ifdef CONFIG_ATH9K_WOW
+ .suspend = ath9k_suspend,
+ .resume = ath9k_resume,
+ .set_wakeup = ath9k_set_wakeup,
+#endif
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+ .get_et_sset_count = ath9k_get_et_sset_count,
+ .get_et_stats = ath9k_get_et_stats,
+ .get_et_strings = ath9k_get_et_strings,
+#endif
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
+ .sta_add_debugfs = ath9k_sta_add_debugfs,
+#endif
+ .sw_scan_start = ath9k_sw_scan_start,
+ .sw_scan_complete = ath9k_sw_scan_complete,
+ .get_txpower = ath9k_get_txpower,
+};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644
index 000000000..66596b952
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
+static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info *
+ath_mci_find_profile(struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ if (list_empty(&mci->info))
+ return NULL;
+
+ list_for_each_entry(entry, &mci->info, list) {
+ if (entry->conn_handle == info->conn_handle)
+ return entry;
+ }
+ return NULL;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+ u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
+
+ if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+ (info->type == MCI_GPM_COEX_PROFILE_VOICE))
+ return false;
+
+ if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+ (info->type != MCI_GPM_COEX_PROFILE_VOICE))
+ return false;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return false;
+
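+ /* copy only the scalar profile fields, not the embedded list head */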
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&entry->list, &mci->info);
+ if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
+ if (info->voice_type < sizeof(voice_priority))
+ mci->voice_priority = voice_priority[info->voice_type];
+ else
+ mci->voice_priority = 110;
+ }
+
+ return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *entry)
+{
+ if (!entry)
+ return;
+
+ DEC_PROF(mci, entry);
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+ struct ath_mci_profile_info *info, *tinfo;
+
+ mci->aggr_limit = 0;
+ mci->num_mgmt = 0;
+
+ if (list_empty(&mci->info))
+ return;
+
+ list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+ list_del(&info->list);
+ DEC_PROF(mci, info);
+ kfree(info);
+ }
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u32 wlan_airtime = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ /*
+ * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
+ * When wlan_airtime is less than 4 ms, the aggregation limit has to be
+ * capped at half of wlan_airtime to ensure that the aggregate can fit
+ * without colliding with BT traffic.
+ */
+ if ((wlan_airtime <= 4) &&
+ (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+ mci->aggr_limit = 2 * wlan_airtime;
+}
+
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+ struct ath_mci_profile_info *info;
+ u32 num_profile = NUM_PROF(mci);
+
+ if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
+ goto skip_tuning;
+
+ mci->aggr_limit = 0;
+ btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+ btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+ if (NUM_PROF(mci))
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ else
+ btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+ ATH_BTCOEX_STOMP_LOW;
+
+ if (num_profile == 1) {
+ info = list_first_entry(&mci->info,
+ struct ath_mci_profile_info,
+ list);
+ if (mci->num_sco) {
+ if (info->T == 12)
+ mci->aggr_limit = 8;
+ else if (info->T == 6) {
+ mci->aggr_limit = 6;
+ btcoex->duty_cycle = 30;
+ } else
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Single SCO, aggregation limit %d 1/4 ms\n",
+ mci->aggr_limit);
+ } else if (mci->num_pan || mci->num_other_acl) {
+ /*
+ * For single PAN/FTP profile, allocate 35% for BT
+ * to improve WLAN throughput.
+ */
+ btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
+ btcoex->btcoex_period = 53;
+ ath_dbg(common, MCI,
+ "Single PAN/FTP bt period %d ms dutycycle %d\n",
+ btcoex->duty_cycle, btcoex->btcoex_period);
+ } else if (mci->num_hid) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Multiple attempt/timeout single HID "
+ "aggregation limit 1.5 ms dutycycle 30%%\n");
+ }
+ } else if (num_profile == 2) {
+ if (mci->num_hid == 2)
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
+ btcoex->duty_cycle);
+ } else if (num_profile >= 3) {
+ mci->aggr_limit = 4;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1 ms\n");
+ }
+
+skip_tuning:
+ if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+ if (IS_CHAN_HT(sc->sc_ah->curchan))
+ ath_mci_adjust_aggr_limit(btcoex);
+ else
+ btcoex->btcoex_period >>= 1;
+ }
+
+ ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(sc->sc_ah);
+
+ if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+ return;
+
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
+ if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+ btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ ath9k_hw_btcoex_enable(sc->sc_ah);
+ ath9k_btcoex_timer_resume(sc);
+}
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ switch (opcode) {
+ case MCI_GPM_BT_CAL_REQ:
+ if (mci_hw->bt_state == MCI_BT_AWAKE) {
+ mci_hw->bt_state = MCI_BT_CAL_START;
+ ath9k_queue_reset(sc, RESET_TYPE_MCI);
+ }
+ ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
+ break;
+ case MCI_GPM_BT_CAL_GRANT:
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+ ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+ 16, false, true);
+ break;
+ default:
+ ath_dbg(common, MCI, "Unknown GPM CAL message\n");
+ break;
+ }
+}
+
+static void ath9k_mci_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
+
+ ath_mci_update_scheme(sc);
+}
+
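+/*
+ * Track the lowest TX priority for STOMP_NONE, the highest for STOMP_ALL,
+ * and the lowest priority above ATH_MCI_HI_PRIO for STOMP_LOW.
+ */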
+static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
+{
+ if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
+ stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
+
+ if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
+ stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
+
+ if ((cur_txprio > ATH_MCI_HI_PRIO) &&
+ (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
+ stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
+}
+
+static void ath_mci_set_concur_txprio(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX];
+
+ memset(stomp_txprio, 0, sizeof(stomp_txprio));
+ if (mci->num_mgmt) {
+ stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
+ if (!mci->num_pan && !mci->num_other_acl)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
+ ATH_MCI_INQUIRY_PRIO;
+ } else {
+ u8 prof_prio[] = { 50, 90, 94, 52 };/* RFCOMM, A2DP, HID, PAN */
+
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
+
+ if (mci->num_sco)
+ ath_mci_update_stomp_txprio(mci->voice_priority,
+ stomp_txprio);
+ if (mci->num_other_acl)
+ ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
+ if (mci->num_a2dp)
+ ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
+ if (mci->num_hid)
+ ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
+ if (mci->num_pan)
+ ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
+ }
+ ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
+}
+
+static u8 ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *entry = NULL;
+
+ entry = ath_mci_find_profile(mci, info);
+ if (entry) {
+ /*
+ * Two MCI interrupts are generated while connecting to
+ * headset and A2DP profiles, but only one MCI interrupt
+ * is generated (carrying the last added profile type) when
+ * both profiles are disconnected.
+ * So when the second profile type is added, decrement
+ * the count of the first one.
+ */
+ if (entry->type != info->type) {
+ DEC_PROF(mci, entry);
+ INC_PROF(mci, info);
+ }
+ memcpy(entry, info, 10);
+ }
+
+ if (info->start) {
+ if (!entry && !ath_mci_add_profile(common, mci, info))
+ return 0;
+ } else
+ ath_mci_del_profile(common, mci, entry);
+
+ ath_mci_set_concur_txprio(sc);
+ return 1;
+}
+
+static u8 ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info info;
+ int i = 0, old_num_mgmt = mci->num_mgmt;
+
+ /* Link status types are not handled */
+ if (status->is_link)
+ return 0;
+
+ info.conn_handle = status->conn_handle;
+ if (ath_mci_find_profile(mci, &info))
+ return 0;
+
+ if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
+ return 0;
+
+ if (status->is_critical)
+ __set_bit(status->conn_handle, mci->status);
+ else
+ __clear_bit(status->conn_handle, mci->status);
+
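+ /* recount the number of critical links after updating the bitmap */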
+ mci->num_mgmt = 0;
+ do {
+ if (test_bit(i, mci->status))
+ mci->num_mgmt++;
+ } while (++i < ATH_MCI_MAX_PROFILE);
+
+ ath_mci_set_concur_txprio(sc);
+ if (old_num_mgmt != mci->num_mgmt)
+ return 1;
+
+ return 0;
+}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_profile_info profile_info;
+ struct ath_mci_profile_status profile_status;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 major, minor, update_scheme = 0;
+ u32 seq_num;
+
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
+ ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
+ ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
+ ath_mci_flush_profile(&sc->btcoex.mci);
+ ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
+ }
+
+ switch (opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+ minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+ ar9003_mci_set_bt_version(ah, major, minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ar9003_mci_send_wlan_channels(ah);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ memcpy(&profile_info,
+ (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
+ (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
+ ath_dbg(common, MCI,
+ "Illegal profile type = %d, state = %d\n",
+ profile_info.type,
+ profile_info.start);
+ break;
+ }
+
+ update_scheme += ath_mci_process_profile(sc, &profile_info);
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ profile_status.is_link = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_TYPE);
+ profile_status.conn_handle = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_LINKID);
+ profile_status.is_critical = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_STATE);
+
+ seq_num = *((u32 *)(rx_payload + 12));
+ ath_dbg(common, MCI,
+ "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
+ profile_status.is_link, profile_status.conn_handle,
+ profile_status.is_critical, seq_num);
+
+ update_scheme += ath_mci_process_status(sc, &profile_status);
+ break;
+ default:
+ ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
+ break;
+ }
+ if (update_scheme)
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_mci_buf *buf = &mci->sched_buf;
+ int ret;
+
+ buf->bf_addr = dmam_alloc_coherent(sc->dev,
+ ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
+ &buf->bf_paddr, GFP_KERNEL);
+
+ if (buf->bf_addr == NULL) {
+ ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
+ return -ENOMEM;
+ }
+
+ memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
+ ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
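+ /*
+ * The GPM buffer shares the same DMA allocation and is placed
+ * right after the scheduler buffer.
+ */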
+ mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+ ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+ if (ret) {
+ ath_err(common, "Failed to initialize MCI\n");
+ return ret;
+ }
+
+ INIT_WORK(&sc->mci_work, ath9k_mci_work);
+ ath_dbg(common, MCI, "MCI Initialized\n");
+
+ return 0;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+
+ ar9003_mci_cleanup(ah);
+
+ ath_dbg(common, MCI, "MCI De-Initialized\n");
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u32 mci_int, mci_int_rxmsg;
+ u32 offset, subtype, opcode;
+ u32 *pgpm;
+ u32 more_data = MCI_GPM_MORE;
+ bool skip_gpm = false;
+
+ ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
+ return;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+ u32 payload[4] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffff00};
+
+ /*
+ * The following REMOTE_RESET and SYS_WAKING used to be sent
+ * only when BT woke up. Now they are always sent, as a
+ * recovery method to reset BT MCI's RX alignment.
+ */
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+ payload, 16, true, false);
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+ NULL, 0, true, false);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
+
+ /*
+ * always do this for recovery and 2G/5G toggling and LNA_TRANS
+ */
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+ if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_SLEEP))
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_AWAKE))
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
+ skip_gpm = true;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+ while (more_data == MCI_GPM_MORE) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return;
+
+ pgpm = mci->gpm_buf.bf_addr;
+ offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
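+ /* offset is in bytes; convert it to a dword index into the GPM buffer */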
+ pgpm += (offset >> 2);
+
+ /*
+ * The first dword is the timer.
+ * The real data starts from the 2nd dword.
+ */
+ subtype = MCI_GPM_TYPE(pgpm);
+ opcode = MCI_GPM_OPCODE(pgpm);
+
+ if (skip_gpm)
+ goto recycle;
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype)) {
+ ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
+ } else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode, (u8 *)pgpm);
+ break;
+ default:
+ break;
+ }
+ }
+ recycle:
+ MCI_GPM_RECYCLE(pgpm);
+ }
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+ int value_dbm = MS(mci_hw->cont_status,
+ AR_MCI_CONT_RSSI_POWER);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
+ MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
+ "tx" : "rx",
+ MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
+ value_dbm);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+ mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+ ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
+ }
+}
+
+void ath_mci_enable(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (!common->btcoex_enabled)
+ return;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ sc->sc_ah->imask |= ATH9K_INT_MCI;
+}
+
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ struct ath9k_channel *chan = ah->curchan;
+ u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+ int i;
+ s16 chan_start, chan_end;
+ u16 wlan_chan;
+
+ if (!chan || !IS_CHAN_2GHZ(chan))
+ return;
+
+ if (allow_all)
+ goto send_wlan_chan;
+
+ wlan_chan = chan->channel - 2402;
+
+ chan_start = wlan_chan - 10;
+ chan_end = wlan_chan + 10;
+
+ if (IS_CHAN_HT40PLUS(chan))
+ chan_end += 20;
+ else if (IS_CHAN_HT40MINUS(chan))
+ chan_start -= 20;
+
+ /* adjust side band */
+ chan_start -= 7;
+ chan_end += 7;
+
+ if (chan_start <= 0)
+ chan_start = 0;
+ if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
+ chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
+
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "WLAN current channel %d mask BT channel %d - %d\n",
+ wlan_chan, chan_start, chan_end);
+
+ for (i = chan_start; i < chan_end; i++)
+ MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
+
+send_wlan_chan:
+ /* update and send wlan channels info to BT */
+ for (i = 0; i < 4; i++)
+ mci->wlan_channels[i] = channelmap[i];
+ ar9003_mci_send_wlan_channels(ah);
+ ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
+}
+
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+ bool old_concur_tx = mci_hw->concur_tx;
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
+ mci_hw->concur_tx = false;
+ return;
+ }
+
+ if (!IS_CHAN_2GHZ(ah->curchan))
+ return;
+
+ if (setchannel) {
+ struct ath9k_hw_cal_data *caldata = &sc->cur_chan->caldata;
+ if (IS_CHAN_HT40PLUS(ah->curchan) &&
+ (ah->curchan->channel > caldata->channel) &&
+ (ah->curchan->channel <= caldata->channel + 20))
+ return;
+ if (IS_CHAN_HT40MINUS(ah->curchan) &&
+ (ah->curchan->channel < caldata->channel) &&
+ (ah->curchan->channel >= caldata->channel - 20))
+ return;
+ mci_hw->concur_tx = false;
+ } else
+ mci_hw->concur_tx = concur_tx;
+
+ if (old_concur_tx != mci_hw->concur_tx)
+ ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
+}
+
+static void ath9k_mci_stomp_audio(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+
+ if (!mci->num_sco && !mci->num_a2dp)
+ return;
+
+ if (ah->stats.avgbrssi > 25) {
+ btcoex->stomp_audio = 0;
+ return;
+ }
+
+ btcoex->stomp_audio++;
+}
+
+void ath9k_mci_update_rssi(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+
+ ath9k_mci_stomp_audio(sc);
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
+ return;
+
+ if (ah->stats.avgbrssi >= 40) {
+ if (btcoex->rssi_count < 0)
+ btcoex->rssi_count = 0;
+ if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, true);
+ }
+ } else {
+ if (btcoex->rssi_count > 0)
+ btcoex->rssi_count = 0;
+ if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, false);
+ }
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644
index 000000000..069588376
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#include "ar9003_mci.h"
+
+#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY 16
+#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD 40
+#define ATH_MCI_BDR_DUTY_CYCLE 20
+#define ATH_MCI_MAX_DUTY_CYCLE 90
+
+#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in 0.25 ms */
+#define ATH_MCI_MAX_ACL_PROFILE 7
+#define ATH_MCI_MAX_SCO_PROFILE 1
+#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
+ ATH_MCI_MAX_SCO_PROFILE)
+
+#define ATH_MCI_INQUIRY_PRIO 62
+#define ATH_MCI_HI_PRIO 60
+#define ATH_MCI_NUM_BT_CHANNELS 79
+#define ATH_MCI_CONCUR_TX_SWITCH 5
+
+#define MCI_GPM_SET_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) |= (1 << (_bt_chan & 7)); \
+ } \
+ } while (0)
+
+#define MCI_GPM_CLR_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) &= ~(1 << (_bt_chan & 7));\
+ } \
+ } while (0)
+
+#define INC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp++; \
+ if (!_info->edr) \
+ _mci->num_bdr++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
+ _mci->num_sco++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp--; \
+ if (!_info->edr) \
+ _mci->num_bdr--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
+ _mci->num_sco--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \
+ _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
+struct ath_mci_profile_info {
+ u8 type;
+ u8 conn_handle;
+ bool start;
+ bool master;
+ bool edr;
+ u8 voice_type;
+ u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */
+ u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */
+ u8 A; /* HID: Sniff attempt, in slots */
+ struct list_head list;
+};
+
+struct ath_mci_profile_status {
+ bool is_critical;
+ bool is_link;
+ u8 conn_handle;
+};
+
+struct ath_mci_profile {
+ struct list_head info;
+ DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+ u16 aggr_limit;
+ u8 num_mgmt;
+ u8 num_sco;
+ u8 num_a2dp;
+ u8 num_hid;
+ u8 num_pan;
+ u8 num_other_acl;
+ u8 num_bdr;
+ u8 voice_priority;
+};
+
+struct ath_mci_buf {
+ void *bf_addr; /* virtual addr of desc */
+ dma_addr_t bf_paddr; /* physical addr of buffer */
+ u32 bf_len; /* len of data */
+};
+
+struct ath_mci_coex {
+ struct ath_mci_buf sched_buf;
+ struct ath_mci_buf gpm_buf;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+void ath9k_mci_update_rssi(struct ath_softc *sc);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ath_mci_enable(struct ath_softc *sc);
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all);
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx);
+#else
+static inline void ath_mci_enable(struct ath_softc *sc)
+{
+}
+static inline void ath9k_mci_update_wlan_channels(struct ath_softc *sc,
+ bool allow_all)
+{
+}
+static inline void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#endif /* MCI_H */
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
new file mode 100644
index 000000000..e6fef1be9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/nl80211.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/ath9k_platform.h>
+#include <linux/module.h>
+#include "ath9k.h"
+
+static const struct pci_device_id ath_pci_id_table[] = {
+ { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+#ifdef CONFIG_ATH9K_PCOEM
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1C71),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE01F),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6632),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6642),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_QMI,
+ 0x0306),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x185F, /* WNC */
+ 0x309D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147C),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x1536),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+
+ /* AR9285 card for Asus */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002B,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C37),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
+
+ { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
+ { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+
+ /* Killer Wireless (3x3) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2000),
+ .driver_data = ATH9K_PCI_KILLER },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2001),
+ .driver_data = ATH9K_PCI_KILLER },
+
+ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+
+#ifdef CONFIG_ATH9K_PCOEM
+ /* PCI-E CUS198 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2086),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1237),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2126),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x126A),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+
+ /* PCI-E CUS230 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2152),
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE075),
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB225 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3122),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4105),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4106),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410D),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410E),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410F),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC706),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC680),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC708),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3218),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3219),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
+ /* AR9485 cards with PLL power-save disabled by default. */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C97),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2100),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1C56, /* ASKEY */
+ 0x4001),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6627),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x11AD, /* LITEON */
+ 0x6628),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04E),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE04F),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x144F, /* ASKEY */
+ 0x7197),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2000),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x2001),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1186),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F86),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1195),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1F95),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C00),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x1B9A, /* XAVI */
+ 0x1C01),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850D),
+ .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE },
+#endif
+
+ { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
+ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+
+#ifdef CONFIG_ATH9K_PCOEM
+ /* PCI-E CUS217 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2116),
+ .driver_data = ATH9K_PCI_CUS217 },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6661),
+ .driver_data = ATH9K_PCI_CUS217 },
+
+ /* AR9462 with WoW support */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3117),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3214),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ATTANSIC,
+ 0x0091),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2110),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x850E),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6631),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x11AD, /* LITEON */
+ 0x6641),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ PCI_VENDOR_ID_HP,
+ 0x1864),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0063),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x14CD, /* USI */
+ 0x0064),
+ .driver_data = ATH9K_PCI_WOW },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0034,
+ 0x10CF, /* Fujitsu */
+ 0x1783),
+ .driver_data = ATH9K_PCI_WOW },
+
+ /* Killer Wireless (2x2) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0030,
+ 0x1A56,
+ 0x2003),
+ .driver_data = ATH9K_PCI_KILLER },
+
+ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
+ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
+
+ /* CUS252 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3028),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2176),
+ .driver_data = ATH9K_PCI_CUS252 |
+ ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 1-ANT */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE068),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA119),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0632),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x06B2),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0842),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x1842),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x6671),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2811),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2812),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A1),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2F8A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+ /* WB335 1-ANT / Antenna Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3025),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302B),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE069),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3028),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0622),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0672),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0662),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x06A2),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0682),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213A),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213C),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x18E3),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x217F),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_HP,
+ 0x2005),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020C),
+ .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB335 2-ANT / Antenna-Diversity */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411D),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x411E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4129),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x412A),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x302C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0642),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0652),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0612),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0832),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x1832),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0692),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0803),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x11AD, /* LITEON */
+ 0x0813),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2130),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x213B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2182),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218B),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x218C),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2F82),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x144F, /* ASKEY */
+ 0x7202),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2810),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x2813),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x1B9A, /* XAVI */
+ 0x28A4),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ 0x185F, /* WNC */
+ 0xA120),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE07F),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE08F),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE081),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE091),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE099),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_LENOVO,
+ 0x4026),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_ASUSTEK,
+ 0x85F2),
+ .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0036,
+ PCI_VENDOR_ID_DELL,
+ 0x020E),
+ .driver_data = ATH9K_PCI_AR9565_2ANT |
+ ATH9K_PCI_BT_ANT_DIV |
+ ATH9K_PCI_LED_ACT_HI},
+
+ /* PCI-E AR9565 (WB335) */
+ { PCI_VDEVICE(ATHEROS, 0x0036),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+#endif
+
+ { 0 }
+};
+
+
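+/*
+ * Worked example for the helper below: the PCI cache line size register is
+ * specified in 32-bit word units, so a 64-byte cache line reads back as 16
+ * and *csz becomes 16. If the register was left at zero, the code falls
+ * back to DEFAULT_CACHELINE >> 2 words.
+ */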
+/* return bus cachesize in 4B word units */
+static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u8 u8tmp;
+
+ pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
+ *csz = (int)u8tmp;
+
+ /*
+ * This check was put in to avoid "unpleasant" consequences if
+ * the bootrom has not fully initialized all PCI devices.
+ * Sometimes the cache line size register is not set
+ */
+
+ if (*csz == 0)
+ *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
+}
+
+static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+
+ if (pdata && !pdata->use_eeprom) {
+ if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
+ ath_err(common,
+ "%s: eeprom read failed, offset %08x is out of range\n",
+ __func__, off);
+ return false;
+ }
+
+ *data = pdata->eeprom_data[off];
+ } else {
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ common->ops->read(ah, AR5416_EEPROM_OFFSET +
+ (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT)) {
+ return false;
+ }
+
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+ }
+
+ return true;
+}
+
+/* Needs to be called after the btcoex capabilities have been discovered */
+static void ath_pci_aspm_init(struct ath_common *common)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ struct pci_dev *parent;
+ u16 aspm;
+
+ if (!ah->is_pciexpress)
+ return;
+
+ parent = pdev->bus->self;
+ if (!parent)
+ return;
+
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
+ (AR_SREV_9285(ah))) {
+ /* Bluetooth coexistence requires disabling ASPM. */
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
+
+ /*
+ * Both upstream and downstream PCIe components should
+ * have the same ASPM settings.
+ */
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
+
+ ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
+ return;
+ }
+
+ /*
+ * 0x70c - Ack Frequency Register.
+ *
+ * Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY.
+ *
+ * 000 : 1 us
+ * 001 : 2 us
+ * 010 : 4 us
+ * 011 : 8 us
+ * 100 : 16 us
+ * 101 : 32 us
+ * 110/111 : 64 us
+ */
+ if (AR_SREV_9462(ah))
+ pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
+ if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
+ ah->aspm_enabled = true;
+ /* Initialize PCIe PM and SERDES registers. */
+ ath9k_hw_configpcipowersave(ah, false);
+ ath_info(common, "ASPM enabled: 0x%x\n", aspm);
+ }
+}
+
+static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
+ .read_cachesize = ath_pci_read_cachesize,
+ .eeprom_read = ath_pci_eeprom_read,
+ .aspm_init = ath_pci_aspm_init,
+};
+
+static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ath_softc *sc;
+ struct ieee80211_hw *hw;
+ u8 csz;
+ u32 val;
+ int ret = 0;
+ char hw_name[64];
+
+ if (pcim_enable_device(pdev))
+ return -EIO;
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("32-bit DMA not available\n");
+ return ret;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ pr_err("32-bit DMA consistent DMA enable failed\n");
+ return ret;
+ }
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+ if (csz == 0) {
+ /*
+ * Linux 2.4.18 (at least) writes the cache line size
+ * register as a 16-bit wide register which is wrong.
+ * We must have this setup properly for rx buffer
+ * DMA to work so force a reasonable value here if it
+ * comes up zero.
+ */
+ csz = L1_CACHE_BYTES / sizeof(u32);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+ }
+ /*
+ * The default setting of latency timer yields poor results,
+ * set it to the value used by other systems. It may be worth
+ * tweaking this setting more.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+ pci_set_master(pdev);
+
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), "ath9k");
+ if (ret) {
+ dev_err(&pdev->dev, "PCI memory region reserve error\n");
+ return -ENODEV;
+ }
+
+ ath9k_fill_chanctx_ops();
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
+ if (!hw) {
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
+ return -ENOMEM;
+ }
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ sc = hw->priv;
+ sc->hw = hw;
+ sc->dev = &pdev->dev;
+ sc->mem = pcim_iomap_table(pdev)[0];
+ sc->driver_data = id->driver_data;
+
+ ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_irq;
+ }
+
+ sc->irq = pdev->irq;
+
+ ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
+ wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
+ hw_name, (unsigned long)sc->mem, pdev->irq);
+
+ return 0;
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+static void ath_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath_softc *sc = hw->priv;
+
+ if (!is_ath9k_unloaded)
+ sc->sc_ah->ah_flags |= AH_UNPLUGGED;
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ath_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
+ dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
+ return 0;
+ }
+
+ /* The device has to be moved to FULLSLEEP forcibly.
+ * Otherwise the chip never moves to full sleep
+ * when no interface is up.
+ */
+ ath9k_stop_btcoex(sc);
+ ath9k_hw_disable(sc->sc_ah);
+ del_timer_sync(&sc->sleep_timer);
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
+ return 0;
+}
+
+static int ath_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 val;
+
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ ath_pci_aspm_init(common);
+ ah->reset_power_on = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath9k_pm_ops, ath_pci_suspend, ath_pci_resume);
+
+#define ATH9K_PM_OPS (&ath9k_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define ATH9K_PM_OPS NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+
+MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
+
+static struct pci_driver ath_pci_driver = {
+ .name = "ath9k",
+ .id_table = ath_pci_id_table,
+ .probe = ath_pci_probe,
+ .remove = ath_pci_remove,
+ .driver.pm = ATH9K_PM_OPS,
+};
+
+int ath_pci_init(void)
+{
+ return pci_register_driver(&ath_pci_driver);
+}
+
+void ath_pci_exit(void)
+{
+ pci_unregister_driver(&ath_pci_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
new file mode 100644
index 000000000..4a1b99238
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef PHY_H
+#define PHY_H
+
+#define CHANSEL_DIV 15
+#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
+#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
+
+#define AR_PHY_BASE 0x9800
+#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
+
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
+#define AR_PHY_TX_GAIN_CLC 0x0000001E
+#define AR_PHY_TX_GAIN_CLC_S 1
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CLC_TBL1 0xa35c
+#define AR_PHY_CLC_I0 0x07ff0000
+#define AR_PHY_CLC_I0_S 16
+#define AR_PHY_CLC_Q0 0x0000ffd0
+#define AR_PHY_CLC_Q0_S 5
+
+#define ANTSWAP_AB 0x0001
+#define REDUCE_CHAIN_0 0x00000050
+#define REDUCE_CHAIN_1 0x00000051
+#define AR_PHY_CHIP_ID 0x9818
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_PLL_CONTROL 0x16180
+#define AR_PHY_PLL_MODE 0x16184
+
+enum ath9k_ant_div_comb_lna_conf {
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
+ ATH_ANT_DIV_COMB_LNA2,
+ ATH_ANT_DIV_COMB_LNA1,
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
new file mode 100644
index 000000000..6fb40ef86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
+
+static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
+{
+ return sc->ps_enabled &&
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
+}
+
+/*
+ * Setup and link descriptors.
+ *
+ * 11N: we can no longer afford to self link the last descriptor.
+ * MAC acknowledges BA status as long as it copies frames to host
+ * buffer (or rx fifo). This can incorrectly acknowledge packets
+ * to a sender if last desc is self-linked.
+ */
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_desc *ds;
+ struct sk_buff *skb;
+
+ ds = bf->bf_desc;
+ ds->ds_link = 0; /* link to null */
+ ds->ds_data = bf->bf_buf_addr;
+
+ /* virtual addr of the beginning of the buffer. */
+ skb = bf->bf_mpdu;
+ BUG_ON(skb == NULL);
+ ds->ds_vdata = skb->data;
+
+ /*
+ * setup rx descriptors. The rx_bufsize here tells the hardware
+ * how much data it can DMA to us and that we are prepared
+ * to process
+ */
+ ath9k_hw_setuprxdesc(ah, ds,
+ common->rx_bufsize,
+ 0);
+
+ if (sc->rx.rxlink)
+ *sc->rx.rxlink = bf->bf_daddr;
+ else if (!flush)
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
+
+ sc->rx.rxlink = &ds->ds_link;
+}
+
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
+{
+ if (sc->rx.buf_hold)
+ ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
+
+ sc->rx.buf_hold = bf;
+}
+
+static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
+{
+ /* XXX block beacon interrupts */
+ ath9k_hw_setantenna(sc->sc_ah, antenna);
+ sc->rx.defant = antenna;
+ sc->rx.rxotherant = 0;
+}
+
+static void ath_opmode_init(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath_calcrxfilter(sc);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ ath_hw_setbssidmask(common);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_edma *rx_edma;
+ struct sk_buff *skb;
+ struct ath_rxbuf *bf;
+
+ rx_edma = &sc->rx.rx_edma[qtype];
+ if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+ return false;
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
+ list_del_init(&bf->list);
+
+ skb = bf->bf_mpdu;
+
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
+
+ SKB_CB_ATHBUF(skb) = bf;
+ ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+ __skb_queue_tail(&rx_edma->rx_fifo, skb);
+
+ return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_rxbuf *bf, *tbf;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ ath_dbg(common, QUEUE, "No free rx buf available\n");
+ return;
+ }
+
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
+ if (!ath_rx_edma_buf_link(sc, qtype))
+ break;
+
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_rxbuf *bf;
+ struct ath_rx_edma *rx_edma;
+ struct sk_buff *skb;
+
+ rx_edma = &sc->rx.rx_edma[qtype];
+
+ while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_rxbuf *bf;
+
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ if (bf->bf_mpdu) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_BIDIRECTIONAL);
+ dev_kfree_skb_any(bf->bf_mpdu);
+ bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
+ }
+ }
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+ __skb_queue_head_init(&rx_edma->rx_fifo);
+ rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct sk_buff *skb;
+ struct ath_rxbuf *bf;
+ int error = 0, i;
+ u32 size;
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+ ah->caps.rx_lp_qdepth);
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+ ah->caps.rx_hp_qdepth);
+
+ size = sizeof(struct ath_rxbuf) * nbufs;
+ bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+ for (i = 0; i < nbufs; i++, bf++) {
+ skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
+ if (!skb) {
+ error = -ENOMEM;
+ goto rx_init_fail;
+ }
+
+ memset(skb->data, 0, common->rx_bufsize);
+ bf->bf_mpdu = skb;
+
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ common->rx_bufsize,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
+ ath_err(common,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto rx_init_fail;
+ }
+
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+
+ return 0;
+
+rx_init_fail:
+ ath_rx_edma_cleanup(sc);
+ return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+ ath9k_hw_rxena(sc->sc_ah);
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
+ ath_opmode_init(sc);
+ ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct sk_buff *skb;
+ struct ath_rxbuf *bf;
+ int error = 0;
+
+ spin_lock_init(&sc->sc_pcu_lock);
+
+ common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+ sc->sc_ah->caps.rx_status_len;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ return ath_rx_edma_init(sc, nbufs);
+
+ ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ /* Initialize rx descriptors */
+
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+ "rx", nbufs, 1, 0);
+ if (error != 0) {
+ ath_err(common,
+ "failed to allocate rx descriptors: %d\n",
+ error);
+ goto err;
+ }
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+ GFP_KERNEL);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
+ ath_err(common,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto err;
+ }
+ }
+ sc->rx.rxlink = NULL;
+err:
+ if (error)
+ ath_rx_cleanup(sc);
+
+ return error;
+}
+
+void ath_rx_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ath_rxbuf *bf;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_rx_edma_cleanup(sc);
+ return;
+ }
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = bf->bf_mpdu;
+ if (skb) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
+ }
+ }
+}
+
+/*
+ * Calculate the receive filter according to the
+ * operating mode and state:
+ *
+ * o always accept unicast, broadcast, and multicast traffic
+ * o maintain current state of phy error reception (the hal
+ * may enable phy error frames for noise immunity work)
+ * o probe request frames are accepted only when operating in
+ * hostap, adhoc, or monitor modes
+ * o enable promiscuous mode according to the interface state
+ * o accept beacons:
+ * - when operating in adhoc mode so the 802.11 layer creates
+ * node table entries for peers,
+ * - when operating in station mode for collecting rssi data when
+ * the station is otherwise quiet, or
+ * - when operating as a repeater so we see repeater-sta beacons
+ * - when scanning
+ */
+
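+/*
+ * Worked example of the rules above: an associated single station-mode vif
+ * on a 20 MHz non-HT channel, not scanning, with no monitor interface and
+ * none of the promiscuous/control filter flags set, typically resolves to
+ *
+ *	ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST |
+ *	ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_MYBEACON
+ *
+ * On HT/VHT channel widths ATH9K_RX_FILTER_COMP_BAR is added as well.
+ */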
+u32 ath_calcrxfilter(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 rfilt;
+
+ if (config_enabled(CONFIG_ATH9K_TX99))
+ return 0;
+
+ rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* if operating on a DFS channel, enable radar pulse detection */
+ if (sc->hw->conf.radar_enabled)
+ rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;
+
+ spin_lock_bh(&sc->chan_lock);
+
+ if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when operating in monitor mode. AP mode does not
+ * need this since it receives all in-BSS frames anyway.
+ */
+ if (sc->sc_ah->is_monitoring)
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
+ sc->sc_ah->dynack.enabled)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
+ (sc->cur_chan->nvifs <= 1) &&
+ !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
+ (sc->cur_chan->rxfilter & FIF_PSPOLL))
+ rfilt |= ATH9K_RX_FILTER_PSPOLL;
+
+ if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+
+ if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) {
+ /* This is needed for older chips */
+ if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
+ rfilt |= ATH9K_RX_FILTER_PROM;
+ rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
+ }
+
+ if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
+ AR_SREV_9561(sc->sc_ah))
+ rfilt |= ATH9K_RX_FILTER_4ADDRESS;
+
+ if (ath9k_is_chanctx_enabled() &&
+ test_bit(ATH_OP_SCANNING, &common->op_flags))
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ spin_unlock_bh(&sc->chan_lock);
+
+ return rfilt;
+
+}
+
+void ath_startrecv(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rxbuf *bf, *tbf;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_edma_start_recv(sc);
+ return;
+ }
+
+ if (list_empty(&sc->rx.rxbuf))
+ goto start_recv;
+
+ sc->rx.buf_hold = NULL;
+ sc->rx.rxlink = NULL;
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
+ ath_rx_buf_link(sc, bf, false);
+ }
+
+ /* We could have deleted elements so the list may be empty now */
+ if (list_empty(&sc->rx.rxbuf))
+ goto start_recv;
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
+ ath9k_hw_rxena(ah);
+
+start_recv:
+ ath_opmode_init(sc);
+ ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
+}
+
+static void ath_flushrecv(struct ath_softc *sc)
+{
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
+}
+
+bool ath_stoprecv(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ bool stopped, reset = false;
+
+ ath9k_hw_abortpcurecv(ah);
+ ath9k_hw_setrxfilter(ah, 0);
+ stopped = ath9k_hw_stopdmarecv(ah, &reset);
+
+ ath_flushrecv(sc);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_edma_stop_recv(sc);
+ else
+ sc->rx.rxlink = NULL;
+
+ if (!(ah->ah_flags & AH_UNPLUGGED) &&
+ unlikely(!stopped)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Could not stop RX, we could be "
+ "confusing the DMA engine when we start RX up\n");
+ ATH_DBG_WARN_ON_ONCE(!stopped);
+ }
+ return stopped && !reset;
+}
+
+static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
+{
+ /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos, *end, id, elen;
+ struct ieee80211_tim_ie *tim;
+
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ pos = mgmt->u.beacon.variable;
+ end = skb->data + skb->len;
+
+ while (pos + 2 < end) {
+ id = *pos++;
+ elen = *pos++;
+ if (pos + elen > end)
+ break;
+
+ if (id == WLAN_EID_TIM) {
+ if (elen < sizeof(*tim))
+ break;
+ tim = (struct ieee80211_tim_ie *) pos;
+ if (tim->dtim_count != 0)
+ break;
+ return tim->bitmap_ctrl & 0x01;
+ }
+
+ pos += elen;
+ }
+
+ return false;
+}
+
+static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ bool skip_beacon = false;
+
+ if (skb->len < 24 + 8 + 2 + 2)
+ return;
+
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
+
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
+ ath_dbg(common, PS,
+ "Reconfigure beacon timers based on synchronized timestamp\n");
+
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+ if (ath9k_is_chanctx_enabled()) {
+ if (sc->cur_chan == &sc->offchannel.chan)
+ skip_beacon = true;
+ }
+#endif
+
+ if (!skip_beacon &&
+ !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
+ ath9k_set_beacon(sc);
+
+ ath9k_p2p_beacon_sync(sc);
+ }
+
+ if (ath_beacon_dtim_pending_cab(skb)) {
+ /*
+ * Remain awake waiting for buffered broadcast/multicast
+ * frames. If the last broadcast/multicast frame is not
+ * received properly, the next beacon frame will work as
+ * a backup trigger for returning into NETWORK SLEEP state,
+ * so we are waiting for it as well.
+ */
+ ath_dbg(common, PS,
+ "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
+ return;
+ }
+
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
+ /*
+ * This can happen if a broadcast frame is dropped or the AP
+ * fails to send a frame indicating that all CAB frames have
+ * been delivered.
+ */
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
+ ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
+ }
+}
+
+static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* Process Beacon and CAB receive in PS state */
+ if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
+ && mybeacon) {
+ ath_rx_ps_beacon(sc, skb);
+ } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
+ (ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_moredata(hdr->frame_control)) {
+ /*
+ * No more broadcast/multicast frames to be received at this
+ * point.
+ */
+ sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
+ ath_dbg(common, PS,
+ "All PS CAB frames received, back to sleep\n");
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_morefrags(hdr->frame_control)) {
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
+ ath_dbg(common, PS,
+ "Going back to sleep after having received PS-Poll data (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
+ }
+}
+
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype,
+ struct ath_rx_status *rs,
+ struct ath_rxbuf **dest)
+{
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ath_rxbuf *bf;
+ int ret;
+
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return false;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
+
+ ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
+ if (ret == -EINPROGRESS) {
+ /* let the device gain the buffer again */
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
+ return false;
+ }
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ if (ret == -EINVAL) {
+ /* corrupt descriptor, skip this one and the following one */
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (skb) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ }
+
+ bf = NULL;
+ }
+
+ *dest = bf;
+ return true;
+}
+
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_rxbuf *bf = NULL;
+
+ while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
+ if (!bf)
+ continue;
+
+ return bf;
+ }
+ return NULL;
+}
+
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_desc *ds;
+ struct ath_rxbuf *bf;
+ int ret;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
+ if (bf == sc->rx.buf_hold)
+ return NULL;
+
+ ds = bf->bf_desc;
+
+ /*
+ * Must provide the virtual address of the current
+ * descriptor, the physical address, and the virtual
+ * address of the next descriptor in the h/w chain.
+ * This allows the HAL to look ahead to see if the
+ * hardware is done with a descriptor by checking the
+ * done bit in the following descriptor and the address
+ * of the current descriptor the DMA engine is working
+ * on. All this is necessary because of our use of
+ * a self-linked list to avoid rx overruns.
+ */
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ struct ath_rx_status trs;
+ struct ath_rxbuf *tbf;
+ struct ath_desc *tds;
+
+ memset(&trs, 0, sizeof(trs));
+ if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
+
+ /*
+ * On some hardware the descriptor status words could
+ * get corrupted, including the done bit. Because of
+ * this, check if the next descriptor's done bit is
+ * set or not.
+ *
+ * If the next descriptor's done bit is set, the current
+ * descriptor has been corrupted. Force s/w to discard
+ * this descriptor and continue...
+ */
+
+ tds = tbf->bf_desc;
+ ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
+ if (ret == -EINPROGRESS)
+ return NULL;
+
+ /*
+ * Re-check previous descriptor, in case it has been filled
+ * in the meantime.
+ */
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ /*
+ * mark descriptor as zero-length and set the 'more'
+ * flag to ensure that both buffers get discarded
+ */
+ rs->rs_datalen = 0;
+ rs->rs_more = true;
+ }
+ }
+
+ list_del(&bf->list);
+ if (!bf->bf_mpdu)
+ return bf;
+
+ /*
+ * Synchronize the DMA transfer with CPU before
+ * 1. accessing the frame
+ * 2. requeueing the same buffer to h/w
+ */
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ return bf;
+}
+
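+/*
+ * Sketch of the wrap handling below: the hardware timestamps a frame with
+ * only the low 32 bits of the TSF (rs_tstamp), so the 64-bit mactime is
+ * rebuilt from the TSF read in the rx tasklet. If the low word wrapped in
+ * between, the naive combination is off by 2^32. For example, with
+ * tsf = 0x500000100 (tsf_lower = 0x00000100) and rs_tstamp = 0xffffff00,
+ * the frame arrived just before the wrap: rs_tstamp exceeds tsf_lower by
+ * more than 0x10000000, so 0x100000000 is subtracted and
+ * mactime = 0x4ffffff00.
+ */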
+static void ath9k_process_tsf(struct ath_rx_status *rs,
+ struct ieee80211_rx_status *rxs,
+ u64 tsf)
+{
+ u32 tsf_lower = tsf & 0xffffffff;
+
+ rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
+ if (rs->rs_tstamp > tsf_lower &&
+ unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
+ rxs->mactime -= 0x100000000ULL;
+
+ if (rs->rs_tstamp < tsf_lower &&
+ unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
+ rxs->mactime += 0x100000000ULL;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark the packet status here and always
+ * push the frame up to let mac80211 handle the actual error case, be it a
+ * missing decryption key or a real decryption error. This lets us keep the
+ * statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rx_status,
+ bool *decrypt_error, u64 tsf)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr;
+ bool discard_current = sc->rx.discard_next;
+
+ /*
+ * Discard corrupt descriptors which are marked in
+ * ath_get_next_rx_buf().
+ */
+ if (discard_current)
+ goto corrupt;
+
+ sc->rx.discard_next = false;
+
+ /*
+ * Discard zero-length packets.
+ */
+ if (!rx_stats->rs_datalen) {
+ RX_STAT_INC(rx_len_err);
+ goto corrupt;
+ }
+
+ /*
+ * rs_status follows rs_datalen so if rs_datalen is too large
+ * we can take a hint that hardware corrupted it, so ignore
+ * those frames.
+ */
+ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+ RX_STAT_INC(rx_len_err);
+ goto corrupt;
+ }
+
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
+ /*
+ * Return immediately if the RX descriptor has been marked
+ * as corrupt based on the various error bits.
+ *
+ * This is different from the other corrupt descriptor
+ * condition handled above.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+ goto corrupt;
+
+ hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+
+ ath9k_process_tsf(rx_stats, rx_status, tsf);
+ ath_debug_stat_rx(sc, rx_stats);
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
+ ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
+ if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
+ RX_STAT_INC(rx_spectral);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
+ spin_lock_bh(&sc->chan_lock);
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
+ sc->cur_chan->rxfilter)) {
+ spin_unlock_bh(&sc->chan_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&sc->chan_lock);
+
+ if (ath_is_mybeacon(common, hdr)) {
+ RX_STAT_INC(rx_beacons);
+ rx_stats->is_mybeacon = true;
+ }
+
+ /*
+ * This shouldn't happen, but have a safety check anyway.
+ */
+ if (WARN_ON(!ah->curchan))
+ return -EINVAL;
+
+ if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
+ ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+ rx_stats->rs_rate);
+ RX_STAT_INC(rx_rate_err);
+ return -EINVAL;
+ }
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (rx_stats->is_mybeacon)
+ ath_chanctx_beacon_recv_ev(sc,
+ ATH_CHANCTX_EVENT_BEACON_RECEIVED);
+ }
+
+ ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
+
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats->rs_antenna;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control))
+ sc->rx.num_pkts++;
+#endif
+
+ return 0;
+
+corrupt:
+ sc->rx.discard_next = rx_stats->rs_more;
+ return -EINVAL;
+}
+
+/*
+ * Run the LNA combining algorithm only in these cases:
+ *
+ * Standalone WLAN cards with both LNA/Antenna diversity
+ * enabled in the EEPROM.
+ *
+ * WLAN+BT cards which are in the supported card list
+ * in ath_pci_id_table and for which the user has loaded
+ * the driver with "bt_ant_diversity" set to true.
+ */
+static void ath9k_antenna_check(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
+ return;
+
+ /*
+ * Change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs->rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs->rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
+ if (common->bt_ant_diversity)
+ ath_ant_comb_scan(sc, rs);
+ } else {
+ ath_ant_comb_scan(sc, rs);
+ }
+}
+
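+/*
+ * Note: for aggregated frames, pass the A-MPDU details on to mac80211.
+ * All subframes of one aggregate share the same ampdu_reference; the
+ * reference is bumped once the last subframe has been seen, and
+ * delimiter CRC errors are flagged separately.
+ */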
+static void ath9k_apply_ampdu_details(struct ath_softc *sc,
+ struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
+{
+ if (rs->rs_isaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+
+ rxs->ampdu_reference = sc->rx.ampdu_ref;
+
+ if (!rs->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
+ sc->rx.ampdu_ref++;
+ }
+
+ if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
+ rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
+ }
+}
+
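+/*
+ * Note: main RX processing loop. Completed buffers are pulled from the
+ * RX queue (the EDMA high/low priority queues on chips with
+ * ATH9K_HW_CAP_EDMA, the legacy descriptor list otherwise), validated,
+ * and handed to mac80211 via ieee80211_rx(). A freshly allocated skb is
+ * swapped into the buffer before delivery so that the descriptor can be
+ * requeued to hardware immediately. The loop is bounded by a budget of
+ * 512 buffers per call.
+ */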
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+ struct ath_rxbuf *bf;
+ struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
+ struct ieee80211_rx_status *rxs;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hw *hw = sc->hw;
+ int retval;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int dma_type;
+ u64 tsf = 0;
+ unsigned long flags;
+ dma_addr_t new_buf_addr;
+ unsigned int budget = 512;
+ struct ieee80211_hdr *hdr;
+
+ if (edma)
+ dma_type = DMA_BIDIRECTIONAL;
+ else
+ dma_type = DMA_FROM_DEVICE;
+
+ qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
+
+ tsf = ath9k_hw_gettsf64(ah);
+
+ do {
+ bool decrypt_error = false;
+
+ memset(&rs, 0, sizeof(rs));
+ if (edma)
+ bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+ else
+ bf = ath_get_next_rx_buf(sc, &rs);
+
+ if (!bf)
+ break;
+
+ skb = bf->bf_mpdu;
+ if (!skb)
+ continue;
+
+ /*
+ * Take frame header from the first fragment and RX status from
+ * the last one.
+ */
+ if (sc->rx.frag)
+ hdr_skb = sc->rx.frag;
+ else
+ hdr_skb = skb;
+
+ rxs = IEEE80211_SKB_RXCB(hdr_skb);
+ memset(rxs, 0, sizeof(struct ieee80211_rx_status));
+
+ retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
+ &decrypt_error, tsf);
+ if (retval)
+ goto requeue_drop_frag;
+
+ /* Ensure we always have an skb to requeue once we are done
+ * processing the current buffer's skb */
+ requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
+
+ /* If there is no memory, we ignore the current RX'd frame,
+ * tell the hardware it can give us a new frame using the old
+ * skb and put it at the tail of the sc->rx.rxbuf list for
+ * processing. */
+ if (!requeue_skb) {
+ RX_STAT_INC(rx_oom_err);
+ goto requeue_drop_frag;
+ }
+
+ /* We will now give the hardware our shiny newly allocated skb */
+ new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
+ common->rx_bufsize, dma_type);
+ if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
+ dev_kfree_skb_any(requeue_skb);
+ goto requeue_drop_frag;
+ }
+
+ /* Unmap the frame */
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, dma_type);
+
+ bf->bf_mpdu = requeue_skb;
+ bf->bf_buf_addr = new_buf_addr;
+
+ skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+ if (ah->caps.rx_status_len)
+ skb_pull(skb, ah->caps.rx_status_len);
+
+ if (!rs.rs_more)
+ ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
+
+ if (rs.rs_more) {
+ RX_STAT_INC(rx_frags);
+ /*
+ * rs_more indicates chained descriptors which can be
+ * used to link buffers together for a sort of
+ * scatter-gather operation.
+ */
+ if (sc->rx.frag) {
+ /* too many fragments - cannot handle frame */
+ dev_kfree_skb_any(sc->rx.frag);
+ dev_kfree_skb_any(skb);
+ RX_STAT_INC(rx_too_many_frags_err);
+ skb = NULL;
+ }
+ sc->rx.frag = skb;
+ goto requeue;
+ }
+
+ if (sc->rx.frag) {
+ int space = skb->len - skb_tailroom(hdr_skb);
+
+ if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+ dev_kfree_skb(skb);
+ RX_STAT_INC(rx_oom_err);
+ goto requeue_drop_frag;
+ }
+
+ sc->rx.frag = NULL;
+
+ skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ skb = hdr_skb;
+ }
+
+ if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
+ skb_trim(skb, skb->len - 8);
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)) ||
+ ath9k_check_auto_sleep(sc))
+ ath_rx_ps(sc, skb, rs.is_mybeacon);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ ath9k_antenna_check(sc, &rs);
+ ath9k_apply_ampdu_details(sc, &rs, rxs);
+ ath_debug_rate_stats(sc, &rs, skb);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_ack(hdr->frame_control))
+ ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);
+
+ ieee80211_rx(hw, skb);
+
+requeue_drop_frag:
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
+requeue:
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+
+ if (!edma) {
+ ath_rx_buf_relink(sc, bf, flush);
+ if (!flush)
+ ath9k_hw_rxena(ah);
+ } else if (!flush) {
+ ath_rx_edma_buf_link(sc, qtype);
+ }
+
+ if (!budget--)
+ break;
+ } while (1);
+
+ if (!(ah->imask & ATH9K_INT_RXEOL)) {
+ ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
new file mode 100644
index 000000000..caba54dda
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -0,0 +1,2051 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_H
+#define REG_H
+
+#include "../reg.h"
+
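+/*
+ * Conventions used throughout this file: multi-bit fields usually come
+ * with a companion _S define giving the field's shift, and registers
+ * whose offset differs between MAC generations select their address at
+ * runtime via AR_SREV_*() checks on the ath_hw pointer "ah".
+ */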
+#define AR_CR 0x0008
+#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
+#define AR_CR_RXD 0x00000020
+#define AR_CR_SWI 0x00000040
+
+#define AR_RXDP 0x000C
+
+#define AR_CFG 0x0014
+#define AR_CFG_SWTD 0x00000001
+#define AR_CFG_SWTB 0x00000002
+#define AR_CFG_SWRD 0x00000004
+#define AR_CFG_SWRB 0x00000008
+#define AR_CFG_SWRG 0x00000010
+#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
+#define AR_CFG_PHOK 0x00000100
+#define AR_CFG_CLK_GATE_DIS 0x00000400
+#define AR_CFG_EEBS 0x00000200
+#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
+#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
+
+#define AR_RXBP_THRESH 0x0018
+#define AR_RXBP_THRESH_HP 0x0000000f
+#define AR_RXBP_THRESH_HP_S 0
+#define AR_RXBP_THRESH_LP 0x00003f00
+#define AR_RXBP_THRESH_LP_S 8
+
+#define AR_MIRT 0x0020
+#define AR_MIRT_VAL 0x0000ffff
+#define AR_MIRT_VAL_S 16
+
+#define AR_IER 0x0024
+#define AR_IER_ENABLE 0x00000001
+#define AR_IER_DISABLE 0x00000000
+
+#define AR_TIMT 0x0028
+#define AR_TIMT_LAST 0x0000ffff
+#define AR_TIMT_LAST_S 0
+#define AR_TIMT_FIRST 0xffff0000
+#define AR_TIMT_FIRST_S 16
+
+#define AR_RIMT 0x002C
+#define AR_RIMT_LAST 0x0000ffff
+#define AR_RIMT_LAST_S 0
+#define AR_RIMT_FIRST 0xffff0000
+#define AR_RIMT_FIRST_S 16
+
+#define AR_DMASIZE_4B 0x00000000
+#define AR_DMASIZE_8B 0x00000001
+#define AR_DMASIZE_16B 0x00000002
+#define AR_DMASIZE_32B 0x00000003
+#define AR_DMASIZE_64B 0x00000004
+#define AR_DMASIZE_128B 0x00000005
+#define AR_DMASIZE_256B 0x00000006
+#define AR_DMASIZE_512B 0x00000007
+
+#define AR_TXCFG 0x0030
+#define AR_TXCFG_DMASZ_MASK 0x00000007
+#define AR_TXCFG_DMASZ_4B 0
+#define AR_TXCFG_DMASZ_8B 1
+#define AR_TXCFG_DMASZ_16B 2
+#define AR_TXCFG_DMASZ_32B 3
+#define AR_TXCFG_DMASZ_64B 4
+#define AR_TXCFG_DMASZ_128B 5
+#define AR_TXCFG_DMASZ_256B 6
+#define AR_TXCFG_DMASZ_512B 7
+#define AR_FTRIG 0x000003F0
+#define AR_FTRIG_S 4
+#define AR_FTRIG_IMMED 0x00000000
+#define AR_FTRIG_64B 0x00000010
+#define AR_FTRIG_128B 0x00000020
+#define AR_FTRIG_192B 0x00000030
+#define AR_FTRIG_256B 0x00000040
+#define AR_FTRIG_512B 0x00000080
+#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800
+
+#define AR_RXCFG 0x0034
+#define AR_RXCFG_CHIRP 0x00000008
+#define AR_RXCFG_ZLFDMA 0x00000010
+#define AR_RXCFG_DMASZ_MASK 0x00000007
+#define AR_RXCFG_DMASZ_4B 0
+#define AR_RXCFG_DMASZ_8B 1
+#define AR_RXCFG_DMASZ_16B 2
+#define AR_RXCFG_DMASZ_32B 3
+#define AR_RXCFG_DMASZ_64B 4
+#define AR_RXCFG_DMASZ_128B 5
+#define AR_RXCFG_DMASZ_256B 6
+#define AR_RXCFG_DMASZ_512B 7
+
+#define AR_TOPS 0x0044
+#define AR_TOPS_MASK 0x0000FFFF
+
+#define AR_RXNPTO 0x0048
+#define AR_RXNPTO_MASK 0x000003FF
+
+#define AR_TXNPTO 0x004C
+#define AR_TXNPTO_MASK 0x000003FF
+#define AR_TXNPTO_QCU_MASK 0x000FFC00
+
+#define AR_RPGTO 0x0050
+#define AR_RPGTO_MASK 0x000003FF
+
+#define AR_RPCNT 0x0054
+#define AR_RPCNT_MASK 0x0000001F
+
+#define AR_MACMISC 0x0058
+#define AR_MACMISC_PCI_EXT_FORCE 0x00000010
+#define AR_MACMISC_DMA_OBS 0x000001E0
+#define AR_MACMISC_DMA_OBS_S 5
+#define AR_MACMISC_DMA_OBS_LINE_0 0
+#define AR_MACMISC_DMA_OBS_LINE_1 1
+#define AR_MACMISC_DMA_OBS_LINE_2 2
+#define AR_MACMISC_DMA_OBS_LINE_3 3
+#define AR_MACMISC_DMA_OBS_LINE_4 4
+#define AR_MACMISC_DMA_OBS_LINE_5 5
+#define AR_MACMISC_DMA_OBS_LINE_6 6
+#define AR_MACMISC_DMA_OBS_LINE_7 7
+#define AR_MACMISC_DMA_OBS_LINE_8 8
+#define AR_MACMISC_MISC_OBS 0x00000E00
+#define AR_MACMISC_MISC_OBS_S 9
+#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000
+#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12
+#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000
+#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
+#define AR_MACMISC_MISC_OBS_BUS_1 1
+
+#define AR_DATABUF_SIZE 0x0060
+#define AR_DATABUF_SIZE_MASK 0x00000FFF
+
+#define AR_GTXTO 0x0064
+#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
+#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
+#define AR_GTXTO_TIMEOUT_LIMIT_S 16
+
+#define AR_GTTM 0x0068
+#define AR_GTTM_USEC 0x00000001
+#define AR_GTTM_IGNORE_IDLE 0x00000002
+#define AR_GTTM_RESET_IDLE 0x00000004
+#define AR_GTTM_CST_USEC 0x00000008
+
+#define AR_CST 0x006C
+#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF
+#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
+#define AR_CST_TIMEOUT_LIMIT_S 16
+
+#define AR_HP_RXDP 0x0074
+#define AR_LP_RXDP 0x0078
+
+#define AR_ISR 0x0080
+#define AR_ISR_RXOK 0x00000001
+#define AR_ISR_RXDESC 0x00000002
+#define AR_ISR_HP_RXOK 0x00000001
+#define AR_ISR_LP_RXOK 0x00000002
+#define AR_ISR_RXERR 0x00000004
+#define AR_ISR_RXNOPKT 0x00000008
+#define AR_ISR_RXEOL 0x00000010
+#define AR_ISR_RXORN 0x00000020
+#define AR_ISR_TXOK 0x00000040
+#define AR_ISR_TXDESC 0x00000080
+#define AR_ISR_TXERR 0x00000100
+#define AR_ISR_TXNOPKT 0x00000200
+#define AR_ISR_TXEOL 0x00000400
+#define AR_ISR_TXURN 0x00000800
+#define AR_ISR_MIB 0x00001000
+#define AR_ISR_SWI 0x00002000
+#define AR_ISR_RXPHY 0x00004000
+#define AR_ISR_RXKCM 0x00008000
+#define AR_ISR_SWBA 0x00010000
+#define AR_ISR_BRSSI 0x00020000
+#define AR_ISR_BMISS 0x00040000
+#define AR_ISR_BNR 0x00100000
+#define AR_ISR_RXCHIRP 0x00200000
+#define AR_ISR_BCNMISC 0x00800000
+#define AR_ISR_TIM 0x00800000
+#define AR_ISR_QCBROVF 0x02000000
+#define AR_ISR_QCBRURN 0x04000000
+#define AR_ISR_QTRIG 0x08000000
+#define AR_ISR_GENTMR 0x10000000
+
+#define AR_ISR_TXMINTR 0x00080000
+#define AR_ISR_RXMINTR 0x01000000
+#define AR_ISR_TXINTM 0x40000000
+#define AR_ISR_RXINTM 0x80000000
+
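+/*
+ * AR_ISR_S0..AR_ISR_S5 are secondary interrupt status registers that
+ * break the summary bits in AR_ISR out per QCU (and per generic timer
+ * in the case of S5).
+ */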
+#define AR_ISR_S0 0x0084
+#define AR_ISR_S0_QCU_TXOK 0x000003FF
+#define AR_ISR_S0_QCU_TXOK_S 0
+#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
+#define AR_ISR_S0_QCU_TXDESC_S 16
+
+#define AR_ISR_S1 0x0088
+#define AR_ISR_S1_QCU_TXERR 0x000003FF
+#define AR_ISR_S1_QCU_TXERR_S 0
+#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
+#define AR_ISR_S1_QCU_TXEOL_S 16
+
+#define AR_ISR_S2 0x008c
+#define AR_ISR_S2_QCU_TXURN 0x000003FF
+#define AR_ISR_S2_BB_WATCHDOG 0x00010000
+#define AR_ISR_S2_CST 0x00400000
+#define AR_ISR_S2_GTT 0x00800000
+#define AR_ISR_S2_TIM 0x01000000
+#define AR_ISR_S2_CABEND 0x02000000
+#define AR_ISR_S2_DTIMSYNC 0x04000000
+#define AR_ISR_S2_BCNTO 0x08000000
+#define AR_ISR_S2_CABTO 0x10000000
+#define AR_ISR_S2_DTIM 0x20000000
+#define AR_ISR_S2_TSFOOR 0x40000000
+#define AR_ISR_S2_TBTT_TIME 0x80000000
+
+#define AR_ISR_S3 0x0090
+#define AR_ISR_S3_QCU_QCBROVF 0x000003FF
+#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000
+
+#define AR_ISR_S4 0x0094
+#define AR_ISR_S4_QCU_QTRIG 0x000003FF
+#define AR_ISR_S4_RESV0 0xFFFFFC00
+
+#define AR_ISR_S5 0x0098
+#define AR_ISR_S5_TIMER_TRIG 0x000000FF
+#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
+#define AR_ISR_S5_TIM_TIMER 0x00000010
+#define AR_ISR_S5_DTIM_TIMER 0x00000020
+#define AR_IMR_S5 0x00b8
+#define AR_IMR_S5_TIM_TIMER 0x00000010
+#define AR_IMR_S5_DTIM_TIMER 0x00000020
+#define AR_ISR_S5_GENTIMER_TRIG 0x0000FF80
+#define AR_ISR_S5_GENTIMER_TRIG_S 0
+#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
+#define AR_ISR_S5_GENTIMER_THRESH_S 16
+#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
+#define AR_IMR_S5_GENTIMER_TRIG_S 0
+#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
+#define AR_IMR_S5_GENTIMER_THRESH_S 16
+
+#define AR_IMR 0x00a0
+#define AR_IMR_RXOK 0x00000001
+#define AR_IMR_RXDESC 0x00000002
+#define AR_IMR_RXOK_HP 0x00000001
+#define AR_IMR_RXOK_LP 0x00000002
+#define AR_IMR_RXERR 0x00000004
+#define AR_IMR_RXNOPKT 0x00000008
+#define AR_IMR_RXEOL 0x00000010
+#define AR_IMR_RXORN 0x00000020
+#define AR_IMR_TXOK 0x00000040
+#define AR_IMR_TXDESC 0x00000080
+#define AR_IMR_TXERR 0x00000100
+#define AR_IMR_TXNOPKT 0x00000200
+#define AR_IMR_TXEOL 0x00000400
+#define AR_IMR_TXURN 0x00000800
+#define AR_IMR_MIB 0x00001000
+#define AR_IMR_SWI 0x00002000
+#define AR_IMR_RXPHY 0x00004000
+#define AR_IMR_RXKCM 0x00008000
+#define AR_IMR_SWBA 0x00010000
+#define AR_IMR_BRSSI 0x00020000
+#define AR_IMR_BMISS 0x00040000
+#define AR_IMR_BNR 0x00100000
+#define AR_IMR_RXCHIRP 0x00200000
+#define AR_IMR_BCNMISC 0x00800000
+#define AR_IMR_TIM 0x00800000
+#define AR_IMR_QCBROVF 0x02000000
+#define AR_IMR_QCBRURN 0x04000000
+#define AR_IMR_QTRIG 0x08000000
+#define AR_IMR_GENTMR 0x10000000
+
+#define AR_IMR_TXMINTR 0x00080000
+#define AR_IMR_RXMINTR 0x01000000
+#define AR_IMR_TXINTM 0x40000000
+#define AR_IMR_RXINTM 0x80000000
+
+#define AR_IMR_S0 0x00a4
+#define AR_IMR_S0_QCU_TXOK 0x000003FF
+#define AR_IMR_S0_QCU_TXOK_S 0
+#define AR_IMR_S0_QCU_TXDESC 0x03FF0000
+#define AR_IMR_S0_QCU_TXDESC_S 16
+
+#define AR_IMR_S1 0x00a8
+#define AR_IMR_S1_QCU_TXERR 0x000003FF
+#define AR_IMR_S1_QCU_TXERR_S 0
+#define AR_IMR_S1_QCU_TXEOL 0x03FF0000
+#define AR_IMR_S1_QCU_TXEOL_S 16
+
+#define AR_IMR_S2 0x00ac
+#define AR_IMR_S2_QCU_TXURN 0x000003FF
+#define AR_IMR_S2_QCU_TXURN_S 0
+#define AR_IMR_S2_BB_WATCHDOG 0x00010000
+#define AR_IMR_S2_CST 0x00400000
+#define AR_IMR_S2_GTT 0x00800000
+#define AR_IMR_S2_TIM 0x01000000
+#define AR_IMR_S2_CABEND 0x02000000
+#define AR_IMR_S2_DTIMSYNC 0x04000000
+#define AR_IMR_S2_BCNTO 0x08000000
+#define AR_IMR_S2_CABTO 0x10000000
+#define AR_IMR_S2_DTIM 0x20000000
+#define AR_IMR_S2_TSFOOR 0x40000000
+
+#define AR_IMR_S3 0x00b0
+#define AR_IMR_S3_QCU_QCBROVF 0x000003FF
+#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000
+#define AR_IMR_S3_QCU_QCBRURN_S 16
+
+#define AR_IMR_S4 0x00b4
+#define AR_IMR_S4_QCU_QTRIG 0x000003FF
+#define AR_IMR_S4_RESV0 0xFFFFFC00
+
+#define AR_IMR_S5 0x00b8
+#define AR_IMR_S5_TIMER_TRIG 0x000000FF
+#define AR_IMR_S5_TIMER_THRESH 0x0000FF00
+
+
+#define AR_ISR_RAC 0x00c0
+#define AR_ISR_S0_S 0x00c4
+#define AR_ISR_S0_QCU_TXOK 0x000003FF
+#define AR_ISR_S0_QCU_TXOK_S 0
+#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
+#define AR_ISR_S0_QCU_TXDESC_S 16
+
+#define AR_ISR_S1_S 0x00c8
+#define AR_ISR_S1_QCU_TXERR 0x000003FF
+#define AR_ISR_S1_QCU_TXERR_S 0
+#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
+#define AR_ISR_S1_QCU_TXEOL_S 16
+
+#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
+#define AR_DMADBG_0 0x00e0
+#define AR_DMADBG_1 0x00e4
+#define AR_DMADBG_2 0x00e8
+#define AR_DMADBG_3 0x00ec
+#define AR_DMADBG_4 0x00f0
+#define AR_DMADBG_5 0x00f4
+#define AR_DMADBG_6 0x00f8
+#define AR_DMADBG_7 0x00fc
+
+#define AR_NUM_QCU 10
+#define AR_QCU_0 0x0001
+#define AR_QCU_1 0x0002
+#define AR_QCU_2 0x0004
+#define AR_QCU_3 0x0008
+#define AR_QCU_4 0x0010
+#define AR_QCU_5 0x0020
+#define AR_QCU_6 0x0040
+#define AR_QCU_7 0x0080
+#define AR_QCU_8 0x0100
+#define AR_QCU_9 0x0200
+
+#define AR_Q0_TXDP 0x0800
+#define AR_Q1_TXDP 0x0804
+#define AR_Q2_TXDP 0x0808
+#define AR_Q3_TXDP 0x080c
+#define AR_Q4_TXDP 0x0810
+#define AR_Q5_TXDP 0x0814
+#define AR_Q6_TXDP 0x0818
+#define AR_Q7_TXDP 0x081c
+#define AR_Q8_TXDP 0x0820
+#define AR_Q9_TXDP 0x0824
+#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
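+/*
+ * The per-queue register banks are laid out with a 4-byte stride, so
+ * helpers such as AR_QTXDP(_i) index them directly by queue number.
+ */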
+
+#define AR_Q_STATUS_RING_START 0x830
+#define AR_Q_STATUS_RING_END 0x834
+
+#define AR_Q_TXE 0x0840
+#define AR_Q_TXE_M 0x000003FF
+
+#define AR_Q_TXD 0x0880
+#define AR_Q_TXD_M 0x000003FF
+
+#define AR_Q0_CBRCFG 0x08c0
+#define AR_Q1_CBRCFG 0x08c4
+#define AR_Q2_CBRCFG 0x08c8
+#define AR_Q3_CBRCFG 0x08cc
+#define AR_Q4_CBRCFG 0x08d0
+#define AR_Q5_CBRCFG 0x08d4
+#define AR_Q6_CBRCFG 0x08d8
+#define AR_Q7_CBRCFG 0x08dc
+#define AR_Q8_CBRCFG 0x08e0
+#define AR_Q9_CBRCFG 0x08e4
+#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2))
+#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF
+#define AR_Q_CBRCFG_INTERVAL_S 0
+#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000
+#define AR_Q_CBRCFG_OVF_THRESH_S 24
+
+#define AR_Q0_RDYTIMECFG 0x0900
+#define AR_Q1_RDYTIMECFG 0x0904
+#define AR_Q2_RDYTIMECFG 0x0908
+#define AR_Q3_RDYTIMECFG 0x090c
+#define AR_Q4_RDYTIMECFG 0x0910
+#define AR_Q5_RDYTIMECFG 0x0914
+#define AR_Q6_RDYTIMECFG 0x0918
+#define AR_Q7_RDYTIMECFG 0x091c
+#define AR_Q8_RDYTIMECFG 0x0920
+#define AR_Q9_RDYTIMECFG 0x0924
+#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2))
+#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF
+#define AR_Q_RDYTIMECFG_DURATION_S 0
+#define AR_Q_RDYTIMECFG_EN 0x01000000
+
+#define AR_Q_ONESHOTARM_SC 0x0940
+#define AR_Q_ONESHOTARM_SC_M 0x000003FF
+#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00
+
+#define AR_Q_ONESHOTARM_CC 0x0980
+#define AR_Q_ONESHOTARM_CC_M 0x000003FF
+#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00
+
+#define AR_Q0_MISC 0x09c0
+#define AR_Q1_MISC 0x09c4
+#define AR_Q2_MISC 0x09c8
+#define AR_Q3_MISC 0x09cc
+#define AR_Q4_MISC 0x09d0
+#define AR_Q5_MISC 0x09d4
+#define AR_Q6_MISC 0x09d8
+#define AR_Q7_MISC 0x09dc
+#define AR_Q8_MISC 0x09e0
+#define AR_Q9_MISC 0x09e4
+#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2))
+#define AR_Q_MISC_FSP 0x0000000F
+#define AR_Q_MISC_FSP_ASAP 0
+#define AR_Q_MISC_FSP_CBR 1
+#define AR_Q_MISC_FSP_DBA_GATED 2
+#define AR_Q_MISC_FSP_TIM_GATED 3
+#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4
+#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5
+#define AR_Q_MISC_ONE_SHOT_EN 0x00000010
+#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020
+#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040
+#define AR_Q_MISC_BEACON_USE 0x00000080
+#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100
+#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200
+#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400
+#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800
+#define AR_Q_MISC_RESV0 0xFFFFF000
+
+#define AR_Q0_STS 0x0a00
+#define AR_Q1_STS 0x0a04
+#define AR_Q2_STS 0x0a08
+#define AR_Q3_STS 0x0a0c
+#define AR_Q4_STS 0x0a10
+#define AR_Q5_STS 0x0a14
+#define AR_Q6_STS 0x0a18
+#define AR_Q7_STS 0x0a1c
+#define AR_Q8_STS 0x0a20
+#define AR_Q9_STS 0x0a24
+#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2))
+#define AR_Q_STS_PEND_FR_CNT 0x00000003
+#define AR_Q_STS_RESV0 0x000000FC
+#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00
+#define AR_Q_STS_RESV1 0xFFFF0000
+
+#define AR_Q_RDYTIMESHDN 0x0a40
+#define AR_Q_RDYTIMESHDN_M 0x000003FF
+
+/* MAC Descriptor CRC check */
+#define AR_Q_DESC_CRCCHK 0xa44
+/* Enable CRC check on the descriptor fetched from host */
+#define AR_Q_DESC_CRCCHK_EN 1
+
+#define AR_NUM_DCU 10
+#define AR_DCU_0 0x0001
+#define AR_DCU_1 0x0002
+#define AR_DCU_2 0x0004
+#define AR_DCU_3 0x0008
+#define AR_DCU_4 0x0010
+#define AR_DCU_5 0x0020
+#define AR_DCU_6 0x0040
+#define AR_DCU_7 0x0080
+#define AR_DCU_8 0x0100
+#define AR_DCU_9 0x0200
+
+#define AR_D0_QCUMASK 0x1000
+#define AR_D1_QCUMASK 0x1004
+#define AR_D2_QCUMASK 0x1008
+#define AR_D3_QCUMASK 0x100c
+#define AR_D4_QCUMASK 0x1010
+#define AR_D5_QCUMASK 0x1014
+#define AR_D6_QCUMASK 0x1018
+#define AR_D7_QCUMASK 0x101c
+#define AR_D8_QCUMASK 0x1020
+#define AR_D9_QCUMASK 0x1024
+#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2))
+#define AR_D_QCUMASK 0x000003FF
+#define AR_D_QCUMASK_RESV0 0xFFFFFC00
+
+#define AR_D0_LCL_IFS 0x1040
+#define AR_D1_LCL_IFS 0x1044
+#define AR_D2_LCL_IFS 0x1048
+#define AR_D3_LCL_IFS 0x104c
+#define AR_D4_LCL_IFS 0x1050
+#define AR_D5_LCL_IFS 0x1054
+#define AR_D6_LCL_IFS 0x1058
+#define AR_D7_LCL_IFS 0x105c
+#define AR_D8_LCL_IFS 0x1060
+#define AR_D9_LCL_IFS 0x1064
+#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2))
+#define AR_D_LCL_IFS_CWMIN 0x000003FF
+#define AR_D_LCL_IFS_CWMIN_S 0
+#define AR_D_LCL_IFS_CWMAX 0x000FFC00
+#define AR_D_LCL_IFS_CWMAX_S 10
+#define AR_D_LCL_IFS_AIFS 0x0FF00000
+#define AR_D_LCL_IFS_AIFS_S 20
+
+#define AR_D_LCL_IFS_RESV0 0xF0000000
+
+#define AR_D0_RETRY_LIMIT 0x1080
+#define AR_D1_RETRY_LIMIT 0x1084
+#define AR_D2_RETRY_LIMIT 0x1088
+#define AR_D3_RETRY_LIMIT 0x108c
+#define AR_D4_RETRY_LIMIT 0x1090
+#define AR_D5_RETRY_LIMIT 0x1094
+#define AR_D6_RETRY_LIMIT 0x1098
+#define AR_D7_RETRY_LIMIT 0x109c
+#define AR_D8_RETRY_LIMIT 0x10a0
+#define AR_D9_RETRY_LIMIT 0x10a4
+#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2))
+#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F
+#define AR_D_RETRY_LIMIT_FR_SH_S 0
+#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00
+#define AR_D_RETRY_LIMIT_STA_SH_S 8
+#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000
+#define AR_D_RETRY_LIMIT_STA_LG_S 14
+#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000
+
+#define AR_D0_CHNTIME 0x10c0
+#define AR_D1_CHNTIME 0x10c4
+#define AR_D2_CHNTIME 0x10c8
+#define AR_D3_CHNTIME 0x10cc
+#define AR_D4_CHNTIME 0x10d0
+#define AR_D5_CHNTIME 0x10d4
+#define AR_D6_CHNTIME 0x10d8
+#define AR_D7_CHNTIME 0x10dc
+#define AR_D8_CHNTIME 0x10e0
+#define AR_D9_CHNTIME 0x10e4
+#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2))
+#define AR_D_CHNTIME_DUR 0x000FFFFF
+#define AR_D_CHNTIME_DUR_S 0
+#define AR_D_CHNTIME_EN 0x00100000
+#define AR_D_CHNTIME_RESV0 0xFFE00000
+
+#define AR_D0_MISC 0x1100
+#define AR_D1_MISC 0x1104
+#define AR_D2_MISC 0x1108
+#define AR_D3_MISC 0x110c
+#define AR_D4_MISC 0x1110
+#define AR_D5_MISC 0x1114
+#define AR_D6_MISC 0x1118
+#define AR_D7_MISC 0x111c
+#define AR_D8_MISC 0x1120
+#define AR_D9_MISC 0x1124
+#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2))
+#define AR_D_MISC_BKOFF_THRESH 0x0000003F
+#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040
+#define AR_D_MISC_CW_RESET_EN 0x00000080
+#define AR_D_MISC_FRAG_WAIT_EN 0x00000100
+#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200
+#define AR_D_MISC_CW_BKOFF_EN 0x00001000
+#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000
+#define AR_D_MISC_VIR_COL_HANDLING_S 14
+#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0
+#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1
+#define AR_D_MISC_BEACON_USE 0x00010000
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1
+#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2
+#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000
+#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000
+#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000
+#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000
+#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000
+#define AR_D_MISC_RESV0 0xFF000000
+
+#define AR_D_SEQNUM 0x1140
+
+#define AR_D_GBL_IFS_SIFS 0x1030
+#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
+#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
+
+#define AR_D_TXBLK_BASE 0x1038
+#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF
+#define AR_D_TXBLK_WRITE_BITMASK_S 0
+#define AR_D_TXBLK_WRITE_SLICE 0x000F0000
+#define AR_D_TXBLK_WRITE_SLICE_S 16
+#define AR_D_TXBLK_WRITE_DCU 0x00F00000
+#define AR_D_TXBLK_WRITE_DCU_S 20
+#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000
+#define AR_D_TXBLK_WRITE_COMMAND_S 24
+
+#define AR_D_GBL_IFS_SLOT 0x1070
+#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
+#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
+
+#define AR_D_GBL_IFS_EIFS 0x10b0
+#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
+#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
+#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO 363
+
+#define AR_D_GBL_IFS_MISC 0x10f0
+#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
+#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008
+#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00
+#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000
+#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000
+#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000
+#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000
+#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000
+
+#define AR_D_FPCTL 0x1230
+#define AR_D_FPCTL_DCU 0x0000000F
+#define AR_D_FPCTL_DCU_S 0
+#define AR_D_FPCTL_PREFETCH_EN 0x00000010
+#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0
+#define AR_D_FPCTL_BURST_PREFETCH_S 5
+
+#define AR_D_TXPSE 0x1270
+#define AR_D_TXPSE_CTRL 0x000003FF
+#define AR_D_TXPSE_RESV0 0x0000FC00
+#define AR_D_TXPSE_STATUS 0x00010000
+#define AR_D_TXPSE_RESV1 0xFFFE0000
+
+#define AR_D_TXSLOTMASK 0x12f0
+#define AR_D_TXSLOTMASK_NUM 0x0000000F
+
+#define AR_CFG_LED 0x1f04
+#define AR_CFG_SCLK_RATE_IND 0x00000003
+#define AR_CFG_SCLK_RATE_IND_S 0
+#define AR_CFG_SCLK_32MHZ 0x00000000
+#define AR_CFG_SCLK_4MHZ 0x00000001
+#define AR_CFG_SCLK_1MHZ 0x00000002
+#define AR_CFG_SCLK_32KHZ 0x00000003
+#define AR_CFG_LED_BLINK_SLOW 0x00000008
+#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
+#define AR_CFG_LED_MODE_SEL 0x00000380
+#define AR_CFG_LED_MODE_SEL_S 7
+#define AR_CFG_LED_POWER 0x00000280
+#define AR_CFG_LED_POWER_S 7
+#define AR_CFG_LED_NETWORK 0x00000300
+#define AR_CFG_LED_NETWORK_S 7
+#define AR_CFG_LED_MODE_PROP 0x0
+#define AR_CFG_LED_MODE_RPROP 0x1
+#define AR_CFG_LED_MODE_SPLIT 0x2
+#define AR_CFG_LED_MODE_RAND 0x3
+#define AR_CFG_LED_MODE_POWER_OFF 0x4
+#define AR_CFG_LED_MODE_POWER_ON 0x5
+#define AR_CFG_LED_MODE_NETWORK_OFF 0x4
+#define AR_CFG_LED_MODE_NETWORK_ON 0x6
+#define AR_CFG_LED_ASSOC_CTL 0x00000c00
+#define AR_CFG_LED_ASSOC_CTL_S 10
+#define AR_CFG_LED_ASSOC_NONE 0x0
+#define AR_CFG_LED_ASSOC_ACTIVE 0x1
+#define AR_CFG_LED_ASSOC_PENDING 0x2
+
+#define AR_CFG_LED_BLINK_SLOW 0x00000008
+#define AR_CFG_LED_BLINK_SLOW_S 3
+
+#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
+#define AR_CFG_LED_BLINK_THRESH_SEL_S 4
+
+#define AR_MAC_SLEEP 0x1f00
+#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000
+#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001
+
+#define AR_RC 0x4000
+#define AR_RC_AHB 0x00000001
+#define AR_RC_APB 0x00000002
+#define AR_RC_HOSTIF 0x00000100
+
+#define AR_WA (AR_SREV_9340(ah) ? 0x40c4 : 0x4004)
+#define AR_WA_BIT6 (1 << 6)
+#define AR_WA_BIT7 (1 << 7)
+#define AR_WA_BIT23 (1 << 23)
+#define AR_WA_D3_L1_DISABLE (1 << 14)
+#define AR_WA_UNTIE_RESET_EN (1 << 15) /* Enable PCI Reset
+ to POR (power-on-reset) */
+#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16)
+#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17)
+#define AR_WA_RESET_EN (1 << 18) /* Enable PCI-Reset to
+ POR (bit 15) */
+#define AR_WA_ANALOG_SHIFT (1 << 20)
+#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
+#define AR_WA_BIT22 (1 << 22)
+#define AR9285_WA_DEFAULT 0x004a050b
+#define AR9280_WA_DEFAULT 0x0040073b
+#define AR_WA_DEFAULT 0x0000073f
+
+
+#define AR_PM_STATE 0x4008
+#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
+
+#define AR_HOST_TIMEOUT (AR_SREV_9340(ah) ? 0x4008 : 0x4018)
+#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
+#define AR_HOST_TIMEOUT_APB_CNTR_S 0
+#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
+#define AR_HOST_TIMEOUT_LCL_CNTR_S 16
+
+#define AR_EEPROM 0x401c
+#define AR_EEPROM_ABSENT 0x00000100
+#define AR_EEPROM_CORRUPT 0x00000200
+#define AR_EEPROM_PROT_MASK 0x03FFFC00
+#define AR_EEPROM_PROT_MASK_S 10
+
+#define EEPROM_PROTECT_RP_0_31 0x0001
+#define EEPROM_PROTECT_WP_0_31 0x0002
+#define EEPROM_PROTECT_RP_32_63 0x0004
+#define EEPROM_PROTECT_WP_32_63 0x0008
+#define EEPROM_PROTECT_RP_64_127 0x0010
+#define EEPROM_PROTECT_WP_64_127 0x0020
+#define EEPROM_PROTECT_RP_128_191 0x0040
+#define EEPROM_PROTECT_WP_128_191 0x0080
+#define EEPROM_PROTECT_RP_192_255 0x0100
+#define EEPROM_PROTECT_WP_192_255 0x0200
+#define EEPROM_PROTECT_RP_256_511 0x0400
+#define EEPROM_PROTECT_WP_256_511 0x0800
+#define EEPROM_PROTECT_RP_512_1023 0x1000
+#define EEPROM_PROTECT_WP_512_1023 0x2000
+#define EEPROM_PROTECT_RP_1024_2047 0x4000
+#define EEPROM_PROTECT_WP_1024_2047 0x8000
+
+#define AR_SREV \
+ ((AR_SREV_9100(ah)) ? 0x0600 : (AR_SREV_9340(ah) \
+ ? 0x400c : 0x4020))
+
+#define AR_SREV_ID \
+ ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
+#define AR_SREV_VERSION 0x000000F0
+#define AR_SREV_VERSION_S 4
+#define AR_SREV_REVISION 0x00000007
+
+#define AR_SREV_ID2 0xFFFFFFFF
+#define AR_SREV_VERSION2 0xFFFC0000
+#define AR_SREV_VERSION2_S 18
+#define AR_SREV_TYPE2 0x0003F000
+#define AR_SREV_TYPE2_S 12
+#define AR_SREV_TYPE2_CHAIN 0x00001000
+#define AR_SREV_TYPE2_HOST_MODE 0x00002000
+#define AR_SREV_REVISION2 0x00000F00
+#define AR_SREV_REVISION2_S 8
+
+#define AR_SREV_VERSION_5416_PCI 0xD
+#define AR_SREV_VERSION_5416_PCIE 0xC
+#define AR_SREV_REVISION_5416_10 0
+#define AR_SREV_REVISION_5416_20 1
+#define AR_SREV_REVISION_5416_22 2
+#define AR_SREV_VERSION_9100 0x14
+#define AR_SREV_VERSION_9160 0x40
+#define AR_SREV_REVISION_9160_10 0
+#define AR_SREV_REVISION_9160_11 1
+#define AR_SREV_VERSION_9280 0x80
+#define AR_SREV_REVISION_9280_10 0
+#define AR_SREV_REVISION_9280_20 1
+#define AR_SREV_REVISION_9280_21 2
+#define AR_SREV_VERSION_9285 0xC0
+#define AR_SREV_REVISION_9285_10 0
+#define AR_SREV_REVISION_9285_11 1
+#define AR_SREV_REVISION_9285_12 2
+#define AR_SREV_VERSION_9287 0x180
+#define AR_SREV_REVISION_9287_10 0
+#define AR_SREV_REVISION_9287_11 1
+#define AR_SREV_REVISION_9287_12 2
+#define AR_SREV_REVISION_9287_13 3
+#define AR_SREV_VERSION_9271 0x140
+#define AR_SREV_REVISION_9271_10 0
+#define AR_SREV_REVISION_9271_11 1
+#define AR_SREV_VERSION_9300 0x1c0
+#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_REVISION_9300_22 3
+#define AR_SREV_VERSION_9330 0x200
+#define AR_SREV_REVISION_9330_10 0
+#define AR_SREV_REVISION_9330_11 1
+#define AR_SREV_REVISION_9330_12 2
+#define AR_SREV_VERSION_9485 0x240
+#define AR_SREV_REVISION_9485_10 0
+#define AR_SREV_REVISION_9485_11 1
+#define AR_SREV_VERSION_9340 0x300
+#define AR_SREV_REVISION_9340_10 0
+#define AR_SREV_REVISION_9340_11 1
+#define AR_SREV_REVISION_9340_12 2
+#define AR_SREV_REVISION_9340_13 3
+#define AR_SREV_VERSION_9580 0x1C0
+#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
+#define AR_SREV_VERSION_9462 0x280
+#define AR_SREV_REVISION_9462_20 2
+#define AR_SREV_REVISION_9462_21 3
+#define AR_SREV_VERSION_9565 0x2C0
+#define AR_SREV_REVISION_9565_10 0
+#define AR_SREV_REVISION_9565_101 1
+#define AR_SREV_REVISION_9565_11 2
+#define AR_SREV_VERSION_9550 0x400
+#define AR_SREV_VERSION_9531 0x500
+#define AR_SREV_REVISION_9531_10 0
+#define AR_SREV_REVISION_9531_11 1
+#define AR_SREV_REVISION_9531_20 2
+#define AR_SREV_VERSION_9561 0x600
+
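+/*
+ * The AR_SREV_*() helpers below test the MAC version/revision that was
+ * parsed from the SREV register into ah->hw_version and are used
+ * throughout the driver to gate chip-specific behaviour.
+ */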
+#define AR_SREV_5416(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
+ ((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE))
+#define AR_SREV_5416_22_OR_LATER(_ah) \
+ (((AR_SREV_5416(_ah)) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \
+ ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
+
+#define AR_SREV_9100(ah) \
+ ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100)
+#define AR_SREV_9100_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
+
+#define AR_SREV_9160(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9160))
+#define AR_SREV_9160_10_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9160))
+#define AR_SREV_9160_11(_ah) \
+ (AR_SREV_9160(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9160_11))
+#define AR_SREV_9280(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280))
+#define AR_SREV_9280_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9280))
+#define AR_SREV_9280_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280))
+
+#define AR_SREV_9285(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9285))
+#define AR_SREV_9285_12_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9285))
+
+#define AR_SREV_9287(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287))
+#define AR_SREV_9287_11_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9287))
+#define AR_SREV_9287_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_11))
+#define AR_SREV_9287_12(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_12))
+#define AR_SREV_9287_12_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12)))
+#define AR_SREV_9287_13_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_13)))
+
+#define AR_SREV_9271(_ah) \
+ (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271)
+#define AR_SREV_9271_10(_ah) \
+ (AR_SREV_9271(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_10))
+#define AR_SREV_9271_11(_ah) \
+ (AR_SREV_9271(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+
+#define AR_SREV_9300(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
+#define AR_SREV_9300_20_OR_LATER(_ah) \
+ ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
+#define AR_SREV_9300_22(_ah) \
+ (AR_SREV_9300(ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
+
+#define AR_SREV_9330(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
+#define AR_SREV_9330_11(_ah) \
+ (AR_SREV_9330((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_11))
+#define AR_SREV_9330_12(_ah) \
+ (AR_SREV_9330((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_12))
+
+#ifdef CONFIG_ATH9K_PCOEM
+#define AR_SREV_9462(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
+#define AR_SREV_9485(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
+#define AR_SREV_9565(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+#define AR_SREV_9003_PCOEM(_ah) \
+ (AR_SREV_9462(_ah) || AR_SREV_9485(_ah) || AR_SREV_9565(_ah))
+#else
+#define AR_SREV_9462(_ah) 0
+#define AR_SREV_9485(_ah) 0
+#define AR_SREV_9565(_ah) 0
+#define AR_SREV_9003_PCOEM(_ah) 0
+#endif
+
+#define AR_SREV_9485_11_OR_LATER(_ah) \
+ (AR_SREV_9485(_ah) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
+#define AR_SREV_9485_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
+
+#define AR_SREV_9340(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340))
+
+#define AR_SREV_9340_13(_ah) \
+ (AR_SREV_9340((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9340_13))
+
+#define AR_SREV_9340_13_OR_LATER(_ah) \
+ (AR_SREV_9340((_ah)) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13))
+
+#define AR_SREV_9285E_20(_ah) \
+ (AR_SREV_9285_12_OR_LATER(_ah) && \
+ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
+
+#define AR_SREV_9462_20(_ah) \
+ (AR_SREV_9462(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21(_ah) \
+ (AR_SREV_9462(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
+#define AR_SREV_9462_20_OR_LATER(_ah) \
+ (AR_SREV_9462(_ah) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21_OR_LATER(_ah) \
+ (AR_SREV_9462(_ah) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
+
+#define AR_SREV_9565_10(_ah) \
+ (AR_SREV_9565(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+#define AR_SREV_9565_101(_ah) \
+ (AR_SREV_9565(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
+#define AR_SREV_9565_11(_ah) \
+ (AR_SREV_9565(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
+#define AR_SREV_9565_11_OR_LATER(_ah) \
+ (AR_SREV_9565(_ah) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
+
+#define AR_SREV_9550(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
+#define AR_SREV_9550_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9550))
+
+#define AR_SREV_9580(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
+#define AR_SREV_9580_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10))
+
+#define AR_SREV_9531(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531))
+#define AR_SREV_9531_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_10))
+#define AR_SREV_9531_11(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11))
+#define AR_SREV_9531_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_20))
+
+#define AR_SREV_9561(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
+
+/* NOTE: When adding chips newer than Peacock, add chip check here */
+#define AR_SREV_9580_10_OR_LATER(_ah) \
+ (AR_SREV_9580(_ah))
+
+enum ath_usb_dev {
+ AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
+ AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
+ STORAGE_DEVICE = 3,
+};
+
+#define AR_DEVID_7010(_ah) \
+ (((_ah)->hw_version.usbdev == AR9280_USB) || \
+ ((_ah)->hw_version.usbdev == AR9287_USB))
+
+#define AR_RADIO_SREV_MAJOR 0xf0
+#define AR_RAD5133_SREV_MAJOR 0xc0
+#define AR_RAD2133_SREV_MAJOR 0xd0
+#define AR_RAD5122_SREV_MAJOR 0xe0
+#define AR_RAD2122_SREV_MAJOR 0xf0
+
+#define AR_AHB_MODE 0x4024
+#define AR_AHB_EXACT_WR_EN 0x00000000
+#define AR_AHB_BUF_WR_EN 0x00000001
+#define AR_AHB_EXACT_RD_EN 0x00000000
+#define AR_AHB_CACHELINE_RD_EN 0x00000002
+#define AR_AHB_PREFETCH_RD_EN 0x00000004
+#define AR_AHB_PAGE_SIZE_1K 0x00000000
+#define AR_AHB_PAGE_SIZE_2K 0x00000008
+#define AR_AHB_PAGE_SIZE_4K 0x00000010
+#define AR_AHB_CUSTOM_BURST_EN 0x000000C0
+#define AR_AHB_CUSTOM_BURST_EN_S 6
+#define AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL 3
+
+#define AR_INTR_RTC_IRQ 0x00000001
+#define AR_INTR_MAC_IRQ 0x00000002
+#define AR_INTR_EEP_PROT_ACCESS 0x00000004
+#define AR_INTR_MAC_AWAKE 0x00020000
+#define AR_INTR_MAC_ASLEEP 0x00040000
+#define AR_INTR_SPURIOUS 0xFFFFFFFF
+
+
+#define AR_INTR_SYNC_CAUSE (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
+#define AR_INTR_SYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
+
+
+#define AR_INTR_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4014 : 0x402c)
+#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
+#define AR_INTR_SYNC_ENABLE_GPIO_S 18
+
+enum {
+ AR_INTR_SYNC_RTC_IRQ = 0x00000001,
+ AR_INTR_SYNC_MAC_IRQ = 0x00000002,
+ AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004,
+ AR_INTR_SYNC_APB_TIMEOUT = 0x00000008,
+ AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010,
+ AR_INTR_SYNC_HOST1_FATAL = 0x00000020,
+ AR_INTR_SYNC_HOST1_PERR = 0x00000040,
+ AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080,
+ AR_INTR_SYNC_RADM_CPL_EP = 0x00000100,
+ AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200,
+ AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400,
+ AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800,
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000,
+ AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000,
+ AR_INTR_SYNC_PM_ACCESS = 0x00004000,
+ AR_INTR_SYNC_MAC_AWAKE = 0x00008000,
+ AR_INTR_SYNC_MAC_ASLEEP = 0x00010000,
+ AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000,
+ AR_INTR_SYNC_ALL = 0x0003FFFF,
+
+
+ AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL |
+ AR_INTR_SYNC_HOST1_PERR |
+ AR_INTR_SYNC_RADM_CPL_EP |
+ AR_INTR_SYNC_RADM_CPL_DLLP_ABORT |
+ AR_INTR_SYNC_RADM_CPL_TLP_ABORT |
+ AR_INTR_SYNC_RADM_CPL_ECRC_ERR |
+ AR_INTR_SYNC_RADM_CPL_TIMEOUT |
+ AR_INTR_SYNC_LOCAL_TIMEOUT |
+ AR_INTR_SYNC_MAC_SLEEP_ACCESS),
+
+ AR9340_INTR_SYNC_LOCAL_TIMEOUT = 0x00000010,
+
+ AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
+
+};
+
+#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
+#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
+#define AR_INTR_ASYNC_MASK_GPIO_S 18
+#define AR_INTR_ASYNC_MASK_MCI 0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S 7
+
+#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
+#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
+#define AR_INTR_SYNC_MASK_GPIO_S 18
+
+#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
+#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
+ AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S 7
+
+
+#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
+#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
+#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
+
+#define AR_PCIE_SERDES 0x4040
+#define AR_PCIE_SERDES2 0x4044
+#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
+#define AR_PCIE_PM_CTRL_ENA 0x00080000
+
+#define AR_PCIE_PHY_REG3 0x18c08
+
+#define AR_NUM_GPIO 14
+#define AR928X_NUM_GPIO 10
+#define AR9285_NUM_GPIO 12
+#define AR9287_NUM_GPIO 11
+#define AR9271_NUM_GPIO 16
+#define AR9300_NUM_GPIO 17
+#define AR7010_NUM_GPIO 16
+
+#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
+#define AR_GPIO_IN_VAL 0x0FFFC000
+#define AR_GPIO_IN_VAL_S 14
+#define AR928X_GPIO_IN_VAL 0x000FFC00
+#define AR928X_GPIO_IN_VAL_S 10
+#define AR9285_GPIO_IN_VAL 0x00FFF000
+#define AR9285_GPIO_IN_VAL_S 12
+#define AR9287_GPIO_IN_VAL 0x003FF800
+#define AR9287_GPIO_IN_VAL_S 11
+#define AR9271_GPIO_IN_VAL 0xFFFF0000
+#define AR9271_GPIO_IN_VAL_S 16
+#define AR7010_GPIO_IN_VAL 0x0000FFFF
+#define AR7010_GPIO_IN_VAL_S 0
+
+#define AR_GPIO_IN (AR_SREV_9340(ah) ? 0x402c : 0x404c)
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
+
+#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
+#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \
+ 0x0000000F : 0xFFFFFFFF)
+#define AR_GPIO_OE_OUT_DRV 0x3
+#define AR_GPIO_OE_OUT_DRV_NO 0x0
+#define AR_GPIO_OE_OUT_DRV_LOW 0x1
+#define AR_GPIO_OE_OUT_DRV_HI 0x2
+#define AR_GPIO_OE_OUT_DRV_ALL 0x3
+
+#define AR7010_GPIO_OE 0x52000
+#define AR7010_GPIO_OE_MASK 0x1
+#define AR7010_GPIO_OE_AS_OUTPUT 0x0
+#define AR7010_GPIO_OE_AS_INPUT 0x1
+#define AR7010_GPIO_IN 0x52004
+#define AR7010_GPIO_OUT 0x52008
+#define AR7010_GPIO_SET 0x5200C
+#define AR7010_GPIO_CLEAR 0x52010
+#define AR7010_GPIO_INT 0x52014
+#define AR7010_GPIO_INT_TYPE 0x52018
+#define AR7010_GPIO_INT_POLARITY 0x5201C
+#define AR7010_GPIO_PENDING 0x52020
+#define AR7010_GPIO_INT_MASK 0x52024
+#define AR7010_GPIO_FUNCTION 0x52028
+
+#define AR_GPIO_INTR_POL (AR_SREV_9340(ah) ? 0x4038 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050))
+#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
+#define AR_GPIO_INTR_POL_VAL_S 0
+
+#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9340(ah) ? 0x403c : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054))
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
+#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
+#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_S 3
+#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_DEF 0x00000010
+#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
+#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
+#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
+#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
+#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
+#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
+#define AR_GPIO_JTAG_DISABLE 0x00020000
+
+#define AR_GPIO_INPUT_MUX1 (AR_SREV_9340(ah) ? 0x4040 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058))
+#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
+#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
+#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
+#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
+
+#define AR_GPIO_INPUT_MUX2 (AR_SREV_9340(ah) ? 0x4044 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c))
+#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
+#define AR_GPIO_INPUT_MUX2_CLK25_S 0
+#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
+#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4
+#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
+#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
+
+#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9340(ah) ? 0x4048 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060))
+#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9340(ah) ? 0x404c : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064))
+#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9340(ah) ? 0x4050 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068))
+
+#define AR_INPUT_STATE (AR_SREV_9340(ah) ? 0x4054 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c))
+
+#define AR_EEPROM_STATUS_DATA (AR_SREV_9340(ah) ? 0x40c8 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c))
+#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
+#define AR_EEPROM_STATUS_DATA_VAL_S 0
+#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
+#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000
+#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
+#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
+
+#define AR_OBS (AR_SREV_9340(ah) ? 0x405c : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080))
+
+#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
+
+#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
+ (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
+#define AR_PCIE_MSI_ENABLE 0x00000001
+
+#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
+#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
+#define AR_INTR_PRIO_SYNC_MASK (AR_SREV_9340(ah) ? 0x4090 : 0x40cc)
+#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
+#define AR_ENT_OTP 0x40d8
+#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
+#define AR_ENT_OTP_49GHZ_DISABLE 0x00100000
+#define AR_ENT_OTP_MIN_PKT_SIZE_DISABLE 0x00800000
+
+#define AR_CH0_BB_DPLL1 0x16180
+#define AR_CH0_BB_DPLL1_REFDIV 0xF8000000
+#define AR_CH0_BB_DPLL1_REFDIV_S 27
+#define AR_CH0_BB_DPLL1_NINI 0x07FC0000
+#define AR_CH0_BB_DPLL1_NINI_S 18
+#define AR_CH0_BB_DPLL1_NFRAC 0x0003FFFF
+#define AR_CH0_BB_DPLL1_NFRAC_S 0
+
+#define AR_CH0_BB_DPLL2 0x16184
+#define AR_CH0_BB_DPLL2_LOCAL_PLL 0x40000000
+#define AR_CH0_BB_DPLL2_LOCAL_PLL_S 30
+#define AR_CH0_DPLL2_KI 0x3C000000
+#define AR_CH0_DPLL2_KI_S 26
+#define AR_CH0_DPLL2_KD 0x03F80000
+#define AR_CH0_DPLL2_KD_S 19
+#define AR_CH0_BB_DPLL2_EN_NEGTRIG 0x00040000
+#define AR_CH0_BB_DPLL2_EN_NEGTRIG_S 18
+#define AR_CH0_BB_DPLL2_PLL_PWD 0x00010000
+#define AR_CH0_BB_DPLL2_PLL_PWD_S 16
+#define AR_CH0_BB_DPLL2_OUTDIV 0x0000E000
+#define AR_CH0_BB_DPLL2_OUTDIV_S 13
+
+#define AR_CH0_BB_DPLL3 0x16188
+#define AR_CH0_BB_DPLL3_PHASE_SHIFT 0x3F800000
+#define AR_CH0_BB_DPLL3_PHASE_SHIFT_S 23
+
+#define AR_CH0_DDR_DPLL2 0x16244
+#define AR_CH0_DDR_DPLL3 0x16248
+#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
+#define AR_PHY_CCA_NOM_VAL_2GHZ -118
+
+#define AR_RTC_9300_SOC_PLL_DIV_INT 0x0000003f
+#define AR_RTC_9300_SOC_PLL_DIV_INT_S 0
+#define AR_RTC_9300_SOC_PLL_DIV_FRAC 0x000fffc0
+#define AR_RTC_9300_SOC_PLL_DIV_FRAC_S 6
+#define AR_RTC_9300_SOC_PLL_REFDIV 0x01f00000
+#define AR_RTC_9300_SOC_PLL_REFDIV_S 20
+#define AR_RTC_9300_SOC_PLL_CLKSEL 0x06000000
+#define AR_RTC_9300_SOC_PLL_CLKSEL_S 25
+#define AR_RTC_9300_SOC_PLL_BYPASS 0x08000000
+
+#define AR_RTC_9300_PLL_DIV 0x000003ff
+#define AR_RTC_9300_PLL_DIV_S 0
+#define AR_RTC_9300_PLL_REFDIV 0x00003C00
+#define AR_RTC_9300_PLL_REFDIV_S 10
+#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9300_PLL_CLKSEL_S 14
+#define AR_RTC_9300_PLL_BYPASS 0x00010000
+
+#define AR_RTC_9160_PLL_DIV 0x000003ff
+#define AR_RTC_9160_PLL_DIV_S 0
+#define AR_RTC_9160_PLL_REFDIV 0x00003C00
+#define AR_RTC_9160_PLL_REFDIV_S 10
+#define AR_RTC_9160_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9160_PLL_CLKSEL_S 14
+
+#define AR_RTC_BASE 0x00020000
+#define AR_RTC_RC \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
+#define AR_RTC_RC_M 0x00000003
+#define AR_RTC_RC_MAC_WARM 0x00000001
+#define AR_RTC_RC_MAC_COLD 0x00000002
+#define AR_RTC_RC_COLD_RESET 0x00000004
+#define AR_RTC_RC_WARM_RESET 0x00000008
+
+/* Crystal Control */
+#define AR_RTC_XTAL_CONTROL 0x7004
+
+/* Reg Control 0 */
+#define AR_RTC_REG_CONTROL0 0x7008
+
+/* Reg Control 1 */
+#define AR_RTC_REG_CONTROL1 0x700c
+#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
+
+#define AR_RTC_PLL_CONTROL \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
+
+#define AR_RTC_PLL_CONTROL2 0x703c
+
+#define AR_RTC_PLL_DIV 0x0000001f
+#define AR_RTC_PLL_DIV_S 0
+#define AR_RTC_PLL_DIV2 0x00000020
+#define AR_RTC_PLL_REFDIV_5 0x000000c0
+#define AR_RTC_PLL_CLKSEL 0x00000300
+#define AR_RTC_PLL_CLKSEL_S 8
+#define AR_RTC_PLL_BYPASS 0x00010000
+#define AR_RTC_PLL_NOPWD 0x00040000
+#define AR_RTC_PLL_NOPWD_S 18
+
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE 0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
+#define AR_RTC_RESET \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
+#define AR_RTC_RESET_EN (0x00000001)
+
+#define AR_RTC_STATUS \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
+
+#define AR_RTC_STATUS_M \
+ ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
+
+#define AR_RTC_PM_STATUS_M 0x0000000f
+
+#define AR_RTC_STATUS_SHUTDOWN 0x00000001
+#define AR_RTC_STATUS_ON 0x00000002
+#define AR_RTC_STATUS_SLEEP 0x00000004
+#define AR_RTC_STATUS_WAKEUP 0x00000008
+
+#define AR_RTC_SLEEP_CLK \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
+#define AR_RTC_FORCE_DERIVED_CLK 0x2
+#define AR_RTC_FORCE_SWREG_PRD 0x00000004
+
+#define AR_RTC_FORCE_WAKE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
+#define AR_RTC_FORCE_WAKE_EN 0x00000001
+#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
+
+
+#define AR_RTC_INTR_CAUSE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
+
+#define AR_RTC_INTR_ENABLE \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
+
+#define AR_RTC_INTR_MASK \
+ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+
+#define AR_RTC_KEEP_AWAKE 0x7034
+
+/* RTC_DERIVED_* - only for AR9100 */
+
+#define AR_RTC_DERIVED_CLK \
+ (AR_SREV_9100(ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
+#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe
+#define AR_RTC_DERIVED_CLK_PERIOD_S 1
+
+#define AR_SEQ_MASK 0x8060
+
+#define AR_AN_RF2G1_CH0 0x7810
+#define AR_AN_RF2G1_CH0_OB 0x03800000
+#define AR_AN_RF2G1_CH0_OB_S 23
+#define AR_AN_RF2G1_CH0_DB 0x1C000000
+#define AR_AN_RF2G1_CH0_DB_S 26
+
+#define AR_AN_RF5G1_CH0 0x7818
+#define AR_AN_RF5G1_CH0_OB5 0x00070000
+#define AR_AN_RF5G1_CH0_OB5_S 16
+#define AR_AN_RF5G1_CH0_DB5 0x00380000
+#define AR_AN_RF5G1_CH0_DB5_S 19
+
+#define AR_AN_RF2G1_CH1 0x7834
+#define AR_AN_RF2G1_CH1_OB 0x03800000
+#define AR_AN_RF2G1_CH1_OB_S 23
+#define AR_AN_RF2G1_CH1_DB 0x1C000000
+#define AR_AN_RF2G1_CH1_DB_S 26
+
+#define AR_AN_RF5G1_CH1 0x783C
+#define AR_AN_RF5G1_CH1_OB5 0x00070000
+#define AR_AN_RF5G1_CH1_OB5_S 16
+#define AR_AN_RF5G1_CH1_DB5 0x00380000
+#define AR_AN_RF5G1_CH1_DB5_S 19
+
+#define AR_AN_TOP1 0x7890
+#define AR_AN_TOP1_DACIPMODE 0x00040000
+#define AR_AN_TOP1_DACIPMODE_S 18
+
+#define AR_AN_TOP2 0x7894
+#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
+#define AR_AN_TOP2_XPABIAS_LVL_S 30
+#define AR_AN_TOP2_LOCALBIAS 0x00200000
+#define AR_AN_TOP2_LOCALBIAS_S 21
+#define AR_AN_TOP2_PWDCLKIND 0x00400000
+#define AR_AN_TOP2_PWDCLKIND_S 22
+
+#define AR_AN_SYNTH9 0x7868
+#define AR_AN_SYNTH9_REFDIVA 0xf8000000
+#define AR_AN_SYNTH9_REFDIVA_S 27
+
+#define AR9285_AN_RF2G1 0x7820
+#define AR9285_AN_RF2G1_ENPACAL 0x00000800
+#define AR9285_AN_RF2G1_ENPACAL_S 11
+#define AR9285_AN_RF2G1_PDPADRV1 0x02000000
+#define AR9285_AN_RF2G1_PDPADRV1_S 25
+#define AR9285_AN_RF2G1_PDPADRV2 0x01000000
+#define AR9285_AN_RF2G1_PDPADRV2_S 24
+#define AR9285_AN_RF2G1_PDPAOUT 0x00800000
+#define AR9285_AN_RF2G1_PDPAOUT_S 23
+
+
+#define AR9285_AN_RF2G2 0x7824
+#define AR9285_AN_RF2G2_OFFCAL 0x00001000
+#define AR9285_AN_RF2G2_OFFCAL_S 12
+
+#define AR9285_AN_RF2G3 0x7828
+#define AR9285_AN_RF2G3_PDVCCOMP 0x02000000
+#define AR9285_AN_RF2G3_PDVCCOMP_S 25
+#define AR9285_AN_RF2G3_OB_0 0x00E00000
+#define AR9285_AN_RF2G3_OB_0_S 21
+#define AR9285_AN_RF2G3_OB_1 0x001C0000
+#define AR9285_AN_RF2G3_OB_1_S 18
+#define AR9285_AN_RF2G3_OB_2 0x00038000
+#define AR9285_AN_RF2G3_OB_2_S 15
+#define AR9285_AN_RF2G3_OB_3 0x00007000
+#define AR9285_AN_RF2G3_OB_3_S 12
+#define AR9285_AN_RF2G3_OB_4 0x00000E00
+#define AR9285_AN_RF2G3_OB_4_S 9
+
+#define AR9285_AN_RF2G3_DB1_0 0x000001C0
+#define AR9285_AN_RF2G3_DB1_0_S 6
+#define AR9285_AN_RF2G3_DB1_1 0x00000038
+#define AR9285_AN_RF2G3_DB1_1_S 3
+#define AR9285_AN_RF2G3_DB1_2 0x00000007
+#define AR9285_AN_RF2G3_DB1_2_S 0
+#define AR9285_AN_RF2G4 0x782C
+#define AR9285_AN_RF2G4_DB1_3 0xE0000000
+#define AR9285_AN_RF2G4_DB1_3_S 29
+#define AR9285_AN_RF2G4_DB1_4 0x1C000000
+#define AR9285_AN_RF2G4_DB1_4_S 26
+
+#define AR9285_AN_RF2G4_DB2_0 0x03800000
+#define AR9285_AN_RF2G4_DB2_0_S 23
+#define AR9285_AN_RF2G4_DB2_1 0x00700000
+#define AR9285_AN_RF2G4_DB2_1_S 20
+#define AR9285_AN_RF2G4_DB2_2 0x000E0000
+#define AR9285_AN_RF2G4_DB2_2_S 17
+#define AR9285_AN_RF2G4_DB2_3 0x0001C000
+#define AR9285_AN_RF2G4_DB2_3_S 14
+#define AR9285_AN_RF2G4_DB2_4 0x00003800
+#define AR9285_AN_RF2G4_DB2_4_S 11
+
+#define AR9285_RF2G5 0x7830
+#define AR9285_RF2G5_IC50TX 0xfffff8ff
+#define AR9285_RF2G5_IC50TX_SET 0x00000400
+#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
+#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
+#define AR9285_RF2G5_IC50TX_CLEAR_S 8
+
+/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
+#define AR9271_AN_RF2G3_OB_cck 0x001C0000
+#define AR9271_AN_RF2G3_OB_cck_S 18
+#define AR9271_AN_RF2G3_OB_psk 0x00038000
+#define AR9271_AN_RF2G3_OB_psk_S 15
+#define AR9271_AN_RF2G3_OB_qam 0x00007000
+#define AR9271_AN_RF2G3_OB_qam_S 12
+
+#define AR9271_AN_RF2G3_DB_1 0x00E00000
+#define AR9271_AN_RF2G3_DB_1_S 21
+
+#define AR9271_AN_RF2G3_CCOMP 0xFFF
+#define AR9271_AN_RF2G3_CCOMP_S 0
+
+#define AR9271_AN_RF2G4_DB_2 0xE0000000
+#define AR9271_AN_RF2G4_DB_2_S 29
+
+#define AR9285_AN_RF2G6 0x7834
+#define AR9285_AN_RF2G6_CCOMP 0x00007800
+#define AR9285_AN_RF2G6_CCOMP_S 11
+#define AR9285_AN_RF2G6_OFFS 0x03f00000
+#define AR9285_AN_RF2G6_OFFS_S 20
+
+#define AR9271_AN_RF2G6_OFFS 0x07f00000
+#define AR9271_AN_RF2G6_OFFS_S 20
+
+#define AR9285_AN_RF2G7 0x7838
+#define AR9285_AN_RF2G7_PWDDB 0x00000002
+#define AR9285_AN_RF2G7_PWDDB_S 1
+#define AR9285_AN_RF2G7_PADRVGN2TAB0 0xE0000000
+#define AR9285_AN_RF2G7_PADRVGN2TAB0_S 29
+
+#define AR9285_AN_RF2G8 0x783C
+#define AR9285_AN_RF2G8_PADRVGN2TAB0 0x0001C000
+#define AR9285_AN_RF2G8_PADRVGN2TAB0_S 14
+
+
+#define AR9285_AN_RF2G9 0x7840
+#define AR9285_AN_RXTXBB1 0x7854
+#define AR9285_AN_RXTXBB1_PDRXTXBB1 0x00000020
+#define AR9285_AN_RXTXBB1_PDRXTXBB1_S 5
+#define AR9285_AN_RXTXBB1_PDV2I 0x00000080
+#define AR9285_AN_RXTXBB1_PDV2I_S 7
+#define AR9285_AN_RXTXBB1_PDDACIF 0x00000100
+#define AR9285_AN_RXTXBB1_PDDACIF_S 8
+#define AR9285_AN_RXTXBB1_SPARE9 0x00000001
+#define AR9285_AN_RXTXBB1_SPARE9_S 0
+
+#define AR9285_AN_TOP2 0x7868
+
+#define AR9285_AN_TOP3 0x786c
+#define AR9285_AN_TOP3_XPABIAS_LVL 0x0000000C
+#define AR9285_AN_TOP3_XPABIAS_LVL_S 2
+#define AR9285_AN_TOP3_PWDDAC 0x00800000
+#define AR9285_AN_TOP3_PWDDAC_S 23
+
+#define AR9285_AN_TOP4 0x7870
+#define AR9285_AN_TOP4_DEFAULT 0x10142c00
+
+#define AR9287_AN_RF2G3_CH0 0x7808
+#define AR9287_AN_RF2G3_CH1 0x785c
+#define AR9287_AN_RF2G3_DB1 0xE0000000
+#define AR9287_AN_RF2G3_DB1_S 29
+#define AR9287_AN_RF2G3_DB2 0x1C000000
+#define AR9287_AN_RF2G3_DB2_S 26
+#define AR9287_AN_RF2G3_OB_CCK 0x03800000
+#define AR9287_AN_RF2G3_OB_CCK_S 23
+#define AR9287_AN_RF2G3_OB_PSK 0x00700000
+#define AR9287_AN_RF2G3_OB_PSK_S 20
+#define AR9287_AN_RF2G3_OB_QAM 0x000E0000
+#define AR9287_AN_RF2G3_OB_QAM_S 17
+#define AR9287_AN_RF2G3_OB_PAL_OFF 0x0001C000
+#define AR9287_AN_RF2G3_OB_PAL_OFF_S 14
+
+#define AR9287_AN_TXPC0 0x7898
+#define AR9287_AN_TXPC0_TXPCMODE 0x0000C000
+#define AR9287_AN_TXPC0_TXPCMODE_S 14
+#define AR9287_AN_TXPC0_TXPCMODE_NORMAL 0
+#define AR9287_AN_TXPC0_TXPCMODE_TEST 1
+#define AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE 2
+#define AR9287_AN_TXPC0_TXPCMODE_ATBTEST 3
+
+#define AR9287_AN_TOP2 0x78b4
+#define AR9287_AN_TOP2_XPABIAS_LVL 0xC0000000
+#define AR9287_AN_TOP2_XPABIAS_LVL_S 30
+
+/* AR9271 specific stuff */
+#define AR9271_RESET_POWER_DOWN_CONTROL 0x50044
+#define AR9271_RADIO_RF_RST 0x20
+#define AR9271_GATE_MAC_CTL 0x4000
+
+#define AR_STA_ID1_STA_AP 0x00010000
+#define AR_STA_ID1_ADHOC 0x00020000
+#define AR_STA_ID1_PWR_SAV 0x00040000
+#define AR_STA_ID1_KSRCHDIS 0x00080000
+#define AR_STA_ID1_PCF 0x00100000
+#define AR_STA_ID1_USE_DEFANT 0x00200000
+#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
+#define AR_STA_ID1_AR9100_BA_FIX 0x00400000
+#define AR_STA_ID1_RTS_USE_DEF 0x00800000
+#define AR_STA_ID1_ACKCTS_6MB 0x01000000
+#define AR_STA_ID1_BASE_RATE_11B 0x02000000
+#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000
+#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000
+#define AR_STA_ID1_KSRCH_MODE 0x10000000
+#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000
+#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000
+#define AR_STA_ID1_MCAST_KSRCH 0x80000000
+
+#define AR_BSS_ID0 0x8008
+#define AR_BSS_ID1 0x800C
+#define AR_BSS_ID1_U16 0x0000FFFF
+#define AR_BSS_ID1_AID 0x07FF0000
+#define AR_BSS_ID1_AID_S 16
+
+#define AR_BCN_RSSI_AVE 0x8010
+#define AR_BCN_RSSI_AVE_MASK 0x00000FFF
+
+#define AR_TIME_OUT 0x8014
+#define AR_TIME_OUT_ACK 0x00003FFF
+#define AR_TIME_OUT_ACK_S 0
+#define AR_TIME_OUT_CTS 0x3FFF0000
+#define AR_TIME_OUT_CTS_S 16
+
+#define AR_RSSI_THR 0x8018
+#define AR_RSSI_THR_MASK 0x000000FF
+#define AR_RSSI_THR_BM_THR 0x0000FF00
+#define AR_RSSI_THR_BM_THR_S 8
+#define AR_RSSI_BCN_WEIGHT 0x1F000000
+#define AR_RSSI_BCN_WEIGHT_S 24
+#define AR_RSSI_BCN_RSSI_RST 0x20000000
+
+#define AR_USEC 0x801c
+#define AR_USEC_USEC 0x0000007F
+#define AR_USEC_TX_LAT 0x007FC000
+#define AR_USEC_TX_LAT_S 14
+#define AR_USEC_RX_LAT 0x1F800000
+#define AR_USEC_RX_LAT_S 23
+#define AR_USEC_ASYNC_FIFO 0x12E00074
+
+#define AR_RESET_TSF 0x8020
+#define AR_RESET_TSF_ONCE 0x01000000
+#define AR_RESET_TSF2_ONCE 0x02000000
+
+#define AR_MAX_CFP_DUR 0x8038
+#define AR_CFP_VAL 0x0000FFFF
+
+#define AR_RX_FILTER 0x803C
+
+#define AR_MCAST_FIL0 0x8040
+#define AR_MCAST_FIL1 0x8044
+
+/*
+ * AR_DIAG_SW - Register used for diagnostics and testing purposes.
+ *
+ * The force RX abort bit (AR_DIAG_RX_ABORT, bit 25) can be used together with
+ * the RX block bit (AR_DIAG_RX_DIS, bit 5) to shut down the receive path
+ * quickly during a fast channel change: the force RX abort bit kills any frame
+ * currently being transferred between the MAC and the baseband, while the RX
+ * block bit prevents any new frames from being started.
+ */
+#define AR_DIAG_SW 0x8048
+#define AR_DIAG_CACHE_ACK 0x00000001
+#define AR_DIAG_ACK_DIS 0x00000002
+#define AR_DIAG_CTS_DIS 0x00000004
+#define AR_DIAG_ENCRYPT_DIS 0x00000008
+#define AR_DIAG_DECRYPT_DIS 0x00000010
+#define AR_DIAG_RX_DIS 0x00000020 /* RX block */
+#define AR_DIAG_LOOP_BACK 0x00000040
+#define AR_DIAG_CORR_FCS 0x00000080
+#define AR_DIAG_CHAN_INFO 0x00000100
+#define AR_DIAG_SCRAM_SEED 0x0001FE00
+#define AR_DIAG_SCRAM_SEED_S 8
+#define AR_DIAG_FRAME_NV0 0x00020000
+#define AR_DIAG_OBS_PT_SEL1 0x000C0000
+#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S 27
+#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
+#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
+#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
+#define AR_DIAG_EIFS_CTRL_ENA 0x00800000
+#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000
+#define AR_DIAG_RX_ABORT 0x02000000 /* Force RX abort */
+#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000
+#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000
+#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000
+
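The fast channel change sequence described in the AR_DIAG_SW comment above is driven by setting both bits at once and clearing them again after the retune. A minimal sketch, assuming the REG_SET_BIT/REG_CLR_BIT helpers and the ath_hw pointer "ah" that ath9k defines elsewhere (none of this is part of the patch itself):

	/* kill any frame in flight and block new RX before the channel switch */
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT);

	/* ... program the new channel ... */

	/* re-open the receive path once the radio has settled */
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT);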
+#define AR_TSF_L32 0x804c
+#define AR_TSF_U32 0x8050
+
+#define AR_TST_ADDAC 0x8054
+#define AR_DEF_ANTENNA 0x8058
+
+#define AR_AES_MUTE_MASK0 0x805c
+#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
+#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
+#define AR_AES_MUTE_MASK0_QOS_S 16
+
+#define AR_AES_MUTE_MASK1 0x8060
+#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
+#define AR_AES_MUTE_MASK1_FC_MGMT 0xFFFF0000
+#define AR_AES_MUTE_MASK1_FC_MGMT_S 16
+
+#define AR_GATED_CLKS 0x8064
+#define AR_GATED_CLKS_TX 0x00000002
+#define AR_GATED_CLKS_RX 0x00000004
+#define AR_GATED_CLKS_REG 0x00000008
+
+#define AR_OBS_BUS_CTRL 0x8068
+#define AR_OBS_BUS_SEL_1 0x00040000
+#define AR_OBS_BUS_SEL_2 0x00080000
+#define AR_OBS_BUS_SEL_3 0x000C0000
+#define AR_OBS_BUS_SEL_4 0x08040000
+#define AR_OBS_BUS_SEL_5 0x08080000
+
+#define AR_OBS_BUS_1 0x806c
+#define AR_OBS_BUS_1_PCU 0x00000001
+#define AR_OBS_BUS_1_RX_END 0x00000002
+#define AR_OBS_BUS_1_RX_WEP 0x00000004
+#define AR_OBS_BUS_1_RX_BEACON 0x00000008
+#define AR_OBS_BUS_1_RX_FILTER 0x00000010
+#define AR_OBS_BUS_1_TX_HCF 0x00000020
+#define AR_OBS_BUS_1_QUIET_TIME 0x00000040
+#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080
+#define AR_OBS_BUS_1_TX_HOLD 0x00000100
+#define AR_OBS_BUS_1_TX_FRAME 0x00000200
+#define AR_OBS_BUS_1_RX_FRAME 0x00000400
+#define AR_OBS_BUS_1_RX_CLEAR 0x00000800
+#define AR_OBS_BUS_1_WEP_STATE 0x0003F000
+#define AR_OBS_BUS_1_WEP_STATE_S 12
+#define AR_OBS_BUS_1_RX_STATE 0x01F00000
+#define AR_OBS_BUS_1_RX_STATE_S 20
+#define AR_OBS_BUS_1_TX_STATE 0x7E000000
+#define AR_OBS_BUS_1_TX_STATE_S 25
+
+#define AR_LAST_TSTP 0x8080
+#define AR_NAV 0x8084
+#define AR_RTS_OK 0x8088
+#define AR_RTS_FAIL 0x808c
+#define AR_ACK_FAIL 0x8090
+#define AR_FCS_FAIL 0x8094
+#define AR_BEACON_CNT 0x8098
+
+#define AR_SLEEP1 0x80d4
+#define AR_SLEEP1_ASSUME_DTIM 0x00080000
+#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000
+#define AR_SLEEP1_CAB_TIMEOUT_S 21
+
+#define AR_SLEEP2 0x80d8
+#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
+#define AR_SLEEP2_BEACON_TIMEOUT_S 21
+
+#define AR_TPC 0x80e8
+#define AR_TPC_ACK 0x0000003f
+#define AR_TPC_ACK_S 0
+#define AR_TPC_CTS 0x00003f00
+#define AR_TPC_CTS_S 8
+#define AR_TPC_CHIRP 0x003f0000
+#define AR_TPC_CHIRP_S 16
+#define AR_TPC_RPT 0x3f000000
+#define AR_TPC_RPT_S 24
+
+#define AR_QUIET1 0x80fc
+#define AR_QUIET1_NEXT_QUIET_S 0
+#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
+#define AR_QUIET1_QUIET_ENABLE 0x00010000
+#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000
+#define AR_QUIET1_QUIET_ACK_CTS_ENABLE_S 17
+#define AR_QUIET2 0x8100
+#define AR_QUIET2_QUIET_PERIOD_S 0
+#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff
+#define AR_QUIET2_QUIET_DUR_S 16
+#define AR_QUIET2_QUIET_DUR 0xffff0000
+
+#define AR_TSF_PARM 0x8104
+#define AR_TSF_INCREMENT_M 0x000000ff
+#define AR_TSF_INCREMENT_S 0x00
+
+#define AR_QOS_NO_ACK 0x8108
+#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f
+#define AR_QOS_NO_ACK_TWO_BIT_S 0
+#define AR_QOS_NO_ACK_BIT_OFF 0x00000070
+#define AR_QOS_NO_ACK_BIT_OFF_S 4
+#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180
+#define AR_QOS_NO_ACK_BYTE_OFF_S 7
+
+#define AR_PHY_ERR 0x810c
+
+#define AR_PHY_ERR_DCHIRP 0x00000008
+#define AR_PHY_ERR_RADAR 0x00000020
+#define AR_PHY_ERR_OFDM_TIMING 0x00020000
+#define AR_PHY_ERR_CCK_TIMING 0x02000000
+
+#define AR_RXFIFO_CFG 0x8114
+
+
+#define AR_MIC_QOS_CONTROL 0x8118
+#define AR_MIC_QOS_SELECT 0x811c
+
+#define AR_PCU_MISC 0x8120
+#define AR_PCU_FORCE_BSSID_MATCH 0x00000001
+#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004
+#define AR_PCU_TX_ADD_TSF 0x00000008
+#define AR_PCU_CCK_SIFS_MODE 0x00000010
+#define AR_PCU_RX_ANT_UPDT 0x00000800
+#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000
+#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000
+#define AR_PCU_BUG_12306_FIX_ENA 0x00020000
+#define AR_PCU_FORCE_QUIET_COLL 0x00040000
+#define AR_PCU_TBTT_PROTECT 0x00200000
+#define AR_PCU_CLEAR_VMF 0x01000000
+#define AR_PCU_CLEAR_BA_VALID 0x04000000
+#define AR_PCU_ALWAYS_PERFORM_KEYSEARCH 0x10000000
+
+#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000
+#define AR_PCU_BT_ANT_PREVENT_RX_S 20
+
+#define AR_FILT_OFDM 0x8124
+#define AR_FILT_OFDM_COUNT 0x00FFFFFF
+
+#define AR_FILT_CCK 0x8128
+#define AR_FILT_CCK_COUNT 0x00FFFFFF
+
+#define AR_PHY_ERR_1 0x812c
+#define AR_PHY_ERR_1_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_1 0x8130
+
+#define AR_PHY_ERR_2 0x8134
+#define AR_PHY_ERR_2_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_2 0x8138
+
+#define AR_PHY_COUNTMAX (3 << 22)
+#define AR_MIBCNT_INTRMASK (3 << 22)
+
+#define AR_TSFOOR_THRESHOLD 0x813c
+#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
+
+#define AR_PHY_ERR_EIFS_MASK 0x8144
+
+#define AR_PHY_ERR_3 0x8168
+#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
+#define AR_PHY_ERR_MASK_3 0x816c
+
+#define AR_BT_COEX_MODE 0x8170
+#define AR_BT_TIME_EXTEND 0x000000ff
+#define AR_BT_TIME_EXTEND_S 0
+#define AR_BT_TXSTATE_EXTEND 0x00000100
+#define AR_BT_TXSTATE_EXTEND_S 8
+#define AR_BT_TX_FRAME_EXTEND 0x00000200
+#define AR_BT_TX_FRAME_EXTEND_S 9
+#define AR_BT_MODE 0x00000c00
+#define AR_BT_MODE_S 10
+#define AR_BT_QUIET 0x00001000
+#define AR_BT_QUIET_S 12
+#define AR_BT_QCU_THRESH 0x0001e000
+#define AR_BT_QCU_THRESH_S 13
+#define AR_BT_RX_CLEAR_POLARITY 0x00020000
+#define AR_BT_RX_CLEAR_POLARITY_S 17
+#define AR_BT_PRIORITY_TIME 0x00fc0000
+#define AR_BT_PRIORITY_TIME_S 18
+#define AR_BT_FIRST_SLOT_TIME 0xff000000
+#define AR_BT_FIRST_SLOT_TIME_S 24
+
+#define AR_BT_COEX_WEIGHT 0x8174
+#define AR_BT_COEX_WGHT 0xff55
+#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
+#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
+#define AR_STOMP_NONE_WLAN_WGHT 0x0000
+#define AR_BTCOEX_BT_WGHT 0x0000ffff
+#define AR_BTCOEX_BT_WGHT_S 0
+#define AR_BTCOEX_WL_WGHT 0xffff0000
+#define AR_BTCOEX_WL_WGHT_S 16
+
+#define AR_BT_COEX_WL_WEIGHTS0 0x8174
+#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
+
+#define AR9300_BT_WGHT 0xcccc4444
+
+#define AR_BT_COEX_MODE2 0x817c
+#define AR_BT_BCN_MISS_THRESH 0x000000ff
+#define AR_BT_BCN_MISS_THRESH_S 0
+#define AR_BT_BCN_MISS_CNT 0x0000ff00
+#define AR_BT_BCN_MISS_CNT_S 8
+#define AR_BT_HOLD_RX_CLEAR 0x00010000
+#define AR_BT_HOLD_RX_CLEAR_S 16
+#define AR_BT_DISABLE_BT_ANT 0x00100000
+#define AR_BT_DISABLE_BT_ANT_S 20
+
+#define AR_TXSIFS 0x81d0
+#define AR_TXSIFS_TIME 0x000000FF
+#define AR_TXSIFS_TX_LATENCY 0x00000F00
+#define AR_TXSIFS_TX_LATENCY_S 8
+#define AR_TXSIFS_ACK_SHIFT 0x00007000
+#define AR_TXSIFS_ACK_SHIFT_S 12
+
+#define AR_TXOP_X 0x81ec
+#define AR_TXOP_X_VAL 0x000000FF
+
+
+#define AR_TXOP_0_3 0x81f0
+#define AR_TXOP_4_7 0x81f4
+#define AR_TXOP_8_11 0x81f8
+#define AR_TXOP_12_15 0x81fc
+
+#define AR_NEXT_NDP2_TIMER 0x8180
+#define AR_GEN_TIMER_BANK_1_LEN 8
+#define AR_FIRST_NDP_TIMER 7
+#define AR_NDP2_PERIOD 0x81a0
+#define AR_NDP2_TIMER_MODE 0x81c0
+#define AR_GEN_TIMERS2_MODE_ENABLE_MASK 0x000000FF
+
+#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
+#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
+#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
+#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
+#define AR_NEXT_CFP AR_GEN_TIMERS(2)
+#define AR_NEXT_HCF AR_GEN_TIMERS(3)
+#define AR_NEXT_TIM AR_GEN_TIMERS(4)
+#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
+#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
+#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
+
+#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
+#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
+#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
+#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
+#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
+#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
+#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
+#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
+
+#define AR_TIMER_MODE 0x8240
+#define AR_TBTT_TIMER_EN 0x00000001
+#define AR_DBA_TIMER_EN 0x00000002
+#define AR_SWBA_TIMER_EN 0x00000004
+#define AR_HCF_TIMER_EN 0x00000008
+#define AR_TIM_TIMER_EN 0x00000010
+#define AR_DTIM_TIMER_EN 0x00000020
+#define AR_QUIET_TIMER_EN 0x00000040
+#define AR_NDP_TIMER_EN 0x00000080
+#define AR_TIMER_OVERFLOW_INDEX 0x00000700
+#define AR_TIMER_OVERFLOW_INDEX_S 8
+#define AR_TIMER_THRESH 0xFFFFF000
+#define AR_TIMER_THRESH_S 12
+
+#define AR_SLP32_MODE 0x8244
+#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF
+#define AR_SLP32_ENA 0x00100000
+#define AR_SLP32_TSF_WRITE_STATUS 0x00200000
+
+#define AR_SLP32_WAKE 0x8248
+#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF
+
+#define AR_SLP32_INC 0x824c
+#define AR_SLP32_TST_INC 0x000FFFFF
+
+#define AR_SLP_CNT 0x8250
+#define AR_SLP_CYCLE_CNT 0x8254
+
+#define AR_SLP_MIB_CTRL 0x8258
+#define AR_SLP_MIB_CLEAR 0x00000001
+#define AR_SLP_MIB_PENDING 0x00000002
+
+#define AR_MAC_PCU_LOGIC_ANALYZER 0x8264
+#define AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768 0x20000000
+
+
+#define AR_2040_MODE 0x8318
+#define AR_2040_JOINED_RX_CLEAR 0x00000001
+
+
+#define AR_EXTRCCNT 0x8328
+
+#define AR_SELFGEN_MASK 0x832c
+
+#define AR_PCU_TXBUF_CTRL 0x8340
+#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
+#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
+#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
+#define AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE 0x500
+
+#define AR_PCU_MISC_MODE2 0x8344
+#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002
+#define AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT 0x00000004
+
+#define AR_PCU_MISC_MODE2_RESERVED 0x00000038
+#define AR_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE 0x00000040
+#define AR_PCU_MISC_MODE2_CFP_IGNORE 0x00000080
+#define AR_PCU_MISC_MODE2_MGMT_QOS 0x0000FF00
+#define AR_PCU_MISC_MODE2_MGMT_QOS_S 8
+#define AR_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION 0x00010000
+#define AR_PCU_MISC_MODE2_ENABLE_AGGWEP 0x00020000
+#define AR_PCU_MISC_MODE2_HWWAR1 0x00100000
+#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000
+#define AR_PCU_MISC_MODE2_RESERVED2 0xFFFE0000
+
+#define AR_PCU_MISC_MODE3 0x83d0
+
+#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
+#define AR_MAC_PCU_GEN_TIMER_TSF_SEL 0x83d8
+
+#define AR_DIRECT_CONNECT 0x83a0
+#define AR_DC_AP_STA_EN 0x00000001
+#define AR_DC_TSF2_ENABLE 0x00000001
+
+#define AR_AES_MUTE_MASK0 0x805c
+#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
+#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
+#define AR_AES_MUTE_MASK0_QOS_S 16
+
+#define AR_AES_MUTE_MASK1 0x8060
+#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
+#define AR_AES_MUTE_MASK1_SEQ_S 0
+#define AR_AES_MUTE_MASK1_FC_MGMT 0xFFFF0000
+#define AR_AES_MUTE_MASK1_FC_MGMT_S 16
+
+#define AR_RATE_DURATION_0 0x8700
+#define AR_RATE_DURATION_31 0x87CC
+#define AR_RATE_DURATION_32 0x8780
+#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
+
+/* WoW - Wake On Wireless */
+
+#define AR_PMCTRL_AUX_PWR_DET 0x10000000 /* Puts Chip in L2 state */
+#define AR_PMCTRL_D3COLD_VAUX 0x00800000
+#define AR_PMCTRL_HOST_PME_EN 0x00400000 /* Send OOB WAKE_L on WoW
+ event */
+#define AR_PMCTRL_WOW_PME_CLR 0x00200000 /* Clear WoW event */
+#define AR_PMCTRL_PWR_STATE_MASK 0x0f000000 /* Power State Mask */
+#define AR_PMCTRL_PWR_STATE_D1D3 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D1D3_REAL 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D0 0x08000000 /* Activate D0 */
+#define AR_PMCTRL_PWR_PM_CTRL_ENA 0x00008000 /* Enable power mgmt */
+
+#define AR_WOW_BEACON_TIMO_MAX 0xffffffff
+
+#define AR9271_CORE_CLOCK 117 /* clock to 117 MHz */
+#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
+
+#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
+#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the multicast search
+ * based on both MAC address and key ID.
+ * If the bit is 0, the multicast search
+ * is based on the MAC address only.
+ * For Merlin (AR9280) and above only.
+ */
+#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables the AGG_WEP feature.
+ * When enabled, AGG_WEP takes charge of
+ * the encryption interface of pcu_txsm.
+ */
+
+#define AR9300_SM_BASE 0xa200
+#define AR9002_PHY_AGC_CONTROL 0x9860
+#define AR9003_PHY_AGC_CONTROL (AR9300_SM_BASE + 0xc4)
+#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
+#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
+#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
+#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
+#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
+#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
+#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
+#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
+#define AR_PHY_AGC_CONTROL_PKDET_CAL 0x00100000
+#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
+#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
+
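As a rough illustration of how the AGC control bits above are typically used, a calibration is started by setting AR_PHY_AGC_CONTROL_CAL, and the hardware clears the bit when it finishes. A minimal sketch, assuming the REG_SET_BIT/ath9k_hw_wait helpers and AH_WAIT_TIMEOUT defined elsewhere in ath9k (not introduced by this patch):

	/* request an internal calibration */
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);

	/* poll until the hardware clears the CAL bit, or give up */
	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
			   AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT))
		return false;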
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
new file mode 100644
index 000000000..955147ab4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_AIC_H
+#define REG_AIC_H
+
+#define AR_SM_BASE 0xa200
+#define AR_SM1_BASE 0xb200
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
+#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
+#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
+
+#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
+#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
+#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
+
+#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4)
+#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8)
+#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
+
+#define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4)
+
+#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
+#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
+
+#define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60)
+#define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64)
+
+/* AIC fields */
+#define AR_PHY_AIC_MON_ENABLE 0x80000000
+#define AR_PHY_AIC_MON_ENABLE_S 31
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000
+#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000
+#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17
+#define AR_PHY_AIC_F_WLAN 0x0001FC00
+#define AR_PHY_AIC_F_WLAN_S 10
+#define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200
+#define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9
+#define AR_PHY_AIC_CAL_ENABLE 0x00000100
+#define AR_PHY_AIC_CAL_ENABLE_S 8
+#define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE
+#define AR_PHY_AIC_BTTX_PWR_THR_S 1
+#define AR_PHY_AIC_ENABLE 0x00000001
+#define AR_PHY_AIC_ENABLE_S 0
+#define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000
+#define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20
+#define AR_PHY_AIC_BT_IDLE_CFG 0x00080000
+#define AR_PHY_AIC_BT_IDLE_CFG_S 19
+#define AR_PHY_AIC_STDBY_COND 0x00060000
+#define AR_PHY_AIC_STDBY_COND_S 17
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800
+#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11
+#define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700
+#define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8
+#define AR_PHY_AIC_RSSI_MAX 0x000000F0
+#define AR_PHY_AIC_RSSI_MAX_S 4
+#define AR_PHY_AIC_RSSI_MIN 0x0000000F
+#define AR_PHY_AIC_RSSI_MIN_S 0
+#define AR_PHY_AIC_RADIO_DELAY 0x7F000000
+#define AR_PHY_AIC_RADIO_DELAY_S 24
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000
+#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000
+#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000
+#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00
+#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200
+#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100
+#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF
+#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000
+#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000
+#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13
+#define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000
+#define AR_PHY_AIC_MON_PWR_EST_LONG_S 12
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00
+#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10
+#define AR_PHY_AIC_MON_PERF_THR 0x000003E0
+#define AR_PHY_AIC_MON_PERF_THR_S 5
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018
+#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006
+#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1
+#define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001
+#define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0
+#define AR_PHY_AIC_MON_DONE 0x80000000
+#define AR_PHY_AIC_MON_DONE_S 31
+#define AR_PHY_AIC_MON_ACTIVE 0x40000000
+#define AR_PHY_AIC_MON_ACTIVE_S 30
+#define AR_PHY_AIC_MEAS_COUNT 0x3F000000
+#define AR_PHY_AIC_MEAS_COUNT_S 24
+#define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000
+#define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18
+#define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800
+#define AR_PHY_AIC_CAL_HOP_COUNT_S 11
+#define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0
+#define AR_PHY_AIC_CAL_VALID_COUNT_S 4
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008
+#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004
+#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2
+#define AR_PHY_AIC_CAL_DONE 0x00000002
+#define AR_PHY_AIC_CAL_DONE_S 1
+#define AR_PHY_AIC_CAL_ACTIVE 0x00000001
+#define AR_PHY_AIC_CAL_ACTIVE_S 0
+
+#define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000
+#define AR_PHY_AIC_MEAS_MAG_MIN_S 22
+#define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000
+#define AR_PHY_AIC_MON_STALE_COUNT_S 15
+#define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00
+#define AR_PHY_AIC_MON_HOP_COUNT_S 8
+#define AR_PHY_AIC_CAL_AIC_SM 0x000000F8
+#define AR_PHY_AIC_CAL_AIC_SM_S 3
+#define AR_PHY_AIC_SM 0x00000007
+#define AR_PHY_AIC_SM_S 0
+#define AR_PHY_AIC_SRAM_VALID 0x00000001
+#define AR_PHY_AIC_SRAM_VALID_S 0
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E
+#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080
+#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00
+#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000
+#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000
+#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000
+#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00
+#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8
+#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006
+#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001
+#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0
+
+#endif /* REG_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/reg_mci.h b/drivers/net/wireless/ath/ath9k/reg_mci.h
new file mode 100644
index 000000000..625131070
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_mci.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_MCI_H
+#define REG_MCI_H
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/*
+ * 0 = no division,
+ * 1 = divide by 2,
+ * 2 = divide by 4,
+ * 3 = divide by 8
+ */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
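The CLK_DIV field above holds a two-bit encoding rather than a literal divisor. A minimal sketch of programming it, assuming the REG_RMW_FIELD helper defined elsewhere in ath9k (illustrative only):

	/* encoding: 0 = no division, 1 = /2, 2 = /4, 3 = /8 */
	REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, 2); /* divide by 4 */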
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_PRIORITY 0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
+#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
+
+#define AR_MCI_MISC 0x1a74
+#define AR_MCI_MISC_HW_FIX_EN 0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S 0
+
+#define AR_MCI_DBG_CNT_CTRL 0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID 0x000007f8
+#define AR_MCI_DBG_CNT_CTRL_BT_LINKID_S 3
+
+#define MCI_STAT_ALL_BT_LINKID 0xffff
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#endif /* REG_MCI_H */
diff --git a/drivers/net/wireless/ath/ath9k/reg_wow.h b/drivers/net/wireless/ath/ath9k/reg_wow.h
new file mode 100644
index 000000000..453054078
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_wow.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REG_WOW_H
+#define REG_WOW_H
+
+#define AR_WOW_PATTERN 0x825C
+#define AR_WOW_COUNT 0x8260
+#define AR_WOW_BCN_EN 0x8270
+#define AR_WOW_BCN_TIMO 0x8274
+#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
+#define AR_WOW_KEEP_ALIVE 0x827c
+#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
+#define AR_WOW_PATTERN_MATCH 0x828c
+
+/*
+ * AR_WOW_LENGTH1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * AR_WOW_LENGTH3
+ * bit 31:24 pattern 8 length
+ * bit 23:16 pattern 9 length
+ * bit 15:8 pattern 10 length
+ * bit 7:0 pattern 11 length
+ *
+ * AR_WOW_LENGTH4
+ * bit 31:24 pattern 12 length
+ * bit 23:16 pattern 13 length
+ * bit 15:8 pattern 14 length
+ * bit 7:0 pattern 15 length
+ */
+#define AR_WOW_LENGTH1 0x8360
+#define AR_WOW_LENGTH2 0x8364
+#define AR_WOW_LENGTH3 0x8380
+#define AR_WOW_LENGTH4 0x8384
+
+#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
+#define AR_MAC_PCU_WOW4 0x8370
+
+#define AR_SW_WOW_CONTROL 0x20018
+#define AR_SW_WOW_ENABLE 0x1
+#define AR_SWITCH_TO_REFCLK 0x2
+#define AR_RESET_CONTROL 0x4
+#define AR_RESET_VALUE_MASK 0x8
+#define AR_HW_WOW_DISABLE 0x10
+#define AR_CLR_MAC_INTERRUPT 0x20
+#define AR_CLR_KA_INTERRUPT 0x40
+
+#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 27) /* in usecs */
+#define AR_WOW_MAC_INTR_EN 0x00040000
+#define AR_WOW_MAGIC_EN 0x00010000
+#define AR_WOW_PATTERN_EN(x) (x & 0xff)
+#define AR_WOW_PAT_FOUND_SHIFT 8
+#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
+#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
+#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
+#define AR_WOW_MAC_INTR 0x00080000
+#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
+#define AR_WOW_BEACON_FAIL 0x00200000
+
+#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
+ AR_WOW_MAGIC_PAT_FOUND | \
+ AR_WOW_KEEP_ALIVE_FAIL | \
+ AR_WOW_BEACON_FAIL))
+#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
+ AR_WOW_MAGIC_EN | \
+ AR_WOW_MAC_INTR_EN | \
+ AR_WOW_BEACON_FAIL | \
+ AR_WOW_KEEP_ALIVE_FAIL))
+
+#define AR_WOW2_PATTERN_EN(x) ((x & 0xff) << 0)
+#define AR_WOW2_PATTERN_FOUND_SHIFT 8
+#define AR_WOW2_PATTERN_FOUND(x) (x & (0xff << AR_WOW2_PATTERN_FOUND_SHIFT))
+#define AR_WOW2_PATTERN_FOUND_MASK ((0xff) << AR_WOW2_PATTERN_FOUND_SHIFT)
+
+#define AR_WOW_STATUS2(x) (x & AR_WOW2_PATTERN_FOUND_MASK)
+#define AR_WOW_CLEAR_EVENTS2(x) (x & ~(AR_WOW2_PATTERN_EN(0xff)))
+
+#define AR_WOW_AIFS_CNT(x) (x & 0xff)
+#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
+#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
+
+#define AR_WOW_BEACON_FAIL_EN 0x00000001
+#define AR_WOW_BEACON_TIMO 0x40000000
+#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
+#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
+#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
+#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
+#define AR_WOW_BMISSTHRESHOLD 0x20
+#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
+#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
+#define AR_WOW_PAT_BACKOFF 0x00000004
+#define AR_WOW_CNT_AIFS_CNT 0x00000022
+#define AR_WOW_CNT_SLOT_CNT 0x00000009
+#define AR_WOW_CNT_KA_CNT 0x00000008
+
+#define AR_WOW_TRANSMIT_BUFFER 0xe000
+#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
+#define AR_WOW_KA_DESC_WORD2 0xe000
+#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
+#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
+#define AR_WOW_PATTERN_SUPPORTED_LEGACY 0xff
+#define AR_WOW_PATTERN_SUPPORTED 0xffff
+#define AR_WOW_LENGTH_MAX 0xff
+#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
+#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
+#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
+#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
+#define AR_WOW_LEN3_SHIFT(_i) ((0xb - ((_i) & 0xb)) << 0x3)
+#define AR_WOW_LENGTH3_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN3_SHIFT(_i))
+#define AR_WOW_LEN4_SHIFT(_i) ((0xf - ((_i) & 0xf)) << 0x3)
+#define AR_WOW_LENGTH4_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN4_SHIFT(_i))
+
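The AR_WOW_LENGTH layout documented above packs four 8-bit pattern lengths per register, most significant byte first, which is what the LEN*_SHIFT/LENGTH*_MASK helpers express. A minimal sketch of updating the length of one of patterns 0-3, assuming the REG_READ/REG_WRITE helpers and hypothetical variables pattern_index and pattern_len (illustrative only, not part of this patch):

	/* patterns 0-3 live in AR_WOW_LENGTH1; the byte lane depends on (i & 3) */
	u32 val = REG_READ(ah, AR_WOW_LENGTH1);

	val &= ~AR_WOW_LENGTH1_MASK(pattern_index);
	val |= ((u32)pattern_len << AR_WOW_LEN1_SHIFT(pattern_index)) &
	       AR_WOW_LENGTH1_MASK(pattern_index);
	REG_WRITE(ah, AR_WOW_LENGTH1, val);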
+#endif /* REG_WOW_H */
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
new file mode 100644
index 000000000..ac4781f37
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_drain_all_txq(sc);
+ ath_startrecv(sc);
+
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ ieee80211_wake_queues(sc->hw);
+
+ kfree_skb(sc->tx99_skb);
+ sc->tx99_skb = NULL;
+ sc->tx99_state = false;
+
+ ath9k_hw_tx99_stop(sc->sc_ah);
+ ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+ static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+ 0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+ 0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+ 0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+ 0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+ 0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+ 0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+ 0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+ u32 len = 1200;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb;
+ struct ath_vif *avp;
+
+ if (!sc->tx99_vif)
+ return NULL;
+
+ avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, len);
+
+ memset(skb->data, 0, len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+ hdr->duration_id = 0;
+
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ memset(tx_info, 0, sizeof(*tx_info));
+ rate = &tx_info->control.rates[0];
+ tx_info->band = sc->cur_chan->chandef.chan->band;
+ tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.vif = sc->tx99_vif;
+ rate->count = 1;
+ if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ if (IS_CHAN_HT40(ah->curchan))
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ }
+
+ memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+ return skb;
+}
+
+static void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+ ath_reset(sc, NULL);
+
+ ath9k_ps_wakeup(sc);
+ ath9k_tx99_stop(sc);
+ ath9k_ps_restore(sc);
+}
+
+static int ath9k_tx99_init(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int r;
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_err(common,
+ "driver is in an invalid state, unable to use TX99");
+ return -EINVAL;
+ }
+
+ sc->tx99_skb = ath9k_build_tx99_skb(sc);
+ if (!sc->tx99_skb)
+ return -ENOMEM;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+ ath_reset(sc, NULL);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_hw_disable_interrupts(ah);
+ atomic_set(&ah->intr_ref_cnt, -1);
+ ath_drain_all_txq(sc);
+ ath_stoprecv(sc);
+
+ sc->tx99_state = true;
+
+ ieee80211_stop_queues(hw);
+
+ if (sc->tx99_power == MAX_RATE_POWER + 1)
+ sc->tx99_power = MAX_RATE_POWER;
+
+ ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+ r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+ if (r) {
+ ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+ return r;
+ }
+
+ ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ /* We leave the hardware awake as it will be chugging on */
+
+ return 0;
+}
+
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[3];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->tx99_state);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ bool start;
+ ssize_t len;
+ int r;
+
+ if (sc->cur_chan->nvifs > 1)
+ return -EOPNOTSUPP;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strtobool(buf, &start))
+ return -EINVAL;
+
+ if (start == sc->tx99_state) {
+ if (!start)
+ return count;
+ ath_dbg(common, XMIT, "Resetting TX99\n");
+ ath9k_tx99_deinit(sc);
+ }
+
+ if (!start) {
+ ath9k_tx99_deinit(sc);
+ return count;
+ }
+
+ r = ath9k_tx99_init(sc);
+ if (r)
+ return r;
+
+ return count;
+}
+
+static const struct file_operations fops_tx99 = {
+ .read = read_file_tx99,
+ .write = write_file_tx99,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d (%d dBm)\n",
+ sc->tx99_power,
+ sc->tx99_power / 2);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ int r;
+ u8 tx_power;
+
+ r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+ if (r)
+ return r;
+
+ if (tx_power > MAX_RATE_POWER)
+ return -EINVAL;
+
+ sc->tx99_power = tx_power;
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+ ath9k_ps_restore(sc);
+
+ return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+ .read = read_file_tx99_power,
+ .write = write_file_tx99_power,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_tx99_init_debug(struct ath_softc *sc)
+{
+ if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+ return;
+
+ debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99);
+ debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_tx99_power);
+}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 000000000..ca533b432
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
+{
+ switch (wmi_cmd) {
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMDID";
+ case WMI_ACCESS_MEMORY_CMDID:
+ return "WMI_ACCESS_MEMORY_CMDID";
+ case WMI_GET_FW_VERSION:
+ return "WMI_GET_FW_VERSION";
+ case WMI_DISABLE_INTR_CMDID:
+ return "WMI_DISABLE_INTR_CMDID";
+ case WMI_ENABLE_INTR_CMDID:
+ return "WMI_ENABLE_INTR_CMDID";
+ case WMI_ATH_INIT_CMDID:
+ return "WMI_ATH_INIT_CMDID";
+ case WMI_ABORT_TXQ_CMDID:
+ return "WMI_ABORT_TXQ_CMDID";
+ case WMI_STOP_TX_DMA_CMDID:
+ return "WMI_STOP_TX_DMA_CMDID";
+ case WMI_ABORT_TX_DMA_CMDID:
+ return "WMI_ABORT_TX_DMA_CMDID";
+ case WMI_DRAIN_TXQ_CMDID:
+ return "WMI_DRAIN_TXQ_CMDID";
+ case WMI_DRAIN_TXQ_ALL_CMDID:
+ return "WMI_DRAIN_TXQ_ALL_CMDID";
+ case WMI_START_RECV_CMDID:
+ return "WMI_START_RECV_CMDID";
+ case WMI_STOP_RECV_CMDID:
+ return "WMI_STOP_RECV_CMDID";
+ case WMI_FLUSH_RECV_CMDID:
+ return "WMI_FLUSH_RECV_CMDID";
+ case WMI_SET_MODE_CMDID:
+ return "WMI_SET_MODE_CMDID";
+ case WMI_NODE_CREATE_CMDID:
+ return "WMI_NODE_CREATE_CMDID";
+ case WMI_NODE_REMOVE_CMDID:
+ return "WMI_NODE_REMOVE_CMDID";
+ case WMI_VAP_REMOVE_CMDID:
+ return "WMI_VAP_REMOVE_CMDID";
+ case WMI_VAP_CREATE_CMDID:
+ return "WMI_VAP_CREATE_CMDID";
+ case WMI_REG_READ_CMDID:
+ return "WMI_REG_READ_CMDID";
+ case WMI_REG_WRITE_CMDID:
+ return "WMI_REG_WRITE_CMDID";
+ case WMI_REG_RMW_CMDID:
+ return "WMI_REG_RMW_CMDID";
+ case WMI_RC_STATE_CHANGE_CMDID:
+ return "WMI_RC_STATE_CHANGE_CMDID";
+ case WMI_RC_RATE_UPDATE_CMDID:
+ return "WMI_RC_RATE_UPDATE_CMDID";
+ case WMI_TARGET_IC_UPDATE_CMDID:
+ return "WMI_TARGET_IC_UPDATE_CMDID";
+ case WMI_TX_AGGR_ENABLE_CMDID:
+ return "WMI_TX_AGGR_ENABLE_CMDID";
+ case WMI_TGT_DETACH_CMDID:
+ return "WMI_TGT_DETACH_CMDID";
+ case WMI_NODE_UPDATE_CMDID:
+ return "WMI_NODE_UPDATE_CMDID";
+ case WMI_INT_STATS_CMDID:
+ return "WMI_INT_STATS_CMDID";
+ case WMI_TX_STATS_CMDID:
+ return "WMI_TX_STATS_CMDID";
+ case WMI_RX_STATS_CMDID:
+ return "WMI_RX_STATS_CMDID";
+ case WMI_BITRATE_MASK_CMDID:
+ return "WMI_BITRATE_MASK_CMDID";
+ }
+
+ return "Bogus";
+}
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ wmi->drv_priv = priv;
+ wmi->stopped = false;
+ skb_queue_head_init(&wmi->wmi_event_queue);
+ spin_lock_init(&wmi->wmi_lock);
+ spin_lock_init(&wmi->event_lock);
+ mutex_init(&wmi->op_mutex);
+ mutex_init(&wmi->multi_write_mutex);
+ mutex_init(&wmi->multi_rmw_mutex);
+ init_completion(&wmi->cmd_wait);
+ INIT_LIST_HEAD(&wmi->pending_tx_events);
+ tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
+ (unsigned long)wmi);
+
+ return wmi;
+}
+
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi = priv->wmi;
+
+ mutex_lock(&wmi->op_mutex);
+ wmi->stopped = true;
+ mutex_unlock(&wmi->op_mutex);
+
+ kfree(priv->wmi);
+}
+
+void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
+{
+ unsigned long flags;
+
+ tasklet_kill(&priv->wmi->wmi_event_tasklet);
+ spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
+ __skb_queue_purge(&priv->wmi->wmi_event_queue);
+ spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
+}
+
+void ath9k_wmi_event_tasklet(unsigned long data)
+{
+ struct wmi *wmi = (struct wmi *)data;
+ struct ath9k_htc_priv *priv = wmi->drv_priv;
+ struct wmi_cmd_hdr *hdr;
+ void *wmi_event;
+ struct wmi_event_swba *swba;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ u16 cmd_id;
+
+ do {
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ skb = __skb_dequeue(&wmi->wmi_event_queue);
+ if (!skb) {
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+ wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ switch (cmd_id) {
+ case WMI_SWBA_EVENTID:
+ swba = (struct wmi_event_swba *) wmi_event;
+ ath9k_htc_swba(priv, swba);
+ break;
+ case WMI_FATAL_EVENTID:
+ ieee80211_queue_work(wmi->drv_priv->hw,
+ &wmi->drv_priv->fatal_work);
+ break;
+ case WMI_TXSTATUS_EVENTID:
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
+ spin_unlock_bh(&priv->tx.tx_lock);
+ break;
+ }
+ spin_unlock_bh(&priv->tx.tx_lock);
+
+ ath9k_htc_txstatus(priv, wmi_event);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+ } while (1);
+}
+
+void ath9k_fatal_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ fatal_work);
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
+ ath9k_htc_reset(priv);
+}
+
+static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
+ memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
+
+ complete(&wmi->cmd_wait);
+}
+
+static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid)
+{
+ struct wmi *wmi = (struct wmi *) priv;
+ struct wmi_cmd_hdr *hdr;
+ u16 cmd_id;
+
+ if (unlikely(wmi->stopped))
+ goto free_skb;
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+
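+ /*
+ * WMI event IDs start at 0x1001 (see enum wmi_event_id in wmi.h) while
+ * command IDs start at 0x0001, so bit 12 tells asynchronous events apart
+ * from command responses: events are queued for the event tasklet, while
+ * responses complete the ath9k_wmi_cmd() call waiting on cmd_wait.
+ */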
+ if (cmd_id & 0x1000) {
+ spin_lock(&wmi->wmi_lock);
+ __skb_queue_tail(&wmi->wmi_event_queue, skb);
+ spin_unlock(&wmi->wmi_lock);
+ tasklet_schedule(&wmi->wmi_event_tasklet);
+ return;
+ }
+
+ /* Check if there has been a timeout. */
+ spin_lock(&wmi->wmi_lock);
+ if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
+ spin_unlock(&wmi->wmi_lock);
+ goto free_skb;
+ }
+ spin_unlock(&wmi->wmi_lock);
+
+ /* WMI command response */
+ ath9k_wmi_rsp_callback(wmi, skb);
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid, bool txok)
+{
+ kfree_skb(skb);
+}
+
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid)
+{
+ struct htc_service_connreq connect;
+ int ret;
+
+ wmi->htc = htc;
+
+ memset(&connect, 0, sizeof(connect));
+
+ connect.ep_callbacks.priv = wmi;
+ connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
+ connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
+ connect.service_id = WMI_CONTROL_SVC;
+
+ ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
+ if (ret)
+ return ret;
+
+ *wmi_ctrl_epid = wmi->ctrl_epid;
+
+ return 0;
+}
+
+static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ struct sk_buff *skb,
+ enum wmi_cmd_id cmd, u16 len)
+{
+ struct wmi_cmd_hdr *hdr;
+ unsigned long flags;
+
+ hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
+ hdr->command_id = cpu_to_be16(cmd);
+ hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_seq_id = wmi->tx_seq_id;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
+ return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
+}
+
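+/*
+ * Send a synchronous WMI command on the control endpoint and wait up to
+ * 'timeout' jiffies for the matching response; ath9k_wmi_ctrl_rx() copies
+ * the response payload into rsp_buf and signals cmd_wait. Callers typically
+ * go through the WMI_CMD()/WMI_CMD_BUF() macros in wmi.h, which pass HZ * 2
+ * as the timeout.
+ */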
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout)
+{
+ struct ath_hw *ah = wmi->drv_priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 headroom = sizeof(struct htc_frame_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+ struct sk_buff *skb;
+ u8 *data;
+ int time_left, ret = 0;
+
+ if (ah->ah_flags & AH_UNPLUGGED)
+ return 0;
+
+ skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+
+ if (cmd_len != 0 && cmd_buf != NULL) {
+ data = (u8 *) skb_put(skb, cmd_len);
+ memcpy(data, cmd_buf, cmd_len);
+ }
+
+ mutex_lock(&wmi->op_mutex);
+
+ /* check if wmi stopped flag is set */
+ if (unlikely(wmi->stopped)) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ /* record the rsp buffer and length */
+ wmi->cmd_rsp_buf = rsp_buf;
+ wmi->cmd_rsp_len = rsp_len;
+
+ ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
+ if (ret)
+ goto out;
+
+ time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
+ if (!time_left) {
+ ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&wmi->op_mutex);
+
+ return 0;
+
+out:
+ ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ kfree_skb(skb);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 000000000..380175d5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+struct wmi_event_txrate {
+ __be32 txrate;
+ struct {
+ u8 rssi_thresh;
+ u8 per;
+ } rc_stats;
+} __packed;
+
+struct wmi_cmd_hdr {
+ __be16 command_id;
+ __be16 seq_no;
+} __packed;
+
+struct wmi_fw_version {
+ __be16 major;
+ __be16 minor;
+
+} __packed;
+
+struct wmi_event_swba {
+ __be64 tsf;
+ u8 beacon_pending;
+} __packed;
+
+/*
+ * 64 - HTC header - WMI header - 1 / txstatus
+ * Some additional header space is also accounted for;
+ * 12 seems to be the magic number.
+ */
+#define HTC_MAX_TX_STATUS 12
+
+#define ATH9K_HTC_TXSTAT_ACK BIT(0)
+#define ATH9K_HTC_TXSTAT_FILT BIT(1)
+#define ATH9K_HTC_TXSTAT_RTC_CTS BIT(2)
+#define ATH9K_HTC_TXSTAT_MCS BIT(3)
+#define ATH9K_HTC_TXSTAT_CW40 BIT(4)
+#define ATH9K_HTC_TXSTAT_SGI BIT(5)
+
+/*
+ * Legacy rates are indicated as indices.
+ * HT rates are indicated as dot11 numbers.
+ * This allows us to restrict the rate field
+ * to 4 bits.
+ */
+#define ATH9K_HTC_TXSTAT_RATE 0x0f
+#define ATH9K_HTC_TXSTAT_RATE_S 0
+
+#define ATH9K_HTC_TXSTAT_EPID 0xf0
+#define ATH9K_HTC_TXSTAT_EPID_S 4
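+
+/*
+ * The rate and endpoint ID share ts_rate; they can be recovered as, for
+ * example (illustrative only, using the masks above):
+ *
+ *	rate = ts_rate & ATH9K_HTC_TXSTAT_RATE;
+ *	epid = (ts_rate & ATH9K_HTC_TXSTAT_EPID) >> ATH9K_HTC_TXSTAT_EPID_S;
+ */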
+
+struct __wmi_event_txstatus {
+ u8 cookie;
+ u8 ts_rate; /* Also holds EP ID */
+ u8 ts_flags;
+};
+
+struct wmi_event_txstatus {
+ u8 cnt;
+ struct __wmi_event_txstatus txstatus[HTC_MAX_TX_STATUS];
+} __packed;
+
+enum wmi_cmd_id {
+ WMI_ECHO_CMDID = 0x0001,
+ WMI_ACCESS_MEMORY_CMDID,
+
+ /* Commands to Target */
+ WMI_GET_FW_VERSION,
+ WMI_DISABLE_INTR_CMDID,
+ WMI_ENABLE_INTR_CMDID,
+ WMI_ATH_INIT_CMDID,
+ WMI_ABORT_TXQ_CMDID,
+ WMI_STOP_TX_DMA_CMDID,
+ WMI_ABORT_TX_DMA_CMDID,
+ WMI_DRAIN_TXQ_CMDID,
+ WMI_DRAIN_TXQ_ALL_CMDID,
+ WMI_START_RECV_CMDID,
+ WMI_STOP_RECV_CMDID,
+ WMI_FLUSH_RECV_CMDID,
+ WMI_SET_MODE_CMDID,
+ WMI_NODE_CREATE_CMDID,
+ WMI_NODE_REMOVE_CMDID,
+ WMI_VAP_REMOVE_CMDID,
+ WMI_VAP_CREATE_CMDID,
+ WMI_REG_READ_CMDID,
+ WMI_REG_WRITE_CMDID,
+ WMI_RC_STATE_CHANGE_CMDID,
+ WMI_RC_RATE_UPDATE_CMDID,
+ WMI_TARGET_IC_UPDATE_CMDID,
+ WMI_TX_AGGR_ENABLE_CMDID,
+ WMI_TGT_DETACH_CMDID,
+ WMI_NODE_UPDATE_CMDID,
+ WMI_INT_STATS_CMDID,
+ WMI_TX_STATS_CMDID,
+ WMI_RX_STATS_CMDID,
+ WMI_BITRATE_MASK_CMDID,
+ WMI_REG_RMW_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_TGT_RDY_EVENTID = 0x1001,
+ WMI_SWBA_EVENTID,
+ WMI_FATAL_EVENTID,
+ WMI_TXTO_EVENTID,
+ WMI_BMISS_EVENTID,
+ WMI_DELBA_EVENTID,
+ WMI_TXSTATUS_EVENTID,
+};
+
+#define MAX_CMD_NUMBER 62
+#define MAX_RMW_CMD_NUMBER 15
+
+struct register_write {
+ __be32 reg;
+ __be32 val;
+};
+
+struct register_rmw {
+ __be32 reg;
+ __be32 set;
+ __be32 clr;
+} __packed;
+
+struct ath9k_htc_tx_event {
+ int count;
+ struct __wmi_event_txstatus txs;
+ struct list_head list;
+};
+
+struct wmi {
+ struct ath9k_htc_priv *drv_priv;
+ struct htc_target *htc;
+ enum htc_endpoint_id ctrl_epid;
+ struct mutex op_mutex;
+ struct completion cmd_wait;
+ u16 last_seq_id;
+ struct sk_buff_head wmi_event_queue;
+ struct tasklet_struct wmi_event_tasklet;
+ u16 tx_seq_id;
+ u8 *cmd_rsp_buf;
+ u32 cmd_rsp_len;
+ bool stopped;
+
+ struct list_head pending_tx_events;
+ spinlock_t event_lock;
+
+ spinlock_t wmi_lock;
+
+ /* multi write section */
+ atomic_t mwrite_cnt;
+ struct register_write multi_write[MAX_CMD_NUMBER];
+ u32 multi_write_idx;
+ struct mutex multi_write_mutex;
+
+ /* multi rmw section */
+ atomic_t m_rmw_cnt;
+ struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
+ u32 multi_rmw_idx;
+ struct mutex multi_rmw_mutex;
+
+};
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid);
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout);
+void ath9k_wmi_event_tasklet(unsigned long data);
+void ath9k_fatal_work(struct work_struct *work);
+void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
+
+#define WMI_CMD(_wmi_cmd) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
+ (u8 *) &cmd_rsp, \
+ sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#define WMI_CMD_BUF(_wmi_cmd, _buf) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
+ (u8 *) _buf, sizeof(*_buf), \
+ &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
+ } while (0)
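+
+/*
+ * Illustrative usage, assuming the caller has a local 'int ret' and a
+ * 'cmd_rsp' variable of the response type expected by the firmware:
+ *
+ *	WMI_CMD(WMI_START_RECV_CMDID);
+ *	if (ret)
+ *		return ret;
+ */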
+
+#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
new file mode 100644
index 000000000..8d0b1730a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static const struct wiphy_wowlan_support ath9k_wowlan_support_legacy = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_USER_PATTERN,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
+
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = MAX_NUM_PATTERN - 2,
+ .pattern_min_len = 1,
+ .pattern_max_len = MAX_PATTERN_SIZE,
+};
+
+static u8 ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
+{
+ u8 wow_triggers = 0;
+
+ if (wowlan->disconnect)
+ wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
+ if (wowlan->magic_pkt)
+ wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
+
+ if (wowlan->n_patterns)
+ wow_triggers |= AH_WOW_USER_PATTERN_EN;
+
+ return wow_triggers;
+}
+
+static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int pattern_count = 0;
+ int ret, i, byte_cnt = 0;
+ u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+ u8 dis_deauth_mask[MAX_PATTERN_SIZE];
+
+ memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+ memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
+
+ /*
+ * Create the Disassociate / Deauthenticate packet filter
+ *
+ * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
+ * +--------------+----------+---------+--------+--------+----
+ * + Frame Control+ Duration + DA + SA + BSSID +
+ * +--------------+----------+---------+--------+--------+----
+ *
+ * The above is the management frame format for disassociate/
+ * deauthenticate pattern; from this we need to match the first byte
+ * of 'Frame Control' and the DA, SA, and BSSID fields
+ * (skipping the 2nd byte of FC and the Duration field).
+ *
+ * Disassociate pattern
+ * --------------------
+ * Frame control = 00 00 1010
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ *
+ * Deauthenticate pattern
+ * ----------------------
+ * Frame control = 00 00 1100
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ */
+
+ /* Fill out the mask with all FF's */
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+ dis_deauth_mask[i] = 0xff;
+
+ /* copy the first byte of frame control field */
+ dis_deauth_pattern[byte_cnt] = 0xa0;
+ byte_cnt++;
+
+ /* skip 2nd byte of frame control and Duration field */
+ byte_cnt += 3;
+
+ /*
+ * We need not match the destination mac address; it can be a broadcast
+ * mac address or a unicast address directed to this station.
+ */
+ byte_cnt += 6;
+
+ /* copy the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ byte_cnt += 6;
+
+ /* copy the bssid; it's the same as the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ /* Create Disassociate pattern mask */
+ dis_deauth_mask[0] = 0xfe;
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
+
+ ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+ if (ret)
+ goto exit;
+
+ pattern_count++;
+ /*
+ * for de-authenticate pattern, only the first byte of the frame
+ * control field gets changed from 0xA0 to 0xC0
+ */
+ dis_deauth_pattern[0] = 0xC0;
+
+ ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+exit:
+ return ret;
+}
+
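+/*
+ * Hardware pattern slots 0 and 1 are taken by the disassoc and deauth
+ * patterns built above; user-supplied patterns are therefore programmed
+ * starting at slot 2 (hence the 'i + 2' below and the
+ * 'MAX_NUM_PATTERN - 2' in ath9k_wowlan_support).
+ */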
+static int ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ u8 wow_pattern[MAX_PATTERN_SIZE];
+ u8 wow_mask[MAX_PATTERN_SIZE];
+ int mask_len, ret = 0;
+ s8 i = 0;
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ mask_len = DIV_ROUND_UP(patterns[i].pattern_len, 8);
+ memset(wow_pattern, 0, MAX_PATTERN_SIZE);
+ memset(wow_mask, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern, patterns[i].pattern, patterns[i].pattern_len);
+ memcpy(wow_mask, patterns[i].mask, mask_len);
+
+ ret = ath9k_hw_wow_apply_pattern(ah,
+ wow_pattern,
+ wow_mask,
+ i + 2,
+ patterns[i].pattern_len);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 triggers;
+ int ret = 0;
+
+ ath9k_deinit_channel_context(sc);
+
+ mutex_lock(&sc->mutex);
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ ath_err(common, "Device not present\n");
+ ret = -ENODEV;
+ goto fail_wow;
+ }
+
+ if (WARN_ON(!wowlan)) {
+ ath_err(common, "None of the WoW triggers enabled\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
+
+ if (sc->cur_chan->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ if (ath9k_is_chanctx_enabled()) {
+ if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
+ ath_dbg(common, WOW,
+ "Multi-channel WOW is not supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+ }
+
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+ ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ triggers = ath9k_wow_map_triggers(sc, wowlan);
+ if (!triggers) {
+ ath_dbg(common, WOW, "No valid WoW triggers\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ ath_cancel_work(sc);
+ ath_stop_ani(sc);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_stop_btcoex(sc);
+
+ /*
+ * Enable wake up on receiving disassoc/deauth
+ * frames by default.
+ */
+ ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
+ if (ret) {
+ ath_err(common,
+ "Unable to add disassoc/deauth pattern: %d\n", ret);
+ goto fail_wow;
+ }
+
+ if (triggers & AH_WOW_USER_PATTERN_EN) {
+ ret = ath9k_wow_add_pattern(sc, wowlan);
+ if (ret) {
+ ath_err(common,
+ "Unable to add user pattern: %d\n", ret);
+ goto fail_wow;
+ }
+ }
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ /*
+ * To avoid false wake, we enable beacon miss interrupt only
+ * when we go to sleep. We save the current interrupt mask
+ * so we can restore it after the system wakes up
+ */
+ sc->wow_intr_before_sleep = ah->imask;
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /*
+ * We can now sync the irq and kill any running tasklets, since we have
+ * already disabled interrupts and are not holding a spin lock.
+ */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+
+ ath9k_hw_wow_enable(ah, triggers);
+
+ ath9k_ps_restore(sc);
+ ath_dbg(common, WOW, "Suspend with WoW triggers: 0x%x\n", triggers);
+
+ set_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
+fail_wow:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
+
+int ath9k_resume(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 status;
+
+ mutex_lock(&sc->mutex);
+
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = sc->wow_intr_before_sleep;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ status = ath9k_hw_wow_wakeup(ah);
+ ath_dbg(common, WOW, "Resume with WoW status: 0x%x\n", status);
+
+ ath_restart_work(sc);
+ ath9k_start_btcoex(sc);
+
+ clear_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
+
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ mutex_lock(&sc->mutex);
+ device_set_wakeup_enable(sc->dev, enabled);
+ mutex_unlock(&sc->mutex);
+
+ ath_dbg(common, WOW, "WoW wakeup source is %s\n",
+ (enabled) ? "enabled" : "disabled");
+}
+
+void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow) {
+ if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
+ hw->wiphy->wowlan = &ath9k_wowlan_support;
+ else
+ hw->wiphy->wowlan = &ath9k_wowlan_support_legacy;
+
+ device_init_wakeup(sc->dev, 1);
+ }
+}
+
+void ath9k_deinit_wow(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow)
+ device_init_wakeup(sc->dev, 0);
+}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
new file mode 100644
index 000000000..3ad79bb4f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -0,0 +1,2978 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define BITS_PER_BYTE 8
+#define OFDM_PLCP_BITS 22
+#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
+#define L_STF 8
+#define L_LTF 8
+#define L_SIG 4
+#define HT_SIG 8
+#define HT_STF 4
+#define HT_LTF(_ns) (4 * (_ns))
+#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
+#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
+#define TIME_SYMBOLS(t) ((t) >> 2)
+#define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18)
+#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
+#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
+
+
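+/*
+ * Data bits carried per OFDM symbol for MCS 0-7 with a single spatial
+ * stream; higher MCS indices are handled by indexing with 'rix % 8' and
+ * scaling by HT_RC_2_STREAMS(rix) in the duration calculations below.
+ */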
+static u16 bits_per_symbol[][2] = {
+ /* 20MHz 40MHz */
+ { 26, 54 }, /* 0: BPSK */
+ { 52, 108 }, /* 1: QPSK 1/2 */
+ { 78, 162 }, /* 2: QPSK 3/4 */
+ { 104, 216 }, /* 3: 16-QAM 1/2 */
+ { 156, 324 }, /* 4: 16-QAM 3/4 */
+ { 208, 432 }, /* 5: 64-QAM 2/3 */
+ { 234, 486 }, /* 6: 64-QAM 3/4 */
+ { 260, 540 }, /* 7: 64-QAM 5/6 */
+};
+
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct sk_buff *skb);
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ int tx_flags, struct ath_txq *txq);
+static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok);
+static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
+ struct list_head *head, bool internal);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok);
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ int seqno);
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct sk_buff *skb);
+
+enum {
+ MCS_HT20,
+ MCS_HT20_SGI,
+ MCS_HT40,
+ MCS_HT40_SGI,
+};
+
+/*********************/
+/* Aggregation logic */
+/*********************/
+
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+ __acquires(&txq->axq_lock)
+{
+ spin_lock_bh(&txq->axq_lock);
+}
+
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&q);
+ skb_queue_splice_init(&txq->complete_q, &q);
+ spin_unlock_bh(&txq->axq_lock);
+
+ while ((skb = __skb_dequeue(&q)))
+ ieee80211_tx_status(sc->hw, skb);
+}
+
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
+{
+ struct ath_atx_ac *ac = tid->ac;
+ struct list_head *list;
+ struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+ struct ath_chanctx *ctx = avp->chanctx;
+
+ if (!ctx)
+ return;
+
+ if (tid->sched)
+ return;
+
+ tid->sched = true;
+ list_add_tail(&tid->list, &ac->tid_q);
+
+ if (ac->sched)
+ return;
+
+ ac->sched = true;
+
+ list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+ list_add_tail(&ac->list, list);
+}
+
+static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ BUILD_BUG_ON(sizeof(struct ath_frame_info) >
+ sizeof(tx_info->rate_driver_data));
+ return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+}
+
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+ if (!tid->an->sta)
+ return;
+
+ ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+ seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
+static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ath_buf *bf)
+{
+ ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
+ ARRAY_SIZE(bf->rates));
+}
+
+static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ int q = fi->txq;
+
+ if (q < 0)
+ return;
+
+ txq = sc->tx.txq_map[q];
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
+
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
+ if (ath9k_is_chanctx_enabled())
+ ieee80211_wake_queue(sc->hw, info->hw_queue);
+ else
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = false;
+ }
+}
+
+static struct ath_atx_tid *
+ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+{
+ u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ return ATH_AN_2_TID(an, tidno);
+}
+
+static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
+{
+ return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+}
+
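+/*
+ * Dequeue from the retry queue first so that subframes pending
+ * retransmission (with older sequence numbers) are sent ahead of
+ * fresh frames from buf_q.
+ */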
+static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&tid->retry_q);
+ if (!skb)
+ skb = __skb_dequeue(&tid->buf_q);
+
+ return skb;
+}
+
+/*
+ * ath_tx_tid_change_state:
+ * - clears the A-MPDU flag of the previous session
+ * - forces sequence number allocation to fix the next BlockAck Window
+ */
+static void
+ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = tid->ac->txq;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb, *tskb;
+ struct ath_buf *bf;
+ struct ath_frame_info *fi;
+
+ skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ if (bf)
+ continue;
+
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ if (!bf) {
+ __skb_unlink(skb, &tid->buf_q);
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
+ }
+ }
+
+}
+
+static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = tid->ac->txq;
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ struct list_head bf_head;
+ struct ath_tx_status ts;
+ struct ath_frame_info *fi;
+ bool sendbar = false;
+
+ INIT_LIST_HEAD(&bf_head);
+
+ memset(&ts, 0, sizeof(ts));
+
+ while ((skb = __skb_dequeue(&tid->retry_q))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
+ }
+
+ if (fi->baw_tracked) {
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ sendbar = true;
+ }
+
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ }
+
+ if (sendbar) {
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, tid->seq_start);
+ ath_txq_lock(sc, txq);
+ }
+}
+
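+/*
+ * The block-ack window is tracked in the tid->tx_buf bitmap: the bit at
+ * (baw_head + ATH_BA_INDEX(seq_start, seqno)) % ATH_TID_MAX_BUFS marks an
+ * outstanding subframe.  Clearing a completed subframe's bit lets the loop
+ * below slide seq_start/baw_head forward past every leading completed
+ * entry, which is what advances the window.
+ */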
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ int seqno)
+{
+ int index, cindex;
+
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+
+ __clear_bit(cindex, tid->tx_buf);
+
+ while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+ INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ if (tid->bar_index >= 0)
+ tid->bar_index--;
+ }
+}
+
+static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ struct ath_buf *bf)
+{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
+ int index, cindex;
+
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
+ __set_bit(cindex, tid->tx_buf);
+ fi->baw_tracked = 1;
+
+ if (index >= ((tid->baw_tail - tid->baw_head) &
+ (ATH_TID_MAX_BUFS - 1))) {
+ tid->baw_tail = cindex;
+ INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
+ }
+}
+
+static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
+
+{
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ struct list_head bf_head;
+ struct ath_tx_status ts;
+ struct ath_frame_info *fi;
+
+ memset(&ts, 0, sizeof(ts));
+ INIT_LIST_HEAD(&bf_head);
+
+ while ((skb = ath_tid_dequeue(tid))) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+ if (!bf) {
+ ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+ continue;
+ }
+
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ }
+}
+
+static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
+ struct sk_buff *skb, int count)
+{
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_buf *bf = fi->bf;
+ struct ieee80211_hdr *hdr;
+ int prev = fi->retries;
+
+ TX_STAT_INC(txq->axq_qnum, a_retries);
+ fi->retries += count;
+
+ if (prev > 0)
+ return;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+}
+
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+{
+ struct ath_buf *bf = NULL;
+
+ spin_lock_bh(&sc->tx.txbuflock);
+
+ if (unlikely(list_empty(&sc->tx.txbuf))) {
+ spin_unlock_bh(&sc->tx.txbuflock);
+ return NULL;
+ }
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+
+ spin_unlock_bh(&sc->tx.txbuflock);
+
+ return bf;
+}
+
+static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
+{
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
+}
+
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_buf *tbf;
+
+ tbf = ath_tx_get_buffer(sc);
+ if (WARN_ON(!tbf))
+ return NULL;
+
+ ATH_TXBUF_RESET(tbf);
+
+ tbf->bf_mpdu = bf->bf_mpdu;
+ tbf->bf_buf_addr = bf->bf_buf_addr;
+ memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
+ tbf->bf_state = bf->bf_state;
+ tbf->bf_state.stale = false;
+
+ return tbf;
+}
+
+static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int txok,
+ int *nframes, int *nbad)
+{
+ struct ath_frame_info *fi;
+ u16 seq_st = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int ba_index;
+ int isaggr = 0;
+
+ *nbad = 0;
+ *nframes = 0;
+
+ isaggr = bf_isaggr(bf);
+ if (isaggr) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
+ }
+
+ while (bf) {
+ fi = get_frame_info(bf->bf_mpdu);
+ ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
+
+ (*nframes)++;
+ if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
+ (*nbad)++;
+
+ bf = bf->bf_next;
+ }
+}
+
+
+static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_buf *bf, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok)
+{
+ struct ath_node *an = NULL;
+ struct sk_buff *skb;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_atx_tid *tid = NULL;
+ struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
+ struct list_head bf_head;
+ struct sk_buff_head bf_pending;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
+ bool rc_update = true, isba;
+ struct ieee80211_tx_rate rates[4];
+ struct ath_frame_info *fi;
+ int nframes;
+ bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ int i, retries;
+ int bar_index = -1;
+
+ skb = bf->bf_mpdu;
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ memcpy(rates, bf->rates, sizeof(rates));
+
+ retries = ts->ts_longretry + 1;
+ for (i = 0; i < ts->ts_rateindex; i++)
+ retries += rates[i].count;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
+ if (!sta) {
+ rcu_read_unlock();
+
+ INIT_LIST_HEAD(&bf_head);
+ while (bf) {
+ bf_next = bf->bf_next;
+
+ if (!bf->bf_state.stale || bf_next != NULL)
+ list_move_tail(&bf->list, &bf_head);
+
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
+
+ bf = bf_next;
+ }
+ return;
+ }
+
+ an = (struct ath_node *)sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, skb);
+ seq_first = tid->seq_start;
+ isba = ts->ts_flags & ATH9K_TX_BA;
+
+ /*
+ * The hardware occasionally sends a tx status for the wrong TID.
+ * In this case, the BA status cannot be considered valid and all
+ * subframes need to be retransmitted
+ *
+ * Only BlockAcks have a TID and therefore normal Acks cannot be
+ * checked
+ */
+ if (isba && tid->tidno != ts->tid)
+ txok = false;
+
+ isaggr = bf_isaggr(bf);
+ memset(ba, 0, WME_BA_BMP_SIZE >> 3);
+
+ if (isaggr && txok) {
+ if (ts->ts_flags & ATH9K_TX_BA) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
+ } else {
+ /*
+ * AR5416 can become deaf/mute when BA
+ * issue happens. Chip needs to be reset.
+ * But the AP code may have synchronization issues
+ * when performing an internal reset in this routine.
+ * Only enable reset in STA mode for now.
+ */
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
+ needreset = 1;
+ }
+ }
+
+ __skb_queue_head_init(&bf_pending);
+
+ ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
+ while (bf) {
+ u16 seqno = bf->bf_state.seqno;
+
+ txfail = txpending = sendbar = 0;
+ bf_next = bf->bf_next;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ fi = get_frame_info(skb);
+
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+ !tid->active) {
+ /*
+ * Outside of the current BlockAck window,
+ * maybe part of a previous session
+ */
+ txfail = 1;
+ } else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
+ /* transmit completion, subframe is
+ * acked by block ack */
+ acked_cnt++;
+ } else if (!isaggr && txok) {
+ /* transmit completion */
+ acked_cnt++;
+ } else if (flush) {
+ txpending = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (txok || !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+ retries);
+
+ txpending = 1;
+ } else {
+ txfail = 1;
+ txfail_cnt++;
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ }
+
+ /*
+ * Make sure the last desc is reclaimed if it is
+ * not a holding desc.
+ */
+ INIT_LIST_HEAD(&bf_head);
+ if (bf_next != NULL || !bf_last->bf_state.stale)
+ list_move_tail(&bf->list, &bf_head);
+
+ if (!txpending) {
+ /*
+ * complete the acked-ones/xretried ones; update
+ * block-ack window
+ */
+ ath_tx_update_baw(sc, tid, seqno);
+
+ if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
+ memcpy(tx_info->control.rates, rates, sizeof(rates));
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
+ rc_update = false;
+ if (bf == bf->bf_lastbf)
+ ath_dynack_sample_tx_ts(sc->sc_ah,
+ bf->bf_mpdu,
+ ts);
+ }
+
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ !txfail);
+ } else {
+ if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
+ tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+ ieee80211_sta_eosp(sta);
+ }
+ /* retry the un-acked ones */
+ if (bf->bf_next == NULL && bf_last->bf_state.stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ ath_tx_update_baw(sc, tid, seqno);
+
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head, ts, 0);
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ break;
+ }
+
+ fi->bf = tbf;
+ }
+
+ /*
+ * Put this buffer into the temporary pending
+ * queue to retain ordering
+ */
+ __skb_queue_tail(&bf_pending, skb);
+ }
+
+ bf = bf_next;
+ }
+
+ /* prepend un-acked frames to the beginning of the pending frame queue */
+ if (!skb_queue_empty(&bf_pending)) {
+ if (an->sleeping)
+ ieee80211_sta_set_buffered(sta, tid->tidno, true);
+
+ skb_queue_splice_tail(&bf_pending, &tid->retry_q);
+ if (!an->sleeping) {
+ ath_tx_queue_tid(sc, txq, tid);
+
+ if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
+ tid->ac->clear_ps_filter = true;
+ }
+ }
+
+ if (bar_index >= 0) {
+ u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
+
+ if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+ tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+ ath_txq_lock(sc, txq);
+ }
+
+ rcu_read_unlock();
+
+ if (needreset)
+ ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
+}
+
+static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+}
+
+static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_tx_status *ts, struct ath_buf *bf,
+ struct list_head *bf_head)
+{
+ struct ieee80211_tx_info *info;
+ bool txok, flush;
+
+ txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+ flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ txq->axq_tx_inprogress = false;
+
+ txq->axq_depth--;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
+
+ ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
+ ts->ts_rateindex);
+ if (!bf_isampdu(bf)) {
+ if (!flush) {
+ info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ memcpy(info->control.rates, bf->rates,
+ sizeof(info->control.rates));
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
+ ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
+ }
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+ } else
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+
+ if (!flush)
+ ath_txq_schedule(sc, txq);
+}
+
+static bool ath_lookup_legacy(struct ath_buf *bf)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rates;
+ int i;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ rates = tx_info->control.rates;
+
+ for (i = 0; i < 4; i++) {
+ if (!rates[i].count || rates[i].idx < 0)
+ break;
+
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
+ return true;
+ }
+
+ return false;
+}
+
+static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_atx_tid *tid)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rates;
+ u32 max_4ms_framelen, frmlen;
+ u16 aggr_limit, bt_aggr_limit, legacy = 0;
+ int q = tid->ac->txq->mac80211_qnum;
+ int i;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ rates = bf->rates;
+
+ /*
+ * Find the lowest frame length among the rate series that will have a
+ * 4ms (or TXOP limited) transmit duration.
+ */
+ max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
+
+ for (i = 0; i < 4; i++) {
+ int modeidx;
+
+ if (!rates[i].count)
+ continue;
+
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+ legacy = 1;
+ break;
+ }
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ modeidx = MCS_HT40;
+ else
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
+
+ frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
+ max_4ms_framelen = min(max_4ms_framelen, frmlen);
+ }
+
+ /*
+ * Limit the aggregate size by the minimum rate if the selected rate is
+ * not a probe rate; if the selected rate is a probe rate,
+ * avoid aggregating this packet altogether.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
+ return 0;
+
+ aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);
+
+ /*
+ * Override the default aggregation limit for BTCOEX.
+ */
+ bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
+ if (bt_aggr_limit)
+ aggr_limit = bt_aggr_limit;
+
+ if (tid->an->maxampdu)
+ aggr_limit = min(aggr_limit, tid->an->maxampdu);
+
+ return aggr_limit;
+}
+
+/*
+ * Returns the number of delimiters to be added to
+ * meet the minimum required mpdudensity.
+ */
+static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
+ struct ath_buf *bf, u16 frmlen,
+ bool first_subfrm)
+{
+#define FIRST_DESC_NDELIMS 60
+ u32 nsymbits, nsymbols;
+ u16 minlen;
+ u8 flags, rix;
+ int width, streams, half_gi, ndelim, mindelim;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+
+ /* Select standard number of delimiters based on frame length alone */
+ ndelim = ATH_AGGR_GET_NDELIM(frmlen);
+
+ /*
+ * If encryption is enabled, the hardware requires some more padding
+ * between subframes.
+ * TODO - this could be improved to depend on the rate:
+ * the hardware can keep up at lower rates, but not at higher rates.
+ */
+ if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+ !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
+ ndelim += ATH_AGGR_ENCRYPTDELIM;
+
+ /*
+ * Add a delimiter when using RTS/CTS with aggregation
+ * on a non-enterprise AR9003 card.
+ */
+ if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
+ (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
+ ndelim = max(ndelim, FIRST_DESC_NDELIMS);
+
+ /*
+ * Convert the desired mpdu density from microseconds to bytes, based
+ * on the highest rate in the rate series (i.e. the first rate), to
+ * determine the required minimum length for a subframe. Take into
+ * account whether the highest rate is 20 or 40 MHz and half or full GI.
+ *
+ * If there is no mpdu density restriction, no further calculation
+ * is needed.
+ */
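+ /*
+ * Rough example of the computation below, assuming the usual 4-byte
+ * A-MPDU delimiter (ATH_AGGR_DELIM_SZ): with an mpdu density of 8 us
+ * at MCS 7, HT20, full GI, nsymbols = 8 >> 2 = 2 and nsymbits = 260,
+ * so minlen = 2 * 260 / 8 = 65 bytes; a 40-byte subframe would then
+ * need (65 - 40) / 4 = 6 extra delimiters.
+ */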
+
+ if (tid->an->mpdudensity == 0)
+ return ndelim;
+
+ rix = bf->rates[0].idx;
+ flags = bf->rates[0].flags;
+ width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
+ half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
+
+ if (half_gi)
+ nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
+ else
+ nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
+
+ if (nsymbols == 0)
+ nsymbols = 1;
+
+ streams = HT_RC_2_STREAMS(rix);
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
+ minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
+
+ if (frmlen < minlen) {
+ mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
+ ndelim = max(mindelim, ndelim);
+ }
+
+ return ndelim;
+}
+
+static struct ath_buf *
+ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct sk_buff_head **q)
+{
+ struct ieee80211_tx_info *tx_info;
+ struct ath_frame_info *fi;
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ u16 seqno;
+
+ while (1) {
+ *q = &tid->retry_q;
+ if (skb_queue_empty(*q))
+ *q = &tid->buf_q;
+
+ skb = skb_peek(*q);
+ if (!skb)
+ break;
+
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ else
+ bf->bf_state.stale = false;
+
+ if (!bf) {
+ __skb_unlink(skb, *q);
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
+ }
+
+ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+ /*
+ * No aggregation session is running, but there may be frames
+ * from a previous session or a failed attempt in the queue.
+ * Send them out as normal data frames
+ */
+ if (!tid->active)
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = 0;
+ return bf;
+ }
+
+ bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
+ seqno = bf->bf_state.seqno;
+
+ /* do not step over block-ack window */
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
+ break;
+
+ if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+ struct ath_tx_status ts = {};
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+ __skb_unlink(skb, *q);
+ ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ continue;
+ }
+
+ return bf;
+ }
+
+ return NULL;
+}
+
+static bool
+ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q,
+ int *aggr_len)
+{
+#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ int nframes = 0, ndelim;
+ u16 aggr_limit = 0, al = 0, bpad = 0,
+ al_delta, h_baw = tid->baw_size / 2;
+ struct ieee80211_tx_info *tx_info;
+ struct ath_frame_info *fi;
+ struct sk_buff *skb;
+ bool closed = false;
+
+ bf = bf_first;
+ aggr_limit = ath_lookup_rate(sc, bf, tid);
+
+ do {
+ skb = bf->bf_mpdu;
+ fi = get_frame_info(skb);
+
+ /* do not exceed aggregation limit */
+ al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
+ if (nframes) {
+ if (aggr_limit < al + bpad + al_delta ||
+ ath_lookup_legacy(bf) || nframes >= h_baw)
+ break;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
+ break;
+ }
+
+ /* add padding for previous frame to aggregation length */
+ al += bpad + al_delta;
+
+ /*
+ * Get the delimiters needed to meet the MPDU
+ * density for this node.
+ */
+ ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
+ !nframes);
+ bpad = PADBYTES(al_delta) + (ndelim << 2);
+
+ nframes++;
+ bf->bf_next = NULL;
+
+ /* link buffers of this frame to the aggregate */
+ if (!fi->baw_tracked)
+ ath_tx_addto_baw(sc, tid, bf);
+ bf->bf_state.ndelim = ndelim;
+
+ __skb_unlink(skb, tid_q);
+ list_add_tail(&bf->list, bf_q);
+ if (bf_prev)
+ bf_prev->bf_next = bf;
+
+ bf_prev = bf;
+
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf) {
+ closed = true;
+ break;
+ }
+ } while (ath_tid_has_buffered(tid));
+
+ bf = bf_first;
+ bf->bf_lastbf = bf_prev;
+
+ if (bf == bf_prev) {
+ al = get_frame_info(bf->bf_mpdu)->framelen;
+ bf->bf_state.bf_type = BUF_AMPDU;
+ } else {
+ TX_STAT_INC(txq->axq_qnum, a_aggr);
+ }
+
+ *aggr_len = al;
+
+ return closed;
+#undef PADBYTES
+}
+
+/*
+ * rix - rate index
+ * pktlen - total bytes (delims + data + fcs + pads + pad delims)
+ * width - 0 for 20 MHz, 1 for 40 MHz
+ * half_gi - use a 3.6 us symbol time (short GI) instead of 4 us
+ */
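+/*
+ * Worked example: a 1500-byte MPDU at MCS 7, HT20, full GI gives
+ * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260 (one stream), so
+ * nsymbols = 47 and duration = 47 * 4 + 36 = 224 us, where 36 us is
+ * L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1).
+ */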
+static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+ int width, int half_gi, bool shortPreamble)
+{
+ u32 nbits, nsymbits, duration, nsymbols;
+ int streams;
+
+ /* find number of symbols: PLCP + data */
+ streams = HT_RC_2_STREAMS(rix);
+ nbits = (pktlen << 3) + OFDM_PLCP_BITS;
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
+ nsymbols = (nbits + nsymbits - 1) / nsymbits;
+
+ if (!half_gi)
+ duration = SYMBOL_TIME(nsymbols);
+ else
+ duration = SYMBOL_TIME_HALFGI(nsymbols);
+
+ /* add up the duration for legacy/ht training and signal fields */
+ duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+
+ return duration;
+}
+
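+/*
+ * Inverse of the above: given a TXOP duration, compute the largest frame
+ * that still fits.  For example, txop = 4096 us at MCS 7, HT20, full GI
+ * leaves 4096 - 36 = 4060 us, i.e. 1015 symbols, and
+ * (1015 * 260 - 22) / 8 = 32984 bytes.  ath_update_max_aggr_framelen()
+ * caches these limits in sc->tx.max_aggr_framelen, which ath_lookup_rate()
+ * consults when sizing aggregates.
+ */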
+static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
+{
+ int streams = HT_RC_2_STREAMS(mcs);
+ int symbols, bits;
+ int bytes = 0;
+
+ usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+ symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
+ bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
+ bits -= OFDM_PLCP_BITS;
+ bytes = bits / 8;
+ if (bytes > 65532)
+ bytes = 65532;
+
+ return bytes;
+}
+
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
+{
+ u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
+ int mcs;
+
+ /* 4ms is the default (and maximum) duration */
+ if (!txop || txop > 4096)
+ txop = 4096;
+
+ cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
+ cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
+ cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
+ cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
+ for (mcs = 0; mcs < 32; mcs++) {
+ cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
+ cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
+ cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
+ cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
+ }
+}
+
+static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
+ u8 rateidx, bool is_40, bool is_cck)
+{
+ u8 max_power;
+ struct sk_buff *skb;
+ struct ath_frame_info *fi;
+ struct ieee80211_tx_info *info;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (sc->tx99_state || !ah->tpc_enabled)
+ return MAX_RATE_POWER;
+
+ skb = bf->bf_mpdu;
+ fi = get_frame_info(skb);
+ info = IEEE80211_SKB_CB(skb);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ int txpower = fi->tx_power;
+
+ if (is_40) {
+ u8 power_ht40delta;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+
+ if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+ bool is_2ghz;
+ struct modal_eep_header *pmodal;
+
+ is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+ pmodal = &eep->modalHeader[is_2ghz];
+ power_ht40delta = pmodal->ht40PowerIncForPdadc;
+ } else {
+ power_ht40delta = 2;
+ }
+ txpower += power_ht40delta;
+ }
+
+ if (AR_SREV_9287(ah) || AR_SREV_9285(ah) ||
+ AR_SREV_9271(ah)) {
+ txpower -= 2 * AR9287_PWR_TABLE_OFFSET_DB;
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ s8 power_offset;
+
+ power_offset = ah->eep_ops->get_eeprom(ah,
+ EEP_PWR_TABLE_OFFSET);
+ txpower -= 2 * power_offset;
+ }
+
+ if (OLC_FOR_AR9280_20_LATER && is_cck)
+ txpower -= 2;
+
+ txpower = max(txpower, 0);
+ max_power = min_t(u8, ah->tx_power[rateidx], txpower);
+
+ /* XXX: clamp minimum TX power at 1 for AR9160 since if
+ * max_power is set to 0, frames are transmitted at max
+ * TX power
+ */
+ if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
+ max_power = 1;
+ } else if (!bf->bf_state.bfs_paprd) {
+ if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
+ max_power = min_t(u8, ah->tx_power_stbc[rateidx],
+ fi->tx_power);
+ else
+ max_power = min_t(u8, ah->tx_power[rateidx],
+ fi->tx_power);
+ } else {
+ max_power = ah->paprd_training_power;
+ }
+
+ return max_power;
+}
+
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_info *info, int len, bool rts)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rates;
+ const struct ieee80211_rate *rate;
+ struct ieee80211_hdr *hdr;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
+ int i;
+ u8 rix = 0;
+
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+ rates = bf->rates;
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ /* set dur_update_en for l-sig computation except for PS-Poll frames */
+ info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
+ info->rtscts_rate = fi->rtscts_rate;
+
+ for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
+ bool is_40, is_sgi, is_sp, is_cck;
+ int phy;
+
+ if (!rates[i].count || (rates[i].idx < 0))
+ continue;
+
+ rix = rates[i].idx;
+ info->rates[i].Tries = rates[i].count;
+
+ /*
+ * Handle RTS threshold for unaggregated HT frames.
+ */
+ if (bf_isampdu(bf) && !bf_isaggr(bf) &&
+ (rates[i].flags & IEEE80211_TX_RC_MCS) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+
+ if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ info->flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ info->flags |= ATH9K_TXDESC_CTSENA;
+ }
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
+
+ is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
+ is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
+ is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+ if (rates[i].flags & IEEE80211_TX_RC_MCS) {
+ /* MCS rates */
+ info->rates[i].Rate = rix | 0x80;
+ info->rates[i].ChSel = ath_txchainmask_reduction(sc,
+ ah->txchainmask, info->rates[i].Rate);
+ info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
+ is_40, is_sgi, is_sp);
+ if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
+ is_40, false);
+ continue;
+ }
+
+ /* legacy rates */
+ rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
+ if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ !(rate->flags & IEEE80211_RATE_ERP_G))
+ phy = WLAN_RC_PHY_CCK;
+ else
+ phy = WLAN_RC_PHY_OFDM;
+
+ info->rates[i].Rate = rate->hw_value;
+ if (rate->hw_value_short) {
+ if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ info->rates[i].Rate |= rate->hw_value_short;
+ } else {
+ is_sp = false;
+ }
+
+ if (bf->bf_state.bfs_paprd)
+ info->rates[i].ChSel = ah->txchainmask;
+ else
+ info->rates[i].ChSel = ath_txchainmask_reduction(sc,
+ ah->txchainmask, info->rates[i].Rate);
+
+ info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
+ phy, rate->bitrate * 100, len, rix, is_sp);
+
+ is_cck = IS_CCK_RATE(info->rates[i].Rate);
+ info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, false,
+ is_cck);
+ }
+
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
+ info->flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (info->flags & ATH9K_TXDESC_RTSENA)
+ info->flags &= ~ATH9K_TXDESC_CTSENA;
+}
+
+static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath9k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = ATH9K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = ATH9K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = ATH9K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = ATH9K_PKT_TYPE_PSPOLL;
+ else
+ htype = ATH9K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
+static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_txq *txq, int len)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_buf *bf_first = NULL;
+ struct ath_tx_info info;
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
+ bool rts = false;
+
+ memset(&info, 0, sizeof(info));
+ info.is_first = true;
+ info.is_last = true;
+ info.qcu = txq->axq_qnum;
+
+ while (bf) {
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
+
+ info.type = get_hw_packet_type(skb);
+ if (bf->bf_next)
+ info.link = bf->bf_next->bf_daddr;
+ else
+ info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
+
+ if (!bf_first) {
+ bf_first = bf;
+
+ if (!sc->tx99_state)
+ info.flags = ATH9K_TXDESC_INTREQ;
+ if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
+ txq == sc->tx.uapsdq)
+ info.flags |= ATH9K_TXDESC_CLRDMASK;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info.flags |= ATH9K_TXDESC_NOACK;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ info.flags |= ATH9K_TXDESC_LDPC;
+
+ if (bf->bf_state.bfs_paprd)
+ info.flags |= (u32) bf->bf_state.bfs_paprd <<
+ ATH9K_TXDESC_PAPRD_S;
+
+ /*
+ * mac80211 doesn't handle RTS threshold for HT because
+ * the decision has to be taken based on AMPDU length
+ * and aggregation is done entirely inside ath9k.
+ * Set the RTS/CTS flag for the first subframe based
+ * on the threshold.
+ */
+ if (aggr && (bf == bf_first) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ /*
+ * "len" is the size of the entire AMPDU.
+ */
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+
+ if (!aggr)
+ len = fi->framelen;
+
+ ath_buf_set_rate(sc, bf, &info, len, rts);
+ }
+
+ info.buf_addr[0] = bf->bf_buf_addr;
+ info.buf_len[0] = skb->len;
+ info.pkt_len = fi->framelen;
+ info.keyix = fi->keyix;
+ info.keytype = fi->keytype;
+
+ if (aggr) {
+ if (bf == bf_first)
+ info.aggr = AGGR_BUF_FIRST;
+ else if (bf == bf_first->bf_lastbf)
+ info.aggr = AGGR_BUF_LAST;
+ else
+ info.aggr = AGGR_BUF_MIDDLE;
+
+ info.ndelim = bf->bf_state.ndelim;
+ info.aggr_len = len;
+ }
+
+ if (bf == bf_first->bf_lastbf)
+ bf_first = NULL;
+
+ ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
+ bf = bf->bf_next;
+ }
+}
+
+static void
+ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q)
+{
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ struct sk_buff *skb;
+ int nframes = 0;
+
+ do {
+ struct ieee80211_tx_info *tx_info;
+ skb = bf->bf_mpdu;
+
+ nframes++;
+ __skb_unlink(skb, tid_q);
+ list_add_tail(&bf->list, bf_q);
+ if (bf_prev)
+ bf_prev->bf_next = bf;
+ bf_prev = bf;
+
+ if (nframes >= 2)
+ break;
+
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
+ break;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ break;
+
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ } while (1);
+}
+
+static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, bool *stop)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff_head *tid_q;
+ struct list_head bf_q;
+ int aggr_len = 0;
+ bool aggr, last = true;
+
+ if (!ath_tid_has_buffered(tid))
+ return false;
+
+ INIT_LIST_HEAD(&bf_q);
+
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
+ return false;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
+ if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ *stop = true;
+ return false;
+ }
+
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ if (aggr)
+ last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
+ tid_q, &aggr_len);
+ else
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+
+ if (list_empty(&bf_q))
+ return false;
+
+ if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
+ tid->ac->clear_ps_filter = false;
+ tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ }
+
+ ath_tx_fill_desc(sc, bf, txq, aggr_len);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+ return true;
+}
+
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ struct ath_atx_tid *txtid;
+ struct ath_txq *txq;
+ struct ath_node *an;
+ u8 density;
+
+ an = (struct ath_node *)sta->drv_priv;
+ txtid = ATH_AN_2_TID(an, tid);
+ txq = txtid->ac->txq;
+
+ ath_txq_lock(sc, txq);
+
+	/* Update the ampdu factor/density; they may have changed. This may
+	 * happen in HT IBSS when a beacon with HT-info is received after the
+	 * station has already been added.
+	 */
+ if (sta->ht_cap.ht_supported) {
+ an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor)) - 1;
+ density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->mpdudensity = density;
+ }
+
+ /* force sequence number allocation for pending frames */
+ ath_tx_tid_change_state(sc, txtid);
+
+ txtid->active = true;
+ *ssn = txtid->seq_start = txtid->seq_next;
+ txtid->bar_index = -1;
+
+ memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+ txtid->baw_head = txtid->baw_tail = 0;
+
+ ath_txq_unlock_complete(sc, txq);
+
+ return 0;
+}
+
+void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
+ struct ath_txq *txq = txtid->ac->txq;
+
+ ath_txq_lock(sc, txq);
+ txtid->active = false;
+ ath_tx_flush_tid(sc, txtid);
+ ath_tx_tid_change_state(sc, txtid);
+ ath_txq_unlock_complete(sc, txq);
+}
+
+void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ struct ath_node *an)
+{
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ bool buffered;
+ int tidno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+
+ ac = tid->ac;
+ txq = ac->txq;
+
+ ath_txq_lock(sc, txq);
+
+ if (!tid->sched) {
+ ath_txq_unlock(sc, txq);
+ continue;
+ }
+
+ buffered = ath_tid_has_buffered(tid);
+
+ tid->sched = false;
+ list_del(&tid->list);
+
+ if (ac->sched) {
+ ac->sched = false;
+ list_del(&ac->list);
+ }
+
+ ath_txq_unlock(sc, txq);
+
+ ieee80211_sta_set_buffered(sta, tidno, buffered);
+ }
+}
+
+void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ int tidno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+
+ ac = tid->ac;
+ txq = ac->txq;
+
+ ath_txq_lock(sc, txq);
+ ac->clear_ps_filter = true;
+
+ if (ath_tid_has_buffered(tid)) {
+ ath_tx_queue_tid(sc, txq, tid);
+ ath_txq_schedule(sc, txq);
+ }
+
+ ath_txq_unlock_complete(sc, txq);
+ }
+}
+
+void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tidno)
+{
+ struct ath_atx_tid *tid;
+ struct ath_node *an;
+ struct ath_txq *txq;
+
+ an = (struct ath_node *)sta->drv_priv;
+ tid = ATH_AN_2_TID(an, tidno);
+ txq = tid->ac->txq;
+
+ ath_txq_lock(sc, txq);
+
+ tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
+
+ if (ath_tid_has_buffered(tid)) {
+ ath_tx_queue_tid(sc, txq, tid);
+ ath_txq_schedule(sc, txq);
+ }
+
+ ath_txq_unlock_complete(sc, txq);
+}
+
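+/*
+ * Release up to nframes buffered frames for the given TIDs via the
+ * UAPSD queue; the last released frame is marked with EOSP.
+ */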
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ struct ath_txq *txq = sc->tx.uapsdq;
+ struct ieee80211_tx_info *info;
+ struct list_head bf_q;
+ struct ath_buf *bf_tail = NULL, *bf;
+ struct sk_buff_head *tid_q;
+ int sent = 0;
+ int i;
+
+ INIT_LIST_HEAD(&bf_q);
+ for (i = 0; tids && nframes; i++, tids >>= 1) {
+ struct ath_atx_tid *tid;
+
+ if (!(tids & 1))
+ continue;
+
+ tid = ATH_AN_2_TID(an, i);
+
+ ath_txq_lock(sc, tid->ac->txq);
+ while (nframes > 0) {
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
+ if (!bf)
+ break;
+
+ __skb_unlink(bf->bf_mpdu, tid_q);
+ list_add_tail(&bf->list, &bf_q);
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ if (bf_isampdu(bf)) {
+ ath_tx_addto_baw(sc, tid, bf);
+ bf->bf_state.bf_type &= ~BUF_AGGR;
+ }
+ if (bf_tail)
+ bf_tail->bf_next = bf;
+
+ bf_tail = bf;
+ nframes--;
+ sent++;
+ TX_STAT_INC(txq->axq_qnum, a_queued_hw);
+
+ if (an->sta && !ath_tid_has_buffered(tid))
+ ieee80211_sta_set_buffered(an->sta, i, false);
+ }
+ ath_txq_unlock_complete(sc, tid->ac->txq);
+ }
+
+ if (list_empty(&bf_q))
+ return;
+
+ info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
+ info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
+ ath_txq_lock(sc, txq);
+ ath_tx_fill_desc(sc, bf, txq, 0);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+ ath_txq_unlock(sc, txq);
+}
+
+/********************/
+/* Queue Management */
+/********************/
+
+struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_tx_queue_info qi;
+ static const int subtype_txq_to_hwq[] = {
+ [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+ [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+ [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+ [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
+ };
+ int axq_qnum, i;
+
+ memset(&qi, 0, sizeof(qi));
+ qi.tqi_subtype = subtype_txq_to_hwq[subtype];
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+
+	/*
+	 * Enable interrupts only for EOL and DESC conditions.
+	 * We mark tx descriptors to receive a DESC interrupt
+	 * when a tx queue gets deep; otherwise we wait for the
+	 * EOL to reap descriptors. Note that this is done to
+	 * reduce interrupt load, and it only defers reaping
+	 * descriptors, never transmitting frames. Aside from
+	 * reducing interrupts this also permits more concurrency.
+	 * The only potential downside is if the tx queue backs
+	 * up, in which case the top half of the kernel may back up
+	 * due to a lack of tx descriptors.
+	 *
+	 * The UAPSD queue is an exception, since we take a desc-
+	 * based intr on the EOSP frames.
+	 */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
+ } else {
+ if (qtype == ATH9K_TX_QUEUE_UAPSD)
+ qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+ else
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+ TXQ_FLAG_TXDESCINT_ENABLE;
+ }
+ axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+ if (axq_qnum == -1) {
+ /*
+ * NB: don't print a message, this happens
+ * normally on parts with too few tx queues
+ */
+ return NULL;
+ }
+ if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+ struct ath_txq *txq = &sc->tx.txq[axq_qnum];
+
+ txq->axq_qnum = axq_qnum;
+ txq->mac80211_qnum = -1;
+ txq->axq_link = NULL;
+ __skb_queue_head_init(&txq->complete_q);
+ INIT_LIST_HEAD(&txq->axq_q);
+ spin_lock_init(&txq->axq_lock);
+ txq->axq_depth = 0;
+ txq->axq_ampdu_depth = 0;
+ txq->axq_tx_inprogress = false;
+ sc->tx.txqsetup |= 1<<axq_qnum;
+
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+ INIT_LIST_HEAD(&txq->txq_fifo[i]);
+ }
+ return &sc->tx.txq[axq_qnum];
+}
+
+int ath_txq_update(struct ath_softc *sc, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin;
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
+int ath_cabq_update(struct ath_softc *sc)
+{
+ struct ath9k_tx_queue_info qi;
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+ int qnum = sc->beacon.cabq->axq_qnum;
+
+ ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
+
+ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
+ ATH_CABQ_READY_TIME) / 100;
+ ath_txq_update(sc, qnum, &qi);
+
+ return 0;
+}
+
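+/*
+ * Complete every buffer on the given list with a flush status. Stale
+ * holding descriptors are simply returned to the free buffer list.
+ */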
+static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
+ struct list_head *list)
+{
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+ struct ath_tx_status ts;
+
+ memset(&ts, 0, sizeof(ts));
+ ts.ts_status = ATH9K_TX_FLUSH;
+ INIT_LIST_HEAD(&bf_head);
+
+ while (!list_empty(list)) {
+ bf = list_first_entry(list, struct ath_buf, list);
+
+ if (bf->bf_state.stale) {
+ list_del(&bf->list);
+
+ ath_tx_return_buffer(sc, bf);
+ continue;
+ }
+
+ lastbf = bf->bf_lastbf;
+ list_cut_position(&bf_head, list, &lastbf->list);
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
+ }
+}
+
+/*
+ * Drain a given TX queue (could be Beacon or Data)
+ *
+ * This assumes output has been stopped and
+ * we do not need to block ath_tx_tasklet.
+ */
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
+{
+ ath_txq_lock(sc, txq);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ int idx = txq->txq_tailidx;
+
+ while (!list_empty(&txq->txq_fifo[idx])) {
+ ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
+
+ INCR(idx, ATH_TXFIFO_DEPTH);
+ }
+ txq->txq_tailidx = idx;
+ }
+
+ txq->axq_link = NULL;
+ txq->axq_tx_inprogress = false;
+ ath_drain_txq_list(sc, txq, &txq->axq_q);
+
+ ath_txq_unlock_complete(sc, txq);
+}
+
+bool ath_drain_all_txq(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq;
+ int i;
+ u32 npend = 0;
+
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
+ return true;
+
+ ath9k_hw_abort_tx_dma(ah);
+
+ /* Check if any queue remains active */
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ if (!sc->tx.txq[i].axq_depth)
+ continue;
+
+ if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
+ npend |= BIT(i);
+ }
+
+ if (npend)
+ ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ /*
+ * The caller will resume queues with ieee80211_wake_queues.
+ * Mark the queue as not stopped to prevent ath_tx_complete
+ * from waking the queue too early.
+ */
+ txq = &sc->tx.txq[i];
+ txq->stopped = false;
+ ath_draintxq(sc, txq);
+ }
+
+ return !npend;
+}
+
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
+{
+ ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
+ sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
+}
+
+/* For each acq entry, for each tid, try to schedule packets
+ * for transmission until ampdu_depth has reached the min Q depth.
+ */
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_atx_ac *ac, *last_ac;
+ struct ath_atx_tid *tid, *last_tid;
+ struct list_head *ac_list;
+ bool sent = false;
+
+ if (txq->mac80211_qnum < 0)
+ return;
+
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ return;
+
+ spin_lock_bh(&sc->chan_lock);
+ ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+
+ if (list_empty(ac_list)) {
+ spin_unlock_bh(&sc->chan_lock);
+ return;
+ }
+
+ rcu_read_lock();
+
+ last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+ while (!list_empty(ac_list)) {
+ bool stop = false;
+
+ if (sc->cur_chan->stopped)
+ break;
+
+ ac = list_first_entry(ac_list, struct ath_atx_ac, list);
+ last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+ list_del(&ac->list);
+ ac->sched = false;
+
+ while (!list_empty(&ac->tid_q)) {
+
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ list_del(&tid->list);
+ tid->sched = false;
+
+ if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+ sent = true;
+
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (ath_tid_has_buffered(tid))
+ ath_tx_queue_tid(sc, txq, tid);
+
+ if (stop || tid == last_tid)
+ break;
+ }
+
+ if (!list_empty(&ac->tid_q) && !ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, ac_list);
+ }
+
+ if (stop)
+ break;
+
+ if (ac == last_ac) {
+ if (!sent)
+ break;
+
+ sent = false;
+ last_ac = list_entry(ac_list->prev,
+ struct ath_atx_ac, list);
+ }
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&sc->chan_lock);
+}
+
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+ struct ath_txq *txq;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
+/***********/
+/* TX, DMA */
+/***********/
+
+/*
+ * Insert a chain of ath_buf (descriptors) on a txq; the descriptors
+ * are assumed to be already chained together by the caller.
+ */
+static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
+ struct list_head *head, bool internal)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_buf *bf, *bf_last;
+ bool puttxbuf = false;
+ bool edma;
+
+ /*
+ * Insert the frame on the outbound list and
+ * pass it on to the hardware.
+ */
+
+ if (list_empty(head))
+ return;
+
+ edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ bf = list_first_entry(head, struct ath_buf, list);
+ bf_last = list_entry(head->prev, struct ath_buf, list);
+
+ ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
+ txq->axq_qnum, txq->axq_depth);
+
+ if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
+ list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
+ INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+ puttxbuf = true;
+ } else {
+ list_splice_tail_init(head, &txq->axq_q);
+
+ if (txq->axq_link) {
+ ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
+ ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ } else if (!edma)
+ puttxbuf = true;
+
+ txq->axq_link = bf_last->bf_desc;
+ }
+
+ if (puttxbuf) {
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+ }
+
+ if (!edma || sc->tx99_state) {
+ TX_STAT_INC(txq->axq_qnum, txstart);
+ ath9k_hw_txstart(ah, txq->axq_qnum);
+ }
+
+ if (!internal) {
+ while (bf) {
+ txq->axq_depth++;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth++;
+
+ bf_last = bf->bf_lastbf;
+ bf = bf_last->bf_next;
+ bf_last->bf_next = NULL;
+ }
+ }
+}
+
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct list_head bf_head;
+ struct ath_buf *bf = fi->bf;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
+ bf->bf_state.bf_type = 0;
+ if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = BUF_AMPDU;
+ ath_tx_addto_baw(sc, tid, bf);
+ }
+
+ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+ ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
+ TX_STAT_INC(txq->axq_qnum, queued);
+}
+
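+/*
+ * Fill the per-frame ath_frame_info from the mac80211 tx info:
+ * key index/type, frame length, tx power and the RTS/CTS rate.
+ */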
+static void setup_frame_info(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ int framelen)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const struct ieee80211_rate *rate;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_node *an = NULL;
+ enum ath9k_key_type keytype;
+ bool short_preamble = false;
+ u8 txpower;
+
+	/*
+	 * Whether Short Preamble is needed for the CTS rate is determined
+	 * from the BSS's global flag.
+	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+	 */
+ if (tx_info->control.vif &&
+ tx_info->control.vif->bss_conf.use_short_preamble)
+ short_preamble = true;
+
+ rate = ieee80211_get_rts_cts_rate(hw, tx_info);
+ keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
+
+ if (sta)
+ an = (struct ath_node *) sta->drv_priv;
+
+ if (tx_info->control.vif) {
+ struct ieee80211_vif *vif = tx_info->control.vif;
+
+ txpower = 2 * vif->bss_conf.txpower;
+ } else {
+ struct ath_softc *sc = hw->priv;
+
+ txpower = sc->cur_chan->cur_txpower;
+ }
+
+ memset(fi, 0, sizeof(*fi));
+ fi->txq = -1;
+ if (hw_key)
+ fi->keyix = hw_key->hw_key_idx;
+ else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
+ fi->keyix = an->ps_key;
+ else
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->keytype = keytype;
+ fi->framelen = framelen;
+ fi->tx_power = txpower;
+
+ if (!rate)
+ return;
+ fi->rtscts_rate = rate->hw_value;
+ if (short_preamble)
+ fi->rtscts_rate |= rate->hw_value_short;
+}
+
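+/*
+ * Reduce the tx chainmask where hardware constraints apply: APM parts
+ * on 5 GHz legacy rates, and AR9462 with BTCOEX enabled on CCK rates.
+ */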
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *curchan = ah->curchan;
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
+ (chainmask == 0x7) && (rate < 0x90))
+ return 0x3;
+ else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
+ IS_CCK_RATE(rate))
+ return 0x2;
+ else
+ return chainmask;
+}
+
+/*
+ * Assign a descriptor (and sequence number if necessary)
+ * and map the buffer for DMA. Frees the skb on error.
+ */
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct sk_buff *skb)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath_buf *bf;
+ int fragno;
+ u16 seqno;
+
+ bf = ath_tx_get_buffer(sc);
+ if (!bf) {
+ ath_dbg(common, XMIT, "TX buffers are full\n");
+ return NULL;
+ }
+
+ ATH_TXBUF_RESET(bf);
+
+ if (tid && ieee80211_is_data_present(hdr->frame_control)) {
+ fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seqno = tid->seq_next;
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
+
+ if (fragno)
+ hdr->seq_ctrl |= cpu_to_le16(fragno);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control))
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+
+ bf->bf_state.seqno = seqno;
+ }
+
+ bf->bf_mpdu = skb;
+
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
+ bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "dma_mapping_error() on TX\n");
+ ath_tx_return_buffer(sc, bf);
+ return NULL;
+ }
+
+ fi->bf = bf;
+
+ return bf;
+}
+
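+/*
+ * Assign a driver-maintained sequence number to frames for which
+ * mac80211 requested it (IEEE80211_TX_CTL_ASSIGN_SEQ).
+ */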
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath_vif *avp;
+
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ return;
+
+ if (!vif)
+ return;
+
+ avp = (struct ath_vif *)vif->drv_priv;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ avp->seq_no += 0x10;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+}
+
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txctl->sta;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath_vif *avp;
+ struct ath_softc *sc = hw->priv;
+ int frmlen = skb->len + FCS_LEN;
+ int padpos, padsize;
+
+ /* NOTE: sta can be NULL according to net/mac80211.h */
+ if (sta)
+ txctl->an = (struct ath_node *)sta->drv_priv;
+ else if (vif && ieee80211_is_data(hdr->frame_control)) {
+ avp = (void *)vif->drv_priv;
+ txctl->an = &avp->mcast_node;
+ }
+
+ if (info->control.hw_key)
+ frmlen += info->control.hw_key->icv_len;
+
+ ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);
+
+ if ((vif && vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_AP_VLAN) ||
+ !ieee80211_is_data(hdr->frame_control))
+ info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -ENOMEM;
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ setup_frame_info(hw, sta, skb, frmlen);
+ return 0;
+}
+
+/* Upon failure, the caller should free the skb */
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txctl->sta;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_vif *avp = NULL;
+ struct ath_softc *sc = hw->priv;
+ struct ath_txq *txq = txctl->txq;
+ struct ath_atx_tid *tid = NULL;
+ struct ath_buf *bf;
+ bool queue, skip_uapsd = false, ps_resp;
+ int q, ret;
+
+ if (vif)
+ avp = (void *)vif->drv_priv;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ txctl->force_channel = true;
+
+ ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
+
+ ret = ath_tx_prepare(hw, skb, txctl);
+ if (ret)
+ return ret;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+	/*
+	 * At this point, the vif, hw_key and sta pointers in the tx control
+	 * info are no longer valid (overwritten by the ath_frame_info data).
+	 */
+
+ q = skb_get_queue_mapping(skb);
+
+ ath_txq_lock(sc, txq);
+ if (txq == sc->tx.txq_map[q]) {
+ fi->txq = q;
+ if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
+ !txq->stopped) {
+ if (ath9k_is_chanctx_enabled())
+ ieee80211_stop_queue(sc->hw, info->hw_queue);
+ else
+ ieee80211_stop_queue(sc->hw, q);
+ txq->stopped = true;
+ }
+ }
+
+ queue = ieee80211_is_data_present(hdr->frame_control);
+
+ /* Force queueing of all frames that belong to a virtual interface on
+ * a different channel context, to ensure that they are sent on the
+ * correct channel.
+ */
+ if (((avp && avp->chanctx != sc->cur_chan) ||
+ sc->cur_chan->stopped) && !txctl->force_channel) {
+ if (!txctl->an)
+ txctl->an = &avp->mcast_node;
+ queue = true;
+ skip_uapsd = true;
+ }
+
+ if (txctl->an && queue)
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
+
+ if (!skip_uapsd && ps_resp) {
+ ath_txq_unlock(sc, txq);
+ txq = sc->tx.uapsdq;
+ ath_txq_lock(sc, txq);
+ } else if (txctl->an && queue) {
+ WARN_ON(tid->ac->txq != txctl->txq);
+
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+ tid->ac->clear_ps_filter = true;
+
+		/*
+		 * Add this frame to the software queue for later scheduling
+		 * and aggregation.
+		 */
+ TX_STAT_INC(txq->axq_qnum, a_queued_sw);
+ __skb_queue_tail(&tid->buf_q, skb);
+ if (!txctl->an->sleeping)
+ ath_tx_queue_tid(sc, txq, tid);
+
+ ath_txq_schedule(sc, txq);
+ goto out;
+ }
+
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
+ if (txctl->paprd)
+ dev_kfree_skb_any(skb);
+ else
+ ieee80211_free_txskb(sc->hw, skb);
+ goto out;
+ }
+
+ bf->bf_state.bfs_paprd = txctl->paprd;
+
+ if (txctl->paprd)
+ bf->bf_state.bfs_paprd_timestamp = jiffies;
+
+ ath_set_rates(vif, sta, bf);
+ ath_tx_send_normal(sc, txq, tid, skb);
+
+out:
+ ath_txq_unlock(sc, txq);
+
+ return 0;
+}
+
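+/*
+ * Queue buffered broadcast/multicast frames onto the CAB queue after a
+ * beacon, limited by a duration budget derived from the beacon interval
+ * and DTIM period.
+ */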
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_tx_control txctl = {
+ .txq = sc->beacon.cabq
+ };
+ struct ath_tx_info info = {};
+ struct ieee80211_hdr *hdr;
+ struct ath_buf *bf_tail = NULL;
+ struct ath_buf *bf;
+ LIST_HEAD(bf_q);
+ int duration = 0;
+ int max_duration;
+
+ max_duration =
+ sc->cur_chan->beacon.beacon_interval * 1000 *
+ sc->cur_chan->beacon.dtim_period / ATH_BCBUF;
+
+ do {
+ struct ath_frame_info *fi = get_frame_info(skb);
+
+ if (ath_tx_prepare(hw, skb, &txctl))
+ break;
+
+ bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
+ if (!bf)
+ break;
+
+ bf->bf_lastbf = bf;
+ ath_set_rates(vif, NULL, bf);
+ ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
+ duration += info.rates[0].PktDuration;
+ if (bf_tail)
+ bf_tail->bf_next = bf;
+
+ list_add_tail(&bf->list, &bf_q);
+ bf_tail = bf;
+ skb = NULL;
+
+ if (duration > max_duration)
+ break;
+
+ skb = ieee80211_get_buffered_bc(hw, vif);
+	} while (skb);
+
+ if (skb)
+ ieee80211_free_txskb(hw, skb);
+
+ if (list_empty(&bf_q))
+ return;
+
+ bf = list_first_entry(&bf_q, struct ath_buf, list);
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ sizeof(*hdr), DMA_TO_DEVICE);
+ }
+
+ ath_txq_lock(sc, txctl.txq);
+ ath_tx_fill_desc(sc, bf, txctl.txq, 0);
+ ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
+ TX_STAT_INC(txctl.txq->axq_qnum, queued);
+ ath_txq_unlock(sc, txctl.txq);
+}
+
+/*****************/
+/* TX Completion */
+/*****************/
+
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+ int tx_flags, struct ath_txq *txq)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ int padpos, padsize;
+ unsigned long flags;
+
+ ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
+
+ if (sc->sc_ah->caldata)
+ set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
+
+ if (!(tx_flags & ATH_TX_ERROR)) {
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+	if (padsize && skb->len > padpos + padsize) {
+ /*
+ * Remove MAC header padding before giving the frame back to
+ * mac80211.
+ */
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
+ ath_dbg(common, PS,
+ "Going back to sleep after having received TX status (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+
+ __skb_queue_tail(&txq->complete_q, skb);
+ ath_txq_skb_done(sc, txq, skb);
+}
+
+static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok)
+{
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ int tx_flags = 0;
+
+ if (!txok)
+ tx_flags |= ATH_TX_ERROR;
+
+ if (ts->ts_status & ATH9K_TXERR_FILT)
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+
+ dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
+ bf->bf_buf_addr = 0;
+ if (sc->tx99_state)
+ goto skip_tx_complete;
+
+ if (bf->bf_state.bfs_paprd) {
+ if (time_after(jiffies,
+ bf->bf_state.bfs_paprd_timestamp +
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
+ dev_kfree_skb_any(skb);
+ else
+ complete(&sc->paprd_complete);
+ } else {
+ ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
+ ath_tx_complete(sc, skb, tx_flags, txq);
+ }
+skip_tx_complete:
+ /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
+ * accidentally reference it later.
+ */
+ bf->bf_mpdu = NULL;
+
+	/*
+	 * Return the list of ath_buf of this mpdu to the free queue
+	 */
+ spin_lock_irqsave(&sc->tx.txbuflock, flags);
+ list_splice_tail_init(bf_q, &sc->tx.txbuf);
+ spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
+}
+
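+/*
+ * Translate the hardware tx status into mac80211 rate control status:
+ * ack signal, A-MPDU lengths and per-rate retry counts.
+ */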
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok)
+{
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ u8 i, tx_rateindex;
+
+ if (txok)
+ tx_info->status.ack_signal = ts->ts_rssi;
+
+ tx_rateindex = ts->ts_rateindex;
+ WARN_ON(tx_rateindex >= hw->max_rates);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+
+ BUG_ON(nbad > nframes);
+ }
+ tx_info->status.ampdu_len = nframes;
+ tx_info->status.ampdu_ack_len = nframes - nbad;
+
+ if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
+ (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
+		/*
+		 * If an underrun error is seen, treat it as an excessive
+		 * retry only if the max frame trigger level has been reached
+		 * (2 KB for single stream, and 4 KB for dual stream).
+		 * Adjust the long retry as if the frame was tried
+		 * hw->max_rate_tries times to affect how rate control updates
+		 * PER for the failed rate.
+		 * In case of congestion on the bus, penalizing this type of
+		 * underrun should help the hardware actually transmit new
+		 * frames successfully by eventually preferring slower rates.
+		 * This itself should also alleviate congestion on the bus.
+		 */
+ if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
+ ATH9K_TX_DELIM_UNDERRUN)) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
+ tx_info->status.rates[tx_rateindex].count =
+ hw->max_rate_tries;
+ }
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+}
+
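+/*
+ * Reap completed descriptors from a legacy (non-EDMA) tx queue and hand
+ * the finished frames over to completion processing.
+ */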
+static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_buf *bf, *lastbf, *bf_held = NULL;
+ struct list_head bf_head;
+ struct ath_desc *ds;
+ struct ath_tx_status ts;
+ int status;
+
+ ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
+ txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
+ txq->axq_link);
+
+ ath_txq_lock(sc, txq);
+ for (;;) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ break;
+
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ ath_txq_schedule(sc, txq);
+ break;
+ }
+ bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+
+		/*
+		 * There is a race condition in which a BH gets scheduled
+		 * after sw writes TxE and before hw re-loads the last
+		 * descriptor to get the newly chained one.
+		 * Software must keep the last DONE descriptor as a
+		 * holding descriptor - software does so by marking
+		 * it with the STALE flag.
+		 */
+ bf_held = NULL;
+ if (bf->bf_state.stale) {
+ bf_held = bf;
+ if (list_is_last(&bf_held->list, &txq->axq_q))
+ break;
+
+ bf = list_entry(bf_held->list.next, struct ath_buf,
+ list);
+ }
+
+ lastbf = bf->bf_lastbf;
+ ds = lastbf->bf_desc;
+
+ memset(&ts, 0, sizeof(ts));
+ status = ath9k_hw_txprocdesc(ah, ds, &ts);
+ if (status == -EINPROGRESS)
+ break;
+
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
+
+		/*
+		 * Remove ath_buf's of the same transmit unit from txq,
+		 * but leave the last descriptor behind as the holding
+		 * descriptor for hw.
+		 */
+ lastbf->bf_state.stale = true;
+ INIT_LIST_HEAD(&bf_head);
+ if (!list_is_singular(&lastbf->list))
+ list_cut_position(&bf_head,
+ &txq->axq_q, lastbf->list.prev);
+
+ if (bf_held) {
+ list_del(&bf_held->list);
+ ath_tx_return_buffer(sc, bf_held);
+ }
+
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
+ }
+ ath_txq_unlock_complete(sc, txq);
+}
+
+void ath_tx_tasklet(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
+ int i;
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
+ ath_tx_processq(sc, &sc->tx.txq[i]);
+ }
+}
+
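+/*
+ * EDMA variant of tx completion: status is delivered through a dedicated
+ * status ring, so the queue is looked up from each status entry and the
+ * matching FIFO entries are completed.
+ */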
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+ struct ath_tx_status ts;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_txq *txq;
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+ struct list_head *fifo_list;
+ int status;
+
+ for (;;) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ break;
+
+ status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
+ if (status == -EINPROGRESS)
+ break;
+ if (status == -EIO) {
+ ath_dbg(common, XMIT, "Error processing tx status\n");
+ break;
+ }
+
+ /* Process beacon completions separately */
+ if (ts.qid == sc->beacon.beaconq) {
+ sc->beacon.tx_processed = true;
+ sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+
+ if (ath9k_is_chanctx_enabled()) {
+ ath_chanctx_event(sc, NULL,
+ ATH_CHANCTX_EVENT_BEACON_SENT);
+ }
+
+ ath9k_csa_update(sc);
+ continue;
+ }
+
+ txq = &sc->tx.txq[ts.qid];
+
+ ath_txq_lock(sc, txq);
+
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
+
+ fifo_list = &txq->txq_fifo[txq->txq_tailidx];
+ if (list_empty(fifo_list)) {
+ ath_txq_unlock(sc, txq);
+ return;
+ }
+
+ bf = list_first_entry(fifo_list, struct ath_buf, list);
+ if (bf->bf_state.stale) {
+ list_del(&bf->list);
+ ath_tx_return_buffer(sc, bf);
+ bf = list_first_entry(fifo_list, struct ath_buf, list);
+ }
+
+ lastbf = bf->bf_lastbf;
+
+ INIT_LIST_HEAD(&bf_head);
+ if (list_is_last(&lastbf->list, fifo_list)) {
+ list_splice_tail_init(fifo_list, &bf_head);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+
+ if (!list_empty(&txq->axq_q)) {
+ struct list_head bf_q;
+
+ INIT_LIST_HEAD(&bf_q);
+ txq->axq_link = NULL;
+ list_splice_tail_init(&txq->axq_q, &bf_q);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, true);
+ }
+ } else {
+ lastbf->bf_state.stale = true;
+ if (bf != lastbf)
+ list_cut_position(&bf_head, fifo_list,
+ lastbf->list.prev);
+ }
+
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
+ ath_txq_unlock_complete(sc, txq);
+ }
+}
+
+/*****************/
+/* Init, Cleanup */
+/*****************/
+
+static int ath_txstatus_setup(struct ath_softc *sc, int size)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+ u8 txs_len = sc->sc_ah->caps.txs_len;
+
+ dd->dd_desc_len = size * txs_len;
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ath_tx_edma_init(struct ath_softc *sc)
+{
+ int err;
+
+ err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
+ if (!err)
+ ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
+ sc->txsdma.dd_desc_paddr,
+ ATH_TXSTATUS_RING_SIZE);
+
+ return err;
+}
+
+int ath_tx_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int error = 0;
+
+ spin_lock_init(&sc->tx.txbuflock);
+
+ error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
+ "tx", nbufs, 1, 1);
+ if (error != 0) {
+ ath_err(common,
+ "Failed to allocate tx descriptors: %d\n", error);
+ return error;
+ }
+
+ error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
+ "beacon", ATH_BCBUF, 1, 1);
+ if (error != 0) {
+ ath_err(common,
+ "Failed to allocate beacon descriptors: %d\n", error);
+ return error;
+ }
+
+ INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ error = ath_tx_edma_init(sc);
+
+ return error;
+}
+
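+/*
+ * Initialize the per-node TID and access-class (AC) state used by the
+ * software tx queueing code.
+ */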
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ int tidno, acno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS;
+ tidno++, tid++) {
+ tid->an = an;
+ tid->tidno = tidno;
+ tid->seq_start = tid->seq_next = 0;
+ tid->baw_size = WME_MAX_BA;
+ tid->baw_head = tid->baw_tail = 0;
+ tid->sched = false;
+ tid->active = false;
+ __skb_queue_head_init(&tid->buf_q);
+ __skb_queue_head_init(&tid->retry_q);
+ acno = TID_TO_WME_AC(tidno);
+ tid->ac = &an->ac[acno];
+ }
+
+ for (acno = 0, ac = &an->ac[acno];
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
+ ac->sched = false;
+ ac->clear_ps_filter = true;
+ ac->txq = sc->tx.txq_map[acno];
+ INIT_LIST_HEAD(&ac->tid_q);
+ }
+}
+
+void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
+ struct ath_txq *txq;
+ int tidno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+
+ ac = tid->ac;
+ txq = ac->txq;
+
+ ath_txq_lock(sc, txq);
+
+ if (tid->sched) {
+ list_del(&tid->list);
+ tid->sched = false;
+ }
+
+ if (ac->sched) {
+ list_del(&ac->list);
+ tid->ac->sched = false;
+ }
+
+ ath_tid_drain(sc, txq, tid);
+ tid->active = false;
+
+ ath_txq_unlock(sc, txq);
+ }
+}
+
+#ifdef CONFIG_ATH9K_TX99
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_buf *bf;
+ int padpos, padsize;
+
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize) {
+ ath_dbg(common, XMIT,
+ "tx99 padding failed\n");
+ return -EINVAL;
+ }
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->framelen = skb->len + FCS_LEN;
+ fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+ bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+ if (!bf) {
+ ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+ return -EINVAL;
+ }
+
+ ath_set_rates(sc->tx99_vif, NULL, bf);
+
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+ ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+ ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+ return 0;
+}
+
+#endif /* CONFIG_ATH9K_TX99 */
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
new file mode 100644
index 000000000..1a796e5f6
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -0,0 +1,56 @@
+config CARL9170
+ tristate "Linux Community AR9170 802.11n USB support"
+ depends on USB && MAC80211
+ select ATH_COMMON
+ select FW_LOADER
+ select CRC32
+ help
+ This is another driver for the Atheros "otus" 802.11n USB devices.
+
+ This driver provides more features than the original,
+ but it needs a special firmware (carl9170-1.fw) to do that.
+
+ The firmware can be downloaded from our wiki here:
+ <http://wireless.kernel.org/en/users/Drivers/carl9170>
+
+ If you choose to build a module, it'll be called carl9170.
+
+config CARL9170_LEDS
+ bool "SoftLED Support"
+ depends on CARL9170
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ default y
+ help
+	  This option is necessary if you want your device's LEDs to blink.
+
+ Say Y, unless you need the LEDs for firmware debugging.
+
+config CARL9170_DEBUGFS
+ bool "DebugFS Support"
+ depends on CARL9170 && DEBUG_FS && MAC80211_DEBUGFS
+ default n
+ help
+ Export several driver and device internals to user space.
+
+ Say N.
+
+config CARL9170_WPC
+ bool
+ depends on CARL9170 && (INPUT = y || INPUT = CARL9170)
+ default y
+
+config CARL9170_HWRNG
+ bool "Random number generator"
+ depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
+ default n
+ help
+ Provides a hardware random number generator to the kernel.
+
+	  SECURITY WARNING: It is relatively easy to eavesdrop on all
+	  generated random numbers in the transport stream, either with
+	  usbmon (software) or with dedicated USB sniffer hardware.
+
+	  Say N, unless your setup (e.g. an embedded system) has no
+	  other RNG source and you can afford to take the risk.
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
new file mode 100644
index 000000000..f64ed76af
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -0,0 +1,4 @@
+carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
+carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
+
+obj-$(CONFIG_CARL9170) += carl9170.o
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
new file mode 100644
index 000000000..237d0cda1
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -0,0 +1,670 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * Driver specific definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CARL9170_H
+#define __CARL9170_H
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/hw_random.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <linux/usb.h>
+#ifdef CONFIG_CARL9170_LEDS
+#include <linux/leds.h>
+#endif /* CONFIG_CARL9170_LEDS */
+#ifdef CONFIG_CARL9170_WPC
+#include <linux/input.h>
+#endif /* CONFIG_CARL9170_WPC */
+#include "eeprom.h"
+#include "wlan.h"
+#include "hw.h"
+#include "fwdesc.h"
+#include "fwcmd.h"
+#include "../regd.h"
+
+#ifdef CONFIG_CARL9170_DEBUGFS
+#include "debug.h"
+#endif /* CONFIG_CARL9170_DEBUGFS */
+
+#define CARL9170FW_NAME "carl9170-1.fw"
+
+#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
+
+static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 };
+
+#define CARL9170_MAX_RX_BUFFER_SIZE 8192
+
+enum carl9170_device_state {
+ CARL9170_UNKNOWN_STATE,
+ CARL9170_STOPPED,
+ CARL9170_IDLE,
+ CARL9170_STARTED,
+};
+
+#define WME_BA_BMP_SIZE 64
+#define CARL9170_TX_USER_RATE_TRIES 3
+
+#define TID_TO_WME_AC(_tid) \
+ ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
+
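+/*
+ * 802.11 sequence numbers are 12 bits wide, so all of the following
+ * sequence and block-ack window arithmetic is done modulo 4096.
+ */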
+#define SEQ_DIFF(_start, _seq) \
+ (((_start) - (_seq)) & 0x0fff)
+#define SEQ_PREV(_seq) \
+ (((_seq) - 1) & 0x0fff)
+#define SEQ_NEXT(_seq) \
+ (((_seq) + 1) & 0x0fff)
+#define BAW_WITHIN(_start, _bawsz, _seqno) \
+ ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
+
+enum carl9170_tid_state {
+ CARL9170_TID_STATE_INVALID,
+ CARL9170_TID_STATE_KILLED,
+ CARL9170_TID_STATE_SHUTDOWN,
+ CARL9170_TID_STATE_SUSPEND,
+ CARL9170_TID_STATE_PROGRESS,
+ CARL9170_TID_STATE_IDLE,
+ CARL9170_TID_STATE_XMIT,
+};
+
+#define CARL9170_BAW_BITS (2 * WME_BA_BMP_SIZE)
+#define CARL9170_BAW_SIZE (BITS_TO_LONGS(CARL9170_BAW_BITS))
+#define CARL9170_BAW_LEN (DIV_ROUND_UP(CARL9170_BAW_BITS, BITS_PER_BYTE))
+
+struct carl9170_sta_tid {
+ /* must be the first entry! */
+ struct list_head list;
+
+ /* temporary list for RCU unlink procedure */
+ struct list_head tmp_list;
+
+ /* lock for the following data structures */
+ spinlock_t lock;
+
+ unsigned int counter;
+ enum carl9170_tid_state state;
+ u8 tid; /* TID number ( 0 - 15 ) */
+ u16 max; /* max. AMPDU size */
+
+ u16 snx; /* awaiting _next_ frame */
+ u16 hsn; /* highest _queued_ sequence */
+ u16 bsn; /* base of the tx/agg bitmap */
+ unsigned long bitmap[CARL9170_BAW_SIZE];
+
+ /* Preaggregation reorder queue */
+ struct sk_buff_head queue;
+
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+};
+
+#define CARL9170_QUEUE_TIMEOUT 256
+#define CARL9170_BUMP_QUEUE 1000
+#define CARL9170_TX_TIMEOUT 2500
+#define CARL9170_JANITOR_DELAY 128
+#define CARL9170_QUEUE_STUCK_TIMEOUT 5500
+#define CARL9170_STAT_WORK 30000
+
+#define CARL9170_NUM_TX_AGG_MAX 30
+
+/*
+ * Tradeoff between stability/latency and speed.
+ *
+ * AR9170_TXQ_DEPTH is derived by dividing the number of available
+ * tx buffers by the size of a full ethernet frame plus overhead.
+ *
+ * Naturally, the higher the limit, the faster the device can send.
+ * However, even a slight over-commitment at the wrong time leaves the
+ * hardware doomed to send all already-queued frames at suboptimal
+ * rates. This in turn leads to an enormous number of unsuccessful
+ * retries: latency goes up while throughput goes down. CRASH!
+ */
+#define CARL9170_NUM_TX_LIMIT_HARD ((AR9170_TXQ_DEPTH * 3) / 2)
+#define CARL9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH)
+
+struct carl9170_tx_queue_stats {
+ unsigned int count;
+ unsigned int limit;
+ unsigned int len;
+};
+
+struct carl9170_vif {
+ unsigned int id;
+ struct ieee80211_vif __rcu *vif;
+};
+
+struct carl9170_vif_info {
+ struct list_head list;
+ bool active;
+ unsigned int id;
+ struct sk_buff *beacon;
+ bool enable_beacon;
+};
+
+#define AR9170_NUM_RX_URBS 16
+#define AR9170_NUM_RX_URBS_MUL 2
+#define AR9170_NUM_TX_URBS 8
+#define AR9170_NUM_RX_URBS_POOL (AR9170_NUM_RX_URBS_MUL * AR9170_NUM_RX_URBS)
+
+enum carl9170_device_features {
+ CARL9170_WPS_BUTTON = BIT(0),
+ CARL9170_ONE_LED = BIT(1),
+};
+
+#ifdef CONFIG_CARL9170_LEDS
+struct ar9170;
+
+struct carl9170_led {
+ struct ar9170 *ar;
+ struct led_classdev l;
+ char name[32];
+ unsigned int toggled;
+ bool last_state;
+ bool registered;
+};
+#endif /* CONFIG_CARL9170_LEDS */
+
+enum carl9170_restart_reasons {
+ CARL9170_RR_NO_REASON = 0,
+ CARL9170_RR_FATAL_FIRMWARE_ERROR,
+ CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
+ CARL9170_RR_WATCHDOG,
+ CARL9170_RR_STUCK_TX,
+ CARL9170_RR_UNRESPONSIVE_DEVICE,
+ CARL9170_RR_COMMAND_TIMEOUT,
+ CARL9170_RR_TOO_MANY_PHY_ERRORS,
+ CARL9170_RR_LOST_RSP,
+ CARL9170_RR_INVALID_RSP,
+ CARL9170_RR_USER_REQUEST,
+
+ __CARL9170_RR_LAST,
+};
+
+enum carl9170_erp_modes {
+ CARL9170_ERP_INVALID,
+ CARL9170_ERP_AUTO,
+ CARL9170_ERP_MAC80211,
+ CARL9170_ERP_OFF,
+ CARL9170_ERP_CTS,
+ CARL9170_ERP_RTS,
+ __CARL9170_ERP_NUM,
+};
+
+struct ar9170 {
+ struct ath_common common;
+ struct ieee80211_hw *hw;
+ struct mutex mutex;
+ enum carl9170_device_state state;
+ spinlock_t state_lock;
+ enum carl9170_restart_reasons last_reason;
+ bool registered;
+
+ /* USB */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct usb_anchor rx_anch;
+ struct usb_anchor rx_work;
+ struct usb_anchor rx_pool;
+ struct usb_anchor tx_wait;
+ struct usb_anchor tx_anch;
+ struct usb_anchor tx_cmd;
+ struct usb_anchor tx_err;
+ struct tasklet_struct usb_tasklet;
+ atomic_t tx_cmd_urbs;
+ atomic_t tx_anch_urbs;
+ atomic_t rx_anch_urbs;
+ atomic_t rx_work_urbs;
+ atomic_t rx_pool_urbs;
+ kernel_ulong_t features;
+ bool usb_ep_cmd_is_bulk;
+
+ /* firmware settings */
+ struct completion fw_load_wait;
+ struct completion fw_boot_wait;
+ struct {
+ const struct carl9170fw_desc_head *desc;
+ const struct firmware *fw;
+ unsigned int offset;
+ unsigned int address;
+ unsigned int cmd_bufs;
+ unsigned int api_version;
+ unsigned int vif_num;
+ unsigned int err_counter;
+ unsigned int bug_counter;
+ u32 beacon_addr;
+ unsigned int beacon_max_len;
+ bool rx_stream;
+ bool tx_stream;
+ bool rx_filter;
+ bool hw_counters;
+ unsigned int mem_blocks;
+ unsigned int mem_block_size;
+ unsigned int rx_size;
+ unsigned int tx_seq_table;
+ bool ba_filter;
+ bool disable_offload_fw;
+ } fw;
+
+ /* interface configuration combinations */
+ struct ieee80211_iface_limit if_comb_limits[1];
+ struct ieee80211_iface_combination if_combs[1];
+
+ /* reset / stuck frames/queue detection */
+ struct work_struct restart_work;
+ struct work_struct ping_work;
+ unsigned int restart_counter;
+ unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
+ unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
+ bool needs_full_reset;
+ bool force_usb_reset;
+ atomic_t pending_restarts;
+
+ /* interface mode settings */
+ struct list_head vif_list;
+ unsigned long vif_bitmap;
+ unsigned int vifs;
+ struct carl9170_vif vif_priv[AR9170_MAX_VIRTUAL_MAC];
+
+ /* beaconing */
+ spinlock_t beacon_lock;
+ unsigned int global_pretbtt;
+ unsigned int global_beacon_int;
+ struct carl9170_vif_info __rcu *beacon_iter;
+ unsigned int beacon_enabled;
+
+ /* cryptographic engine */
+ u64 usedkeys;
+ bool rx_software_decryption;
+ bool disable_offload;
+
+ /* filter settings */
+ u64 cur_mc_hash;
+ u32 cur_filter;
+ unsigned int filter_state;
+ unsigned int rx_filter_caps;
+ bool sniffer_enabled;
+
+ /* MAC */
+ enum carl9170_erp_modes erp_mode;
+
+ /* PHY */
+ struct ieee80211_channel *channel;
+ unsigned int num_channels;
+ int noise[4];
+ unsigned int chan_fail;
+ unsigned int total_chan_fail;
+ u8 heavy_clip;
+ u8 ht_settings;
+ struct {
+ u64 active; /* usec */
+ u64 cca; /* usec */
+ u64 tx_time; /* usec */
+ u64 rx_total;
+ u64 rx_overrun;
+ } tally;
+ struct delayed_work stat_work;
+ struct survey_info *survey;
+
+ /* power calibration data */
+ u8 power_5G_leg[4];
+ u8 power_2G_cck[4];
+ u8 power_2G_ofdm[4];
+ u8 power_5G_ht20[8];
+ u8 power_5G_ht40[8];
+ u8 power_2G_ht20[8];
+ u8 power_2G_ht40[8];
+
+#ifdef CONFIG_CARL9170_LEDS
+ /* LED */
+ struct delayed_work led_work;
+ struct carl9170_led leds[AR9170_NUM_LEDS];
+#endif /* CONFIG_CARL9170_LEDS */
+
+ /* qos queue settings */
+ spinlock_t tx_stats_lock;
+ struct carl9170_tx_queue_stats tx_stats[__AR9170_NUM_TXQ];
+ struct ieee80211_tx_queue_params edcf[5];
+ struct completion tx_flush;
+
+ /* CMD */
+ int cmd_seq;
+ int readlen;
+ u8 *readbuf;
+ spinlock_t cmd_lock;
+ struct completion cmd_wait;
+ union {
+ __le32 cmd_buf[PAYLOAD_MAX + 1];
+ struct carl9170_cmd cmd;
+ struct carl9170_rsp rsp;
+ };
+
+ /* statistics */
+ unsigned int tx_dropped;
+ unsigned int tx_ack_failures;
+ unsigned int tx_fcs_errors;
+ unsigned int rx_dropped;
+
+ /* EEPROM */
+ struct ar9170_eeprom eeprom;
+
+ /* tx queuing */
+ struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
+ struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
+ struct delayed_work tx_janitor;
+ unsigned long tx_janitor_last_run;
+ bool tx_schedule;
+
+ /* tx ampdu */
+ struct work_struct ampdu_work;
+ spinlock_t tx_ampdu_list_lock;
+ struct carl9170_sta_tid __rcu *tx_ampdu_iter;
+ struct list_head tx_ampdu_list;
+ atomic_t tx_ampdu_upload;
+ atomic_t tx_ampdu_scheduler;
+ atomic_t tx_total_pending;
+ atomic_t tx_total_queued;
+ unsigned int tx_ampdu_list_len;
+ int current_density;
+ int current_factor;
+ bool tx_ampdu_schedule;
+
+ /* internal memory management */
+ spinlock_t mem_lock;
+ unsigned long *mem_bitmap;
+ atomic_t mem_free_blocks;
+ atomic_t mem_allocs;
+
+ /* rxstream mpdu merge */
+ struct ar9170_rx_head rx_plcp;
+ bool rx_has_plcp;
+ struct sk_buff *rx_failover;
+ int rx_failover_missing;
+ u32 ampdu_ref;
+
+ /* FIFO for collecting outstanding BlockAckRequest */
+ struct list_head bar_list[__AR9170_NUM_TXQ];
+ spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
+
+#ifdef CONFIG_CARL9170_WPC
+ struct {
+ bool pbc_state;
+ struct input_dev *pbc;
+ char name[32];
+ char phys[32];
+ } wps;
+#endif /* CONFIG_CARL9170_WPC */
+
+#ifdef CONFIG_CARL9170_DEBUGFS
+ struct carl9170_debug debug;
+ struct dentry *debug_dir;
+#endif /* CONFIG_CARL9170_DEBUGFS */
+
+ /* PSM */
+ struct work_struct ps_work;
+ struct {
+ unsigned int dtim_counter;
+ unsigned long last_beacon;
+ unsigned long last_action;
+ unsigned long last_slept;
+ unsigned int sleep_ms;
+ unsigned int off_override;
+ bool state;
+ } ps;
+
+#ifdef CONFIG_CARL9170_HWRNG
+# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
+ struct {
+ struct hwrng rng;
+ bool initialized;
+ char name[30 + 1];
+ u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
+ unsigned int cache_idx;
+ } rng;
+#endif /* CONFIG_CARL9170_HWRNG */
+};
+
+enum carl9170_ps_off_override_reasons {
+ PS_OFF_VIF = BIT(0),
+ PS_OFF_BCN = BIT(1),
+};
+
+struct carl9170_bar_list_entry {
+ struct list_head list;
+ struct rcu_head head;
+ struct sk_buff *skb;
+};
+
+struct carl9170_ba_stats {
+ u8 ampdu_len;
+ u8 ampdu_ack_len;
+ bool clear;
+ bool req;
+};
+
+struct carl9170_sta_info {
+ bool ht_sta;
+ bool sleeping;
+ atomic_t pending_frames;
+ unsigned int ampdu_max_len;
+ struct carl9170_sta_tid __rcu *agg[IEEE80211_NUM_TIDS];
+ struct carl9170_ba_stats stats[IEEE80211_NUM_TIDS];
+};
+
+struct carl9170_tx_info {
+ unsigned long timeout;
+ struct ar9170 *ar;
+ struct kref ref;
+};
+
+#define CHK_DEV_STATE(a, s) (((struct ar9170 *)a)->state >= (s))
+#define IS_INITIALIZED(a) (CHK_DEV_STATE(a, CARL9170_STOPPED))
+#define IS_ACCEPTING_CMD(a) (CHK_DEV_STATE(a, CARL9170_IDLE))
+#define IS_STARTED(a) (CHK_DEV_STATE(a, CARL9170_STARTED))
+
+static inline void __carl9170_set_state(struct ar9170 *ar,
+ enum carl9170_device_state newstate)
+{
+ ar->state = newstate;
+}
+
+static inline void carl9170_set_state(struct ar9170 *ar,
+ enum carl9170_device_state newstate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar->state_lock, flags);
+ __carl9170_set_state(ar, newstate);
+ spin_unlock_irqrestore(&ar->state_lock, flags);
+}
+
+static inline void carl9170_set_state_when(struct ar9170 *ar,
+ enum carl9170_device_state min, enum carl9170_device_state newstate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar->state_lock, flags);
+ if (CHK_DEV_STATE(ar, min))
+ __carl9170_set_state(ar, newstate);
+ spin_unlock_irqrestore(&ar->state_lock, flags);
+}
+
+/* exported interface */
+void *carl9170_alloc(size_t priv_size);
+int carl9170_register(struct ar9170 *ar);
+void carl9170_unregister(struct ar9170 *ar);
+void carl9170_free(struct ar9170 *ar);
+void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r);
+void carl9170_ps_check(struct ar9170 *ar);
+
+/* USB back-end */
+int carl9170_usb_open(struct ar9170 *ar);
+void carl9170_usb_stop(struct ar9170 *ar);
+void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb);
+void carl9170_usb_handle_tx_err(struct ar9170 *ar);
+int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids,
+ u32 plen, void *payload, u32 rlen, void *resp);
+int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
+ const bool free_buf);
+int carl9170_usb_restart(struct ar9170 *ar);
+void carl9170_usb_reset(struct ar9170 *ar);
+
+/* MAC */
+int carl9170_init_mac(struct ar9170 *ar);
+int carl9170_set_qos(struct ar9170 *ar);
+int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
+int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
+ const u8 *mac);
+int carl9170_set_operating_mode(struct ar9170 *ar);
+int carl9170_set_beacon_timers(struct ar9170 *ar);
+int carl9170_set_dyn_sifs_ack(struct ar9170 *ar);
+int carl9170_set_rts_cts_rate(struct ar9170 *ar);
+int carl9170_set_ampdu_settings(struct ar9170 *ar);
+int carl9170_set_slot_time(struct ar9170 *ar);
+int carl9170_set_mac_rates(struct ar9170 *ar);
+int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
+int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
+ const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
+int carl9170_disable_key(struct ar9170 *ar, const u8 id);
+int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel);
+
+/* RX */
+void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
+void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
+
+/* TX */
+void carl9170_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void carl9170_tx_janitor(struct work_struct *work);
+void carl9170_tx_process_status(struct ar9170 *ar,
+ const struct carl9170_rsp *cmd);
+void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+ const bool success);
+void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
+void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
+void carl9170_tx_scheduler(struct ar9170 *ar);
+void carl9170_tx_get_skb(struct sk_buff *skb);
+int carl9170_tx_put_skb(struct sk_buff *skb);
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
+
+/* LEDs */
+#ifdef CONFIG_CARL9170_LEDS
+int carl9170_led_register(struct ar9170 *ar);
+void carl9170_led_unregister(struct ar9170 *ar);
+#endif /* CONFIG_CARL9170_LEDS */
+int carl9170_led_init(struct ar9170 *ar);
+int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state);
+
+/* PHY / RF */
+int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
+ enum nl80211_channel_type bw);
+int carl9170_get_noisefloor(struct ar9170 *ar);
+
+/* FW */
+int carl9170_parse_firmware(struct ar9170 *ar);
+
+extern struct ieee80211_rate __carl9170_ratetable[];
+extern int modparam_noht;
+
+static inline struct ar9170 *carl9170_get_priv(struct carl9170_vif *carl_vif)
+{
+ return container_of(carl_vif, struct ar9170,
+ vif_priv[carl_vif->id]);
+}
+
+static inline struct ieee80211_hdr *carl9170_get_hdr(struct sk_buff *skb)
+{
+ return (void *)((struct _carl9170_tx_superframe *)
+ skb->data)->frame_data;
+}
+
+static inline u16 get_seq_h(struct ieee80211_hdr *hdr)
+{
+ return le16_to_cpu(hdr->seq_ctrl) >> 4;
+}
+
+static inline u16 carl9170_get_seq(struct sk_buff *skb)
+{
+ return get_seq_h(carl9170_get_hdr(skb));
+}
+
+static inline u16 get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static inline u16 carl9170_get_tid(struct sk_buff *skb)
+{
+ return get_tid_h(carl9170_get_hdr(skb));
+}
+
+static inline struct ieee80211_vif *
+carl9170_get_vif(struct carl9170_vif_info *priv)
+{
+ return container_of((void *)priv, struct ieee80211_vif, drv_priv);
+}
+
+/* Protected by ar->mutex or RCU */
+static inline struct ieee80211_vif *carl9170_get_main_vif(struct ar9170 *ar)
+{
+ struct carl9170_vif_info *cvif;
+
+ list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
+ if (cvif->active)
+ return carl9170_get_vif(cvif);
+ }
+
+ return NULL;
+}
+
+static inline bool is_main_vif(struct ar9170 *ar, struct ieee80211_vif *vif)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = (carl9170_get_main_vif(ar) == vif);
+ rcu_read_unlock();
+ return ret;
+}
+
+#endif /* __CARL9170_H */
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
new file mode 100644
index 000000000..f2b4f537e
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -0,0 +1,222 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * Basic HW register/memory/command access functions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/div64.h>
+#include "carl9170.h"
+#include "cmd.h"
+
+int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
+{
+ const __le32 buf[2] = {
+ cpu_to_le32(reg),
+ cpu_to_le32(val),
+ };
+ int err;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf),
+ (u8 *) buf, 0, NULL);
+ if (err) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "writing reg %#x "
+ "(val %#x) failed (%d)\n", reg, val, err);
+ }
+ }
+ return err;
+}
+
+int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
+ const u32 *regs, u32 *out)
+{
+ int i, err;
+ __le32 *offs, *res;
+
+ /* abuse "out" for the register offsets, must be same length */
+ offs = (__le32 *)out;
+ for (i = 0; i < nregs; i++)
+ offs[i] = cpu_to_le32(regs[i]);
+
+ /* the response is written back into the very same buffer */
+ res = (__le32 *)out;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
+ 4 * nregs, (u8 *)offs,
+ 4 * nregs, (u8 *)res);
+ if (err) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n",
+ err);
+ }
+ return err;
+ }
+
+ /* convert result to cpu endian */
+ for (i = 0; i < nregs; i++)
+ out[i] = le32_to_cpu(res[i]);
+
+ return 0;
+}
+
+int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
+{
+ return carl9170_read_mreg(ar, 1, &reg, val);
+}
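+
+/*
+ * Usage sketch (illustrative; the register name is only a placeholder
+ * borrowed from the debugfs statistics tables in debug.h):
+ *
+ *   u32 val;
+ *   int err;
+ *
+ *   err = carl9170_read_reg(ar, AR9170_MAC_REG_AMPDU_FACTOR, &val);
+ *   if (!err)
+ *           err = carl9170_write_reg(ar, AR9170_MAC_REG_AMPDU_FACTOR, val);
+ */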
+
+int carl9170_echo_test(struct ar9170 *ar, const u32 v)
+{
+ u32 echores;
+ int err;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO,
+ 4, (u8 *)&v,
+ 4, (u8 *)&echores);
+ if (err)
+ return err;
+
+ if (v != echores) {
+ wiphy_info(ar->hw->wiphy, "wrong echo %x != %x", v, echores);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
+ const enum carl9170_cmd_oids cmd, const unsigned int len)
+{
+ struct carl9170_cmd *tmp;
+
+ tmp = kzalloc(sizeof(struct carl9170_cmd_head) + len, GFP_ATOMIC);
+ if (tmp) {
+ tmp->hdr.cmd = cmd;
+ tmp->hdr.len = len;
+ }
+
+ return tmp;
+}
+
+int carl9170_reboot(struct ar9170 *ar)
+{
+ struct carl9170_cmd *cmd;
+ int err;
+
+ cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = __carl9170_exec_cmd(ar, cmd, true);
+ return err;
+}
+
+int carl9170_mac_reset(struct ar9170 *ar)
+{
+ return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST,
+ 0, NULL, 0, NULL);
+}
+
+int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
+ const u32 mode, const u32 addr, const u32 len)
+{
+ struct carl9170_cmd *cmd;
+
+ cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC,
+ sizeof(struct carl9170_bcn_ctrl_cmd));
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->bcn_ctrl.vif_id = cpu_to_le32(vif_id);
+ cmd->bcn_ctrl.mode = cpu_to_le32(mode);
+ cmd->bcn_ctrl.bcn_addr = cpu_to_le32(addr);
+ cmd->bcn_ctrl.bcn_len = cpu_to_le32(len);
+
+ return __carl9170_exec_cmd(ar, cmd, true);
+}
+
+int carl9170_collect_tally(struct ar9170 *ar)
+{
+ struct carl9170_tally_rsp tally;
+ struct survey_info *info;
+ unsigned int tick;
+ int err;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_TALLY, 0, NULL,
+ sizeof(tally), (u8 *)&tally);
+ if (err)
+ return err;
+
+ tick = le32_to_cpu(tally.tick);
+ if (tick) {
+ ar->tally.active += le32_to_cpu(tally.active) / tick;
+ ar->tally.cca += le32_to_cpu(tally.cca) / tick;
+ ar->tally.tx_time += le32_to_cpu(tally.tx_time) / tick;
+ ar->tally.rx_total += le32_to_cpu(tally.rx_total);
+ ar->tally.rx_overrun += le32_to_cpu(tally.rx_overrun);
+
+ if (ar->channel) {
+ info = &ar->survey[ar->channel->hw_value];
+ info->time = ar->tally.active;
+ info->time_busy = ar->tally.cca;
+ info->time_tx = ar->tally.tx_time;
+ do_div(info->time, 1000);
+ do_div(info->time_busy, 1000);
+ do_div(info->time_tx, 1000);
+ }
+ }
+ return 0;
+}
+
+int carl9170_powersave(struct ar9170 *ar, const bool ps)
+{
+ struct carl9170_cmd *cmd;
+ u32 state;
+
+ cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC,
+ sizeof(struct carl9170_psm));
+ if (!cmd)
+ return -ENOMEM;
+
+ if (ps) {
+ /* Sleep until next TBTT */
+ state = CARL9170_PSM_SLEEP | 1;
+ } else {
+ /* wake up immediately */
+ state = 1;
+ }
+
+ cmd->psm.state = cpu_to_le32(state);
+ return __carl9170_exec_cmd(ar, cmd, true);
+}
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
new file mode 100644
index 000000000..65919c902
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -0,0 +1,174 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * Basic HW register/memory/command access functions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CMD_H
+#define __CMD_H
+
+#include "carl9170.h"
+
+/* basic HW access */
+int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
+int carl9170_read_reg(struct ar9170 *ar, const u32 reg, u32 *val);
+int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
+ const u32 *regs, u32 *out);
+int carl9170_echo_test(struct ar9170 *ar, u32 v);
+int carl9170_reboot(struct ar9170 *ar);
+int carl9170_mac_reset(struct ar9170 *ar);
+int carl9170_powersave(struct ar9170 *ar, const bool power_on);
+int carl9170_collect_tally(struct ar9170 *ar);
+int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
+ const u32 mode, const u32 addr, const u32 len);
+
+static inline int carl9170_flush_cab(struct ar9170 *ar,
+ const unsigned int vif_id)
+{
+ return carl9170_bcn_ctrl(ar, vif_id, CARL9170_BCN_CTRL_DRAIN, 0, 0);
+}
+
+static inline int carl9170_rx_filter(struct ar9170 *ar,
+ const unsigned int _rx_filter)
+{
+ __le32 rx_filter = cpu_to_le32(_rx_filter);
+
+ return carl9170_exec_cmd(ar, CARL9170_CMD_RX_FILTER,
+ sizeof(rx_filter), (u8 *)&rx_filter,
+ 0, NULL);
+}
+
+struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
+ const enum carl9170_cmd_oids cmd, const unsigned int len);
+
+/*
+ * Macros to facilitate writing multiple registers in a single
+ * write-combining USB command. Note that when the first group
+ * fails the whole thing will fail without any others attempted,
+ * but you won't know which write in the group failed.
+ */
+#define carl9170_regwrite_begin(ar) \
+do { \
+ int __nreg = 0, __err = 0; \
+ struct ar9170 *__ar = ar;
+
+#define carl9170_regwrite(r, v) do { \
+ __ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
+ __ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
+ __nreg++; \
+ if ((__nreg >= PAYLOAD_MAX / 2)) { \
+ if (IS_ACCEPTING_CMD(__ar)) \
+ __err = carl9170_exec_cmd(__ar, \
+ CARL9170_CMD_WREG, 8 * __nreg, \
+ (u8 *) &__ar->cmd_buf[1], 0, NULL); \
+ else \
+ goto __regwrite_out; \
+ \
+ __nreg = 0; \
+ if (__err) \
+ goto __regwrite_out; \
+ } \
+} while (0)
+
+#define carl9170_regwrite_finish() \
+__regwrite_out : \
+ if (__err == 0 && __nreg) { \
+ if (IS_ACCEPTING_CMD(__ar)) \
+ __err = carl9170_exec_cmd(__ar, \
+ CARL9170_CMD_WREG, 8 * __nreg, \
+ (u8 *) &__ar->cmd_buf[1], 0, NULL); \
+ __nreg = 0; \
+ }
+
+#define carl9170_regwrite_result() \
+ __err; \
+} while (0)
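+
+/*
+ * Usage sketch (illustrative; register names and values are placeholders):
+ *
+ *   carl9170_regwrite_begin(ar);
+ *   carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, density);
+ *   carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, factor);
+ *   carl9170_regwrite_finish();
+ *   return carl9170_regwrite_result();
+ */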
+
+
+#define carl9170_async_regwrite_get_buf() \
+do { \
+ __nreg = 0; \
+ __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
+ CARL9170_MAX_CMD_PAYLOAD_LEN); \
+ if (__cmd == NULL) { \
+ __err = -ENOMEM; \
+ goto __async_regwrite_out; \
+ } \
+} while (0)
+
+#define carl9170_async_regwrite_begin(carl) \
+do { \
+ struct ar9170 *__carl = carl; \
+ struct carl9170_cmd *__cmd; \
+ unsigned int __nreg; \
+ int __err = 0; \
+ carl9170_async_regwrite_get_buf(); \
+
+#define carl9170_async_regwrite_flush() \
+do { \
+ if (__cmd == NULL || __nreg == 0) \
+ break; \
+ \
+ if (IS_ACCEPTING_CMD(__carl) && __nreg) { \
+ __cmd->hdr.len = 8 * __nreg; \
+ __err = __carl9170_exec_cmd(__carl, __cmd, true); \
+ __cmd = NULL; \
+ break; \
+ } \
+ goto __async_regwrite_out; \
+} while (0)
+
+#define carl9170_async_regwrite(r, v) do { \
+ if (__cmd == NULL) \
+ carl9170_async_regwrite_get_buf(); \
+ __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
+ __cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
+ __nreg++; \
+ if ((__nreg >= PAYLOAD_MAX / 2)) \
+ carl9170_async_regwrite_flush(); \
+} while (0)
+
+#define carl9170_async_regwrite_finish() do { \
+__async_regwrite_out: \
+ if (__cmd != NULL && __err == 0) \
+ carl9170_async_regwrite_flush(); \
+ kfree(__cmd); \
+} while (0) \
+
+#define carl9170_async_regwrite_result() \
+ __err; \
+} while (0)
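+
+/*
+ * Usage sketch (illustrative; mirrors the synchronous pattern above, but
+ * the WREG command buffer is submitted via __carl9170_exec_cmd() and any
+ * leftover buffer is freed by the _finish step):
+ *
+ *   carl9170_async_regwrite_begin(ar);
+ *   carl9170_async_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, factor);
+ *   carl9170_async_regwrite_finish();
+ *   err = carl9170_async_regwrite_result();
+ */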
+
+#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
new file mode 100644
index 000000000..6808db433
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -0,0 +1,884 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * debug(fs) probing
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2008-2009 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include "carl9170.h"
+#include "cmd.h"
+
+#define ADD(buf, off, max, fmt, args...) \
+ off += snprintf(&buf[off], max - off, fmt, ##args);
+
+
+struct carl9170_debugfs_fops {
+ unsigned int read_bufsize;
+ umode_t attr;
+ char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
+ ssize_t *len);
+ ssize_t (*write)(struct ar9170 *ar, const char *buf, size_t size);
+ const struct file_operations fops;
+
+ enum carl9170_device_state req_dev_state;
+};
+
+static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct carl9170_debugfs_fops *dfops;
+ struct ar9170 *ar;
+ char *buf = NULL, *res_buf = NULL;
+ ssize_t ret = 0;
+ int err = 0;
+
+ if (!count)
+ return 0;
+
+ ar = file->private_data;
+
+ if (!ar)
+ return -ENODEV;
+ dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
+
+ if (!dfops->read)
+ return -ENOSYS;
+
+ if (dfops->read_bufsize) {
+ buf = vmalloc(dfops->read_bufsize);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&ar->mutex);
+ if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
+ err = -ENODEV;
+ res_buf = buf;
+ goto out_free;
+ }
+
+ res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret);
+
+ if (ret > 0)
+ err = simple_read_from_buffer(userbuf, count, ppos,
+ res_buf, ret);
+ else
+ err = ret;
+
+ WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf));
+
+out_free:
+ vfree(res_buf);
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static ssize_t carl9170_debugfs_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct carl9170_debugfs_fops *dfops;
+ struct ar9170 *ar;
+ char *buf = NULL;
+ int err = 0;
+
+ if (!count)
+ return 0;
+
+ if (count > PAGE_SIZE)
+ return -E2BIG;
+
+ ar = file->private_data;
+
+ if (!ar)
+ return -ENODEV;
+ dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
+
+ if (!dfops->write)
+ return -ENOSYS;
+
+ buf = vmalloc(count);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, userbuf, count)) {
+ err = -EFAULT;
+ goto out_free;
+ }
+
+ if (mutex_trylock(&ar->mutex) == 0) {
+ err = -EAGAIN;
+ goto out_free;
+ }
+
+ if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ err = dfops->write(ar, buf, count);
+
+out_unlock:
+ mutex_unlock(&ar->mutex);
+
+out_free:
+ vfree(buf);
+ return err;
+}
+
+#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
+ _attr, _dstate) \
+static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
+ .read_bufsize = _read_bufsize, \
+ .read = _read, \
+ .write = _write, \
+ .attr = _attr, \
+ .req_dev_state = _dstate, \
+ .fops = { \
+ .open = simple_open, \
+ .read = carl9170_debugfs_read, \
+ .write = carl9170_debugfs_write, \
+ .owner = THIS_MODULE \
+ }, \
+}
+
+#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
+ __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
+ _attr, CARL9170_STARTED) \
+
+#define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \
+ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
+ NULL, _read_bufsize, S_IRUSR)
+
+#define DEBUGFS_DECLARE_WO_FILE(name) \
+ DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\
+ 0, S_IWUSR)
+
+#define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \
+ DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
+ carl9170_debugfs_##name ##_write, \
+ _read_bufsize, S_IRUSR | S_IWUSR)
+
+#define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \
+ __DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
+ carl9170_debugfs_##name ##_write, \
+ _read_bufsize, S_IRUSR | S_IWUSR, _dstate)
+
+#define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) \
+static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \
+ char *buf, size_t buf_size,\
+ ssize_t *len) \
+{ \
+ ADD(buf, *len, buf_size, fmt "\n", ##value); \
+ return buf; \
+} \
+DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)
+
+static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ spin_lock_bh(&ar->mem_lock);
+
+ ADD(buf, *len, bufsize, "jar: [%*pb]\n",
+ ar->fw.mem_blocks, ar->mem_bitmap);
+
+ ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
+ bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
+ ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));
+
+ ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
+ atomic_read(&ar->mem_free_blocks),
+ (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024,
+ (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024);
+
+ spin_unlock_bh(&ar->mem_lock);
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(mem_usage, 512);
+
+static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ? "Hardware" :
+ "Software");
+
+ ADD(buf, *len, bufsize, "[ VO VI "
+ " BE BK ]\n");
+
+ spin_lock_bh(&ar->tx_stats_lock);
+ ADD(buf, *len, bufsize, "[length/limit length/limit "
+ "length/limit length/limit ]\n"
+ "[ %3d/%3d %3d/%3d "
+ " %3d/%3d %3d/%3d ]\n\n",
+ ar->tx_stats[0].len, ar->tx_stats[0].limit,
+ ar->tx_stats[1].len, ar->tx_stats[1].limit,
+ ar->tx_stats[2].len, ar->tx_stats[2].limit,
+ ar->tx_stats[3].len, ar->tx_stats[3].limit);
+
+ ADD(buf, *len, bufsize, "[ total total "
+ " total total ]\n"
+ "[%10d %10d %10d %10d ]\n\n",
+ ar->tx_stats[0].count, ar->tx_stats[1].count,
+ ar->tx_stats[2].count, ar->tx_stats[3].count);
+
+ spin_unlock_bh(&ar->tx_stats_lock);
+
+ ADD(buf, *len, bufsize, "[ pend/waittx pend/waittx "
+ " pend/waittx pend/waittx]\n"
+ "[ %3d/%3d %3d/%3d "
+ " %3d/%3d %3d/%3d ]\n\n",
+ skb_queue_len(&ar->tx_pending[0]),
+ skb_queue_len(&ar->tx_status[0]),
+ skb_queue_len(&ar->tx_pending[1]),
+ skb_queue_len(&ar->tx_status[1]),
+ skb_queue_len(&ar->tx_pending[2]),
+ skb_queue_len(&ar->tx_status[2]),
+ skb_queue_len(&ar->tx_pending[3]),
+ skb_queue_len(&ar->tx_status[3]));
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(qos_stat, 512);
+
+static void carl9170_debugfs_format_frame(struct ar9170 *ar,
+ struct sk_buff *skb, const char *prefix, char *buf,
+ ssize_t *off, ssize_t bufsize)
+{
+ struct _carl9170_tx_superframe *txc = (void *) skb->data;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
+ struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+
+ ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, "
+ "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
+ ieee80211_get_DA(hdr), get_seq_h(hdr),
+ le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control),
+ jiffies_to_msecs(jiffies - arinfo->timeout));
+}
+
+
+static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ struct carl9170_sta_tid *iter;
+ struct sk_buff *skb;
+ int cnt = 0, fc;
+ int offset;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
+
+ spin_lock_bh(&iter->lock);
+ ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, "
+ "SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n",
+ cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
+ iter->max, iter->state, iter->counter);
+
+ ADD(buf, *len, bufsize, "\tWindow: [%*pb,W]\n",
+ CARL9170_BAW_BITS, iter->bitmap);
+
+#define BM_STR_OFF(offset) \
+ ((CARL9170_BAW_BITS - (offset) - 1) / 4 + \
+ (CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)
+
+ offset = BM_STR_OFF(0);
+ ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");
+
+ offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
+ ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W");
+
+ offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
+ CARL9170_BAW_BITS);
+ ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N");
+
+ ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: "
+ " currently queued:%d\n", skb_queue_len(&iter->queue));
+
+ fc = 0;
+ skb_queue_walk(&iter->queue, skb) {
+ char prefix[32];
+
+ snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc);
+ carl9170_debugfs_format_frame(ar, skb, prefix, buf,
+ len, bufsize);
+
+ fc++;
+ }
+ spin_unlock_bh(&iter->lock);
+ cnt++;
+ }
+ rcu_read_unlock();
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000);
+
+static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf,
+ ssize_t *len, size_t bufsize, struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+ char prefix[16];
+ int fc = 0;
+
+ spin_lock_bh(&queue->lock);
+ skb_queue_walk(queue, skb) {
+ snprintf(prefix, sizeof(prefix), "%3d :", fc);
+ carl9170_debugfs_format_frame(ar, skb, prefix, buf,
+ len, bufsize);
+ fc++;
+ }
+ spin_unlock_bh(&queue->lock);
+}
+
+#define DEBUGFS_QUEUE_DUMP(q, qi) \
+static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \
+ char *buf, size_t bufsize, ssize_t *len) \
+{ \
+ carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \
+ return buf; \
+} \
+DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);
+
+static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ?
+ "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM")));
+
+ ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms);
+ ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n",
+ jiffies_to_msecs(jiffies - ar->ps.last_action));
+ ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n",
+ jiffies_to_msecs(jiffies - ar->ps.last_slept));
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(sta_psm, 160);
+
+static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ int i;
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n",
+ i, ieee80211_queue_stopped(ar->hw, i) ?
+ jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0,
+ jiffies_to_msecs(ar->max_queue_stop_timeout[i]));
+
+ ar->max_queue_stop_timeout[i] = 0;
+ }
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180);
+
+static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ int err;
+
+ err = carl9170_get_noisefloor(ar);
+ if (err) {
+ *len = err;
+ return buf;
+ }
+
+ ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n",
+ ar->noise[0], ar->noise[2]);
+ ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. chan.:%10d dBm\n",
+ ar->noise[1], ar->noise[3]);
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(phy_noise, 180);
+
+static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *len)
+{
+ struct carl9170_vif_info *iter;
+ int i = 0;
+
+ ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
+ ar->vifs, ar->fw.vif_num);
+
+ ADD(buf, *len, bufsize, "VIF bitmap: [%*pb]\n",
+ ar->fw.vif_num, &ar->vif_bitmap);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &ar->vif_list, list) {
+ struct ieee80211_vif *vif = carl9170_get_vif(iter);
+ ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x "
+ " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ?
+ "Master" : " Slave"), iter->id, vif->type, vif->addr,
+ iter->enable_beacon ? "beaconing " : "");
+ i++;
+ }
+ rcu_read_unlock();
+
+ return buf;
+}
+DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000);
+
+#define UPDATE_COUNTER(ar, name) ({ \
+ u32 __tmp[ARRAY_SIZE(name##_regs)]; \
+ unsigned int __i, __err = -ENODEV; \
+ \
+ for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
+ __tmp[__i] = name##_regs[__i].reg; \
+ ar->debug.stats.name##_counter[__i] = 0; \
+ } \
+ \
+ if (IS_STARTED(ar)) \
+ __err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \
+ __tmp, ar->debug.stats.name##_counter); \
+ (__err); })
+
+#define TALLY_SUM_UP(ar, name) do { \
+ unsigned int __i; \
+ \
+ for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
+ ar->debug.stats.name##_sum[__i] += \
+ ar->debug.stats.name##_counter[__i]; \
+ } \
+} while (0)
+
+#define DEBUGFS_HW_TALLY_FILE(name, f) \
+static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
+ char *dum, size_t bufsize, ssize_t *ret) \
+{ \
+ char *buf; \
+ int i, max_len, err; \
+ \
+ max_len = ARRAY_SIZE(name##_regs) * 80; \
+ buf = vmalloc(max_len); \
+ if (!buf) \
+ return NULL; \
+ \
+ err = UPDATE_COUNTER(ar, name); \
+ if (err) { \
+ *ret = err; \
+ return buf; \
+ } \
+ \
+ TALLY_SUM_UP(ar, name); \
+ \
+ for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
+ ADD(buf, *ret, max_len, "%22s = %" f "[+%" f "]\n", \
+ name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
+ ar->debug.stats.name ##_counter[i]); \
+ } \
+ \
+ return buf; \
+} \
+DEBUGFS_DECLARE_RO_FILE(name, 0);
+
+#define DEBUGFS_HW_REG_FILE(name, f) \
+static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
+ char *dum, size_t bufsize, ssize_t *ret) \
+{ \
+ char *buf; \
+ int i, max_len, err; \
+ \
+ max_len = ARRAY_SIZE(name##_regs) * 80; \
+ buf = vmalloc(max_len); \
+ if (!buf) \
+ return NULL; \
+ \
+ err = UPDATE_COUNTER(ar, name); \
+ if (err) { \
+ *ret = err; \
+ return buf; \
+ } \
+ \
+ for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
+ ADD(buf, *ret, max_len, "%22s = %" f "\n", \
+ name##_regs[i].nreg, \
+ ar->debug.stats.name##_counter[i]); \
+ } \
+ \
+ return buf; \
+} \
+DEBUGFS_DECLARE_RO_FILE(name, 0);
+
+static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar,
+ const char *buf, size_t count)
+{
+ int err = 0, i, n = 0, max_len = 32, res;
+ unsigned int reg, tmp;
+
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -E2BIG;
+
+ res = sscanf(buf, "0x%X %d", &reg, &n);
+ if (res < 1) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (res == 1)
+ n = 1;
+
+ if (n > 15) {
+ err = -EMSGSIZE;
+ goto out;
+ }
+
+ if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ if (reg & 3) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < n; i++) {
+ err = carl9170_read_reg(ar, reg + (i << 2), &tmp);
+ if (err)
+ goto out;
+
+ ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
+ ar->debug.ring[ar->debug.ring_tail].value = tmp;
+ ar->debug.ring_tail++;
+ ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE;
+ }
+
+out:
+ return err ? err : count;
+}
+
+static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *ret)
+{
+ int i = 0;
+
+ while (ar->debug.ring_head != ar->debug.ring_tail) {
+ ADD(buf, *ret, bufsize, "%.8x = %.8x\n",
+ ar->debug.ring[ar->debug.ring_head].reg,
+ ar->debug.ring[ar->debug.ring_head].value);
+
+ ar->debug.ring_head++;
+ ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE;
+
+ if (i++ == 64)
+ break;
+ }
+ ar->debug.ring_head = ar->debug.ring_tail;
+ return buf;
+}
+DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40);
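+
+/*
+ * Usage sketch (illustrative; the address is only a placeholder): writing
+ * "0x<reg> <n>" to this file queues up to 15 word-aligned register reads
+ * below 0x280000, e.g. "0x1c3d30 4"; the sampled "register = value" pairs
+ * are then returned by the next read of this file.
+ */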
+
+static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
+ size_t count)
+{
+ int err;
+
+ if (count < 1)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case 'F':
+ ar->needs_full_reset = true;
+ break;
+
+ case 'R':
+ if (!IS_STARTED(ar)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ ar->needs_full_reset = false;
+ break;
+
+ case 'M':
+ err = carl9170_mac_reset(ar);
+ if (err < 0)
+ count = err;
+
+ goto out;
+
+ case 'P':
+ err = carl9170_set_channel(ar, ar->hw->conf.chandef.chan,
+ cfg80211_get_chandef_type(&ar->hw->conf.chandef));
+ if (err < 0)
+ count = err;
+
+ goto out;
+
+ default:
+ return -EINVAL;
+ }
+
+ carl9170_restart(ar, CARL9170_RR_USER_REQUEST);
+
+out:
+ return count;
+}
+
+static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *ret)
+{
+ ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, "
+ "[M]ac reset\n");
+ ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n",
+ ar->restart_counter, ar->last_reason);
+ ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n",
+ ar->total_chan_fail, ar->chan_fail);
+ ADD(buf, *ret, bufsize, "reported firmware errors:%d\n",
+ ar->fw.err_counter);
+ ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
+ ar->fw.bug_counter);
+ ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
+ atomic_read(&ar->pending_restarts));
+ return buf;
+}
+__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
+
+static const char *const erp_modes[] = {
+ [CARL9170_ERP_INVALID] = "INVALID",
+ [CARL9170_ERP_AUTO] = "Automatic",
+ [CARL9170_ERP_MAC80211] = "Set by MAC80211",
+ [CARL9170_ERP_OFF] = "Force Off",
+ [CARL9170_ERP_RTS] = "Force RTS",
+ [CARL9170_ERP_CTS] = "Force CTS"
+};
+
+static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf,
+ size_t bufsize, ssize_t *ret)
+{
+ ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode,
+ erp_modes[ar->erp_mode]);
+ return buf;
+}
+
+static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf,
+ size_t count)
+{
+ int res, val;
+
+ if (count < 1)
+ return -EINVAL;
+
+ res = sscanf(buf, "%d", &val);
+ if (res != 1)
+ return -EINVAL;
+
+ if (!((val > CARL9170_ERP_INVALID) &&
+ (val < __CARL9170_ERP_NUM)))
+ return -EINVAL;
+
+ ar->erp_mode = val;
+ return count;
+}
+
+DEBUGFS_DECLARE_RW_FILE(erp, 80);
+
+static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar,
+ const char *buf, size_t count)
+{
+ int err = 0, max_len = 22, res;
+ u32 reg, val;
+
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -E2BIG;
+
+ res = sscanf(buf, "0x%X 0x%X", &reg, &val);
+ if (res != 2) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (reg <= 0x100000 || reg >= 0x280000) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ if (reg & 3) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = carl9170_write_reg(ar, reg, val);
+ if (err)
+ goto out;
+
+out:
+ return err ? err : count;
+}
+DEBUGFS_DECLARE_WO_FILE(hw_iowrite32);
+
+DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u");
+DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u");
+DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u");
+DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x");
+DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x");
+DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x");
+DEBUGFS_QUEUE_DUMP(tx_status, 0);
+DEBUGFS_QUEUE_DUMP(tx_status, 1);
+DEBUGFS_QUEUE_DUMP(tx_status, 2);
+DEBUGFS_QUEUE_DUMP(tx_status, 3);
+DEBUGFS_QUEUE_DUMP(tx_pending, 0);
+DEBUGFS_QUEUE_DUMP(tx_pending, 1);
+DEBUGFS_QUEUE_DUMP(tx_pending, 2);
+DEBUGFS_QUEUE_DUMP(tx_pending, 3);
+DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d",
+ atomic_read(&ar->tx_anch_urbs));
+DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d",
+ atomic_read(&ar->rx_anch_urbs));
+DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d",
+ atomic_read(&ar->rx_work_urbs));
+DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
+ atomic_read(&ar->rx_pool_urbs));
+
+DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
+ atomic_read(&ar->tx_total_queued));
+DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
+ atomic_read(&ar->tx_ampdu_scheduler));
+
+DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
+ atomic_read(&ar->tx_total_pending));
+
+DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d",
+ ar->tx_ampdu_list_len);
+
+DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d",
+ atomic_read(&ar->tx_ampdu_upload));
+
+DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago",
+ jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));
+
+DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);
+
+DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);
+
+DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
+DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d",
+ ar->rx_software_decryption);
+DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d",
+ ar->current_factor);
+DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d",
+ ar->current_density);
+
+DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
+DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", ar->global_pretbtt);
+
+void carl9170_debugfs_register(struct ar9170 *ar)
+{
+ ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME,
+ ar->hw->wiphy->debugfsdir);
+
+#define DEBUGFS_ADD(name) \
+ debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \
+ ar->debug_dir, ar, \
+ &carl_debugfs_##name ## _ops.fops);
+
+ DEBUGFS_ADD(usb_tx_anch_urbs);
+ DEBUGFS_ADD(usb_rx_pool_urbs);
+ DEBUGFS_ADD(usb_rx_anch_urbs);
+ DEBUGFS_ADD(usb_rx_work_urbs);
+
+ DEBUGFS_ADD(tx_total_queued);
+ DEBUGFS_ADD(tx_total_pending);
+ DEBUGFS_ADD(tx_dropped);
+ DEBUGFS_ADD(tx_stuck);
+ DEBUGFS_ADD(tx_ampdu_upload);
+ DEBUGFS_ADD(tx_ampdu_scheduler);
+ DEBUGFS_ADD(tx_ampdu_list_len);
+
+ DEBUGFS_ADD(rx_dropped);
+ DEBUGFS_ADD(sniffer_enabled);
+ DEBUGFS_ADD(rx_software_decryption);
+
+ DEBUGFS_ADD(mem_usage);
+ DEBUGFS_ADD(qos_stat);
+ DEBUGFS_ADD(sta_psm);
+ DEBUGFS_ADD(ampdu_state);
+
+ DEBUGFS_ADD(hw_tx_tally);
+ DEBUGFS_ADD(hw_rx_tally);
+ DEBUGFS_ADD(hw_phy_errors);
+ DEBUGFS_ADD(phy_noise);
+
+ DEBUGFS_ADD(hw_wlan_queue);
+ DEBUGFS_ADD(hw_pta_queue);
+ DEBUGFS_ADD(hw_ampdu_info);
+
+ DEBUGFS_ADD(ampdu_density);
+ DEBUGFS_ADD(ampdu_factor);
+
+ DEBUGFS_ADD(tx_janitor_last_run);
+
+ DEBUGFS_ADD(tx_status_0);
+ DEBUGFS_ADD(tx_status_1);
+ DEBUGFS_ADD(tx_status_2);
+ DEBUGFS_ADD(tx_status_3);
+
+ DEBUGFS_ADD(tx_pending_0);
+ DEBUGFS_ADD(tx_pending_1);
+ DEBUGFS_ADD(tx_pending_2);
+ DEBUGFS_ADD(tx_pending_3);
+
+ DEBUGFS_ADD(hw_ioread32);
+ DEBUGFS_ADD(hw_iowrite32);
+ DEBUGFS_ADD(bug);
+
+ DEBUGFS_ADD(erp);
+
+ DEBUGFS_ADD(vif_dump);
+
+ DEBUGFS_ADD(beacon_int);
+ DEBUGFS_ADD(pretbtt);
+
+#undef DEBUGFS_ADD
+}
+
+void carl9170_debugfs_unregister(struct ar9170 *ar)
+{
+ debugfs_remove_recursive(ar->debug_dir);
+}
diff --git a/drivers/net/wireless/ath/carl9170/debug.h b/drivers/net/wireless/ath/carl9170/debug.h
new file mode 100644
index 000000000..ea4b97524
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.h
@@ -0,0 +1,134 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * debug header
+ *
+ * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __DEBUG_H
+#define __DEBUG_H
+
+#include "eeprom.h"
+#include "wlan.h"
+#include "hw.h"
+#include "fwdesc.h"
+#include "fwcmd.h"
+#include "../regd.h"
+
+struct hw_stat_reg_entry {
+ u32 reg;
+ char nreg[32];
+};
+
+#define STAT_MAC_REG(reg) \
+ { (AR9170_MAC_REG_##reg), #reg }
+
+#define STAT_PTA_REG(reg) \
+ { (AR9170_PTA_REG_##reg), #reg }
+
+#define STAT_USB_REG(reg) \
+ { (AR9170_USB_REG_##reg), #reg }
+
+static const struct hw_stat_reg_entry hw_rx_tally_regs[] = {
+ STAT_MAC_REG(RX_CRC32), STAT_MAC_REG(RX_CRC16),
+ STAT_MAC_REG(RX_TIMEOUT_COUNT), STAT_MAC_REG(RX_ERR_DECRYPTION_UNI),
+ STAT_MAC_REG(RX_ERR_DECRYPTION_MUL), STAT_MAC_REG(RX_MPDU),
+ STAT_MAC_REG(RX_DROPPED_MPDU), STAT_MAC_REG(RX_DEL_MPDU),
+};
+
+static const struct hw_stat_reg_entry hw_phy_errors_regs[] = {
+ STAT_MAC_REG(RX_PHY_MISC_ERROR), STAT_MAC_REG(RX_PHY_XR_ERROR),
+ STAT_MAC_REG(RX_PHY_OFDM_ERROR), STAT_MAC_REG(RX_PHY_CCK_ERROR),
+ STAT_MAC_REG(RX_PHY_HT_ERROR), STAT_MAC_REG(RX_PHY_TOTAL),
+};
+
+static const struct hw_stat_reg_entry hw_tx_tally_regs[] = {
+ STAT_MAC_REG(TX_TOTAL), STAT_MAC_REG(TX_UNDERRUN),
+ STAT_MAC_REG(TX_RETRY),
+};
+
+static const struct hw_stat_reg_entry hw_wlan_queue_regs[] = {
+ STAT_MAC_REG(DMA_STATUS), STAT_MAC_REG(DMA_TRIGGER),
+ STAT_MAC_REG(DMA_TXQ0_ADDR), STAT_MAC_REG(DMA_TXQ0_CURR_ADDR),
+ STAT_MAC_REG(DMA_TXQ1_ADDR), STAT_MAC_REG(DMA_TXQ1_CURR_ADDR),
+ STAT_MAC_REG(DMA_TXQ2_ADDR), STAT_MAC_REG(DMA_TXQ2_CURR_ADDR),
+ STAT_MAC_REG(DMA_TXQ3_ADDR), STAT_MAC_REG(DMA_TXQ3_CURR_ADDR),
+ STAT_MAC_REG(DMA_RXQ_ADDR), STAT_MAC_REG(DMA_RXQ_CURR_ADDR),
+};
+
+static const struct hw_stat_reg_entry hw_ampdu_info_regs[] = {
+ STAT_MAC_REG(AMPDU_DENSITY), STAT_MAC_REG(AMPDU_FACTOR),
+};
+
+static const struct hw_stat_reg_entry hw_pta_queue_regs[] = {
+ STAT_PTA_REG(DN_CURR_ADDRH), STAT_PTA_REG(DN_CURR_ADDRL),
+ STAT_PTA_REG(UP_CURR_ADDRH), STAT_PTA_REG(UP_CURR_ADDRL),
+ STAT_PTA_REG(DMA_STATUS), STAT_PTA_REG(DMA_MODE_CTRL),
+};
+
+#define DEFINE_TALLY(name) \
+ u32 name##_sum[ARRAY_SIZE(name##_regs)], \
+ name##_counter[ARRAY_SIZE(name##_regs)] \
+
+#define DEFINE_STAT(name) \
+ u32 name##_counter[ARRAY_SIZE(name##_regs)] \
+
+struct ath_stats {
+ DEFINE_TALLY(hw_tx_tally);
+ DEFINE_TALLY(hw_rx_tally);
+ DEFINE_TALLY(hw_phy_errors);
+ DEFINE_STAT(hw_wlan_queue);
+ DEFINE_STAT(hw_pta_queue);
+ DEFINE_STAT(hw_ampdu_info);
+};
+
+struct carl9170_debug_mem_rbe {
+ u32 reg;
+ u32 value;
+};
+
+#define CARL9170_DEBUG_RING_SIZE 64
+
+struct carl9170_debug {
+ struct ath_stats stats;
+ struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE];
+ struct mutex ring_lock;
+ unsigned int ring_head, ring_tail;
+ struct delayed_work update_tally;
+};
+
+struct ar9170;
+
+void carl9170_debugfs_register(struct ar9170 *ar);
+void carl9170_debugfs_unregister(struct ar9170 *ar);
+#endif /* __DEBUG_H */
diff --git a/drivers/net/wireless/ath/carl9170/eeprom.h b/drivers/net/wireless/ath/carl9170/eeprom.h
new file mode 100644
index 000000000..7cff40ac7
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/eeprom.h
@@ -0,0 +1,216 @@
+/*
+ * Shared Atheros AR9170 Header
+ *
+ * EEPROM layout
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CARL9170_SHARED_EEPROM_H
+#define __CARL9170_SHARED_EEPROM_H
+
+#define AR9170_EEPROM_START 0x1600
+
+#define AR5416_MAX_CHAINS 2
+#define AR5416_MODAL_SPURS 5
+
+struct ar9170_eeprom_modal {
+ __le32 antCtrlChain[AR5416_MAX_CHAINS];
+ __le32 antCtrlCommon;
+ s8 antennaGainCh[AR5416_MAX_CHAINS];
+ u8 switchSettling;
+ u8 txRxAttenCh[AR5416_MAX_CHAINS];
+ u8 rxTxMarginCh[AR5416_MAX_CHAINS];
+ s8 adcDesiredSize;
+ s8 pgaDesiredSize;
+ u8 xlnaGainCh[AR5416_MAX_CHAINS];
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
+ u8 xpdGain;
+ u8 xpd;
+ s8 iqCalICh[AR5416_MAX_CHAINS];
+ s8 iqCalQCh[AR5416_MAX_CHAINS];
+ u8 pdGainOverlap;
+ u8 ob;
+ u8 db;
+ u8 xpaBiasLvl;
+ u8 pwrDecreaseFor2Chain;
+ u8 pwrDecreaseFor3Chain;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 ht40PowerIncForPdadc;
+ u8 bswAtten[AR5416_MAX_CHAINS];
+ u8 bswMargin[AR5416_MAX_CHAINS];
+ u8 swSettleHt40;
+ u8 reserved[22];
+ struct spur_channel {
+ __le16 spurChan;
+ u8 spurRangeLow;
+ u8 spurRangeHigh;
+ } __packed spur_channels[AR5416_MODAL_SPURS];
+} __packed;
+
+#define AR5416_NUM_PD_GAINS 4
+#define AR5416_PD_GAIN_ICEPTS 5
+
+struct ar9170_calibration_data_per_freq {
+ u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+} __packed;
+
+#define AR5416_NUM_5G_CAL_PIERS 8
+#define AR5416_NUM_2G_CAL_PIERS 4
+
+#define AR5416_NUM_5G_TARGET_PWRS 8
+#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
+#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
+#define AR5416_MAX_NUM_TGT_PWRS 8
+
+struct ar9170_calibration_target_power_legacy {
+ u8 freq;
+ u8 power[4];
+} __packed;
+
+struct ar9170_calibration_target_power_ht {
+ u8 freq;
+ u8 power[8];
+} __packed;
+
+#define AR5416_NUM_CTLS 24
+
+struct ar9170_calctl_edges {
+ u8 channel;
+#define AR9170_CALCTL_EDGE_FLAGS 0xC0
+ u8 power_flags;
+} __packed;
+
+#define AR5416_NUM_BAND_EDGES 8
+
+struct ar9170_calctl_data {
+ struct ar9170_calctl_edges
+ control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
+} __packed;
+
+struct ar9170_eeprom {
+ __le16 length;
+ __le16 checksum;
+ __le16 version;
+ u8 operating_flags;
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
+ u8 misc;
+ __le16 reg_domain[2];
+ u8 mac_address[6];
+ u8 rx_mask;
+ u8 tx_mask;
+ __le16 rf_silent;
+ __le16 bluetooth_options;
+ __le16 device_capabilities;
+ __le32 build_number;
+ u8 deviceType;
+ u8 reserved[33];
+
+ u8 customer_data[64];
+
+ struct ar9170_eeprom_modal
+ modal_header[2];
+
+ u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
+ u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
+
+ struct ar9170_calibration_data_per_freq
+ cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
+ cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
+
+ /* power calibration data */
+ struct ar9170_calibration_target_power_legacy
+ cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
+ struct ar9170_calibration_target_power_ht
+ cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
+ cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
+
+ struct ar9170_calibration_target_power_legacy
+ cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
+ cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
+ struct ar9170_calibration_target_power_ht
+ cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
+ cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
+
+ /* conformance testing limits */
+ u8 ctl_index[AR5416_NUM_CTLS];
+ struct ar9170_calctl_data
+ ctl_data[AR5416_NUM_CTLS];
+
+ u8 pad;
+ __le16 subsystem_id;
+} __packed;
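+
+/*
+ * Note (illustrative): the EEPROM image is read starting at
+ * AR9170_EEPROM_START, so the wire offset of a field follows directly
+ * from the packed layout above, e.g.
+ * AR9170_EEPROM_START + offsetof(struct ar9170_eeprom, mac_address).
+ */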
+
+#define AR9170_LED_MODE_POWER_ON 0x0001
+#define AR9170_LED_MODE_RESERVED 0x0002
+#define AR9170_LED_MODE_DISABLE_STATE 0x0004
+#define AR9170_LED_MODE_OFF_IN_PSM 0x0008
+
+/* AR9170_LED_MODE BIT is set */
+#define AR9170_LED_MODE_FREQUENCY_S 4
+#define AR9170_LED_MODE_FREQUENCY 0x0030
+#define AR9170_LED_MODE_FREQUENCY_1HZ 0x0000
+#define AR9170_LED_MODE_FREQUENCY_0_5HZ 0x0010
+#define AR9170_LED_MODE_FREQUENCY_0_25HZ 0x0020
+#define AR9170_LED_MODE_FREQUENCY_0_125HZ 0x0030
+
+/* AR9170_LED_MODE BIT is not set */
+#define AR9170_LED_MODE_CONN_STATE_S 4
+#define AR9170_LED_MODE_CONN_STATE 0x0030
+#define AR9170_LED_MODE_CONN_STATE_FORCE_OFF 0x0000
+#define AR9170_LED_MODE_CONN_STATE_FORCE_ON 0x0010
+/* Idle off / Active on */
+#define AR9170_LED_MODE_CONN_STATE_IOFF_AON 0x0020
+/* Idle on / Active off */
+#define AR9170_LED_MODE_CONN_STATE_ION_AOFF 0x0010
+
+#define AR9170_LED_MODE_MODE 0x0040
+#define AR9170_LED_MODE_RESERVED2 0x0080
+
+#define AR9170_LED_MODE_TON_SCAN_S 8
+#define AR9170_LED_MODE_TON_SCAN 0x0f00
+
+#define AR9170_LED_MODE_TOFF_SCAN_S 12
+#define AR9170_LED_MODE_TOFF_SCAN 0xf000
+
+struct ar9170_led_mode {
+ __le16 led;
+};
+
+#endif /* __CARL9170_SHARED_EEPROM_H */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
new file mode 100644
index 000000000..47d5c2e91
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -0,0 +1,447 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * firmware parser
+ *
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include "carl9170.h"
+#include "fwcmd.h"
+#include "version.h"
+
+static const u8 otus_magic[4] = { OTUS_MAGIC };
+
+static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
+ const unsigned int len, const u8 compatible_revision)
+{
+ const struct carl9170fw_desc_head *iter;
+
+ carl9170fw_for_each_hdr(iter, ar->fw.desc) {
+ if (carl9170fw_desc_cmp(iter, descid, len,
+ compatible_revision))
+ return (void *)iter;
+ }
+
+ /* needed to find the LAST desc */
+ if (carl9170fw_desc_cmp(iter, descid, len,
+ compatible_revision))
+ return (void *)iter;
+
+ return NULL;
+}
+
+static int carl9170_fw_verify_descs(struct ar9170 *ar,
+ const struct carl9170fw_desc_head *head, unsigned int max_len)
+{
+ const struct carl9170fw_desc_head *pos;
+ unsigned long pos_addr, end_addr;
+ unsigned int pos_length;
+
+ if (max_len < sizeof(*pos))
+ return -ENODATA;
+
+ max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len);
+
+ pos = head;
+ pos_addr = (unsigned long) pos;
+ end_addr = pos_addr + max_len;
+
+ while (pos_addr < end_addr) {
+ if (pos_addr + sizeof(*head) > end_addr)
+ return -E2BIG;
+
+ pos_length = le16_to_cpu(pos->length);
+
+ if (pos_length < sizeof(*head))
+ return -EBADMSG;
+
+ if (pos_length > max_len)
+ return -EOVERFLOW;
+
+ if (pos_addr + pos_length > end_addr)
+ return -EMSGSIZE;
+
+ if (carl9170fw_desc_cmp(pos, LAST_MAGIC,
+ CARL9170FW_LAST_DESC_SIZE,
+ CARL9170FW_LAST_DESC_CUR_VER))
+ return 0;
+
+ pos_addr += pos_length;
+ pos = (void *)pos_addr;
+ max_len -= pos_length;
+ }
+ return -EINVAL;
+}
+
+static void carl9170_fw_info(struct ar9170 *ar)
+{
+ const struct carl9170fw_motd_desc *motd_desc;
+ unsigned int str_ver_len;
+ u32 fw_date;
+
+ dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n",
+ CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR,
+ CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY,
+ CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER);
+
+ motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC,
+ sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER);
+
+ if (motd_desc) {
+ str_ver_len = strnlen(motd_desc->release,
+ CARL9170FW_MOTD_RELEASE_LEN);
+
+ fw_date = le32_to_cpu(motd_desc->fw_year_month_day);
+
+ dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n",
+ str_ver_len, motd_desc->release,
+ CARL9170FW_GET_YEAR(fw_date),
+ CARL9170FW_GET_MONTH(fw_date),
+ CARL9170FW_GET_DAY(fw_date));
+
+ strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
+ sizeof(ar->hw->wiphy->fw_version));
+ }
+}
+
+static bool valid_dma_addr(const u32 address)
+{
+ if (address >= AR9170_SRAM_OFFSET &&
+ address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE))
+ return true;
+
+ return false;
+}
+
+static bool valid_cpu_addr(const u32 address)
+{
+ if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET &&
+ address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE)))
+ return true;
+
+ return false;
+}
+
+static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
+ size_t len)
+{
+ const struct carl9170fw_otus_desc *otus_desc;
+ const struct carl9170fw_last_desc *last_desc;
+ const struct carl9170fw_chk_desc *chk_desc;
+ unsigned long fin, diff;
+ unsigned int dsc_len;
+ u32 crc32;
+
+ last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
+ sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
+ if (!last_desc)
+ return -EINVAL;
+
+ otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+ sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+ if (!otus_desc) {
+ dev_err(&ar->udev->dev, "failed to find compatible firmware "
+ "descriptor.\n");
+ return -ENODATA;
+ }
+
+ chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
+ sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
+
+ if (!chk_desc) {
+ dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ return 0;
+ }
+
+ dsc_len = min_t(unsigned int, len,
+ (unsigned long)chk_desc - (unsigned long)otus_desc);
+
+ fin = (unsigned long) last_desc + sizeof(*last_desc);
+ diff = fin - (unsigned long) otus_desc;
+
+ if (diff < len)
+ len -= diff;
+
+ if (len < 256)
+ return -EIO;
+
+ crc32 = crc32_le(~0, data, len);
+ if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
+ dev_err(&ar->udev->dev, "fw checksum test failed.\n");
+ return -ENOEXEC;
+ }
+
+ crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
+ if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
+ dev_err(&ar->udev->dev, "descriptor check failed.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int carl9170_fw_tx_sequence(struct ar9170 *ar)
+{
+ const struct carl9170fw_txsq_desc *txsq_desc;
+
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
+ CARL9170FW_TXSQ_DESC_CUR_VER);
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
+ return -EINVAL;
+ } else {
+ ar->fw.tx_seq_table = 0;
+ }
+
+ return 0;
+}
+
+static void carl9170_fw_set_if_combinations(struct ar9170 *ar,
+ u16 if_comb_types)
+{
+ if (ar->fw.vif_num < 2)
+ return;
+
+ ar->if_comb_limits[0].max = ar->fw.vif_num;
+ ar->if_comb_limits[0].types = if_comb_types;
+
+ ar->if_combs[0].num_different_channels = 1;
+ ar->if_combs[0].max_interfaces = ar->fw.vif_num;
+ ar->if_combs[0].limits = ar->if_comb_limits;
+ ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
+
+ ar->hw->wiphy->iface_combinations = ar->if_combs;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
+}
+
+static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+{
+ const struct carl9170fw_otus_desc *otus_desc;
+ int err;
+ u16 if_comb_types;
+
+ err = carl9170_fw_checksum(ar, data, len);
+ if (err)
+ return err;
+
+ otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+ sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+ if (!otus_desc) {
+ return -ENODATA;
+ }
+
+#define SUPP(feat) \
+ (carl9170fw_supports(otus_desc->feature_set, feat))
+
+ if (!SUPP(CARL9170FW_DUMMY_FEATURE)) {
+ dev_err(&ar->udev->dev, "invalid firmware descriptor "
+ "format detected.\n");
+ return -EINVAL;
+ }
+
+ ar->fw.api_version = otus_desc->api_ver;
+
+ if (ar->fw.api_version < CARL9170FW_API_MIN_VER ||
+ ar->fw.api_version > CARL9170FW_API_MAX_VER) {
+ dev_err(&ar->udev->dev, "unsupported firmware api version.\n");
+ return -EINVAL;
+ }
+
+ if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) ||
+ !SUPP(CARL9170FW_HANDLE_BACK_REQ)) {
+ dev_err(&ar->udev->dev, "firmware does support "
+ "mandatory features.\n");
+ return -ECANCELED;
+ }
+
+ if (ilog2(le32_to_cpu(otus_desc->feature_set)) >=
+ __CARL9170FW_FEATURE_NUM) {
+ dev_warn(&ar->udev->dev, "driver does not support all "
+ "firmware features.\n");
+ }
+
+ if (!SUPP(CARL9170FW_COMMAND_CAM)) {
+ dev_info(&ar->udev->dev, "crypto offloading is disabled "
+ "by firmware.\n");
+ ar->fw.disable_offload_fw = true;
+ }
+
+ if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
+ ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
+ if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
+ dev_err(&ar->udev->dev, "firmware does not provide "
+ "mandatory interfaces.\n");
+ return -EINVAL;
+ }
+
+ if (SUPP(CARL9170FW_MINIBOOT))
+ ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size);
+ else
+ ar->fw.offset = 0;
+
+ if (SUPP(CARL9170FW_USB_DOWN_STREAM)) {
+ ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream);
+ ar->fw.tx_stream = true;
+ }
+
+ if (SUPP(CARL9170FW_USB_UP_STREAM))
+ ar->fw.rx_stream = true;
+
+ if (SUPP(CARL9170FW_RX_FILTER)) {
+ ar->fw.rx_filter = true;
+ ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
+ FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
+ FIF_PROMISC_IN_BSS;
+ }
+
+ if (SUPP(CARL9170FW_HW_COUNTERS))
+ ar->fw.hw_counters = true;
+
+ if (SUPP(CARL9170FW_WOL))
+ device_set_wakeup_enable(&ar->udev->dev, true);
+
+ if (SUPP(CARL9170FW_RX_BA_FILTER))
+ ar->fw.ba_filter = true;
+
+ if_comb_types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+
+ ar->fw.vif_num = otus_desc->vif_num;
+ ar->fw.cmd_bufs = otus_desc->cmd_bufs;
+ ar->fw.address = le32_to_cpu(otus_desc->fw_address);
+ ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len);
+ ar->fw.mem_blocks = min_t(unsigned int, otus_desc->tx_descs, 0xfe);
+ atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
+ ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len);
+
+ if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num ||
+ ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs ||
+ ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 ||
+ ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 ||
+ !valid_cpu_addr(ar->fw.address)) {
+ dev_err(&ar->udev->dev, "firmware shows obvious signs of "
+ "malicious tampering.\n");
+ return -EINVAL;
+ }
+
+ ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr);
+ ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len);
+
+ if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >=
+ AR9170_MAC_BCN_LENGTH_MAX) {
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
+ if (SUPP(CARL9170FW_WLANTX_CAB)) {
+ if_comb_types |=
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+#ifdef CONFIG_MAC80211_MESH
+ if_comb_types |=
+ BIT(NL80211_IFTYPE_MESH_POINT);
+#endif /* CONFIG_MAC80211_MESH */
+ }
+ }
+
+ carl9170_fw_set_if_combinations(ar, if_comb_types);
+
+ ar->hw->wiphy->interface_modes |= if_comb_types;
+
+ ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /* As IBSS Encryption is software-based, IBSS RSN is supported. */
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_SUPPORTS_TDLS;
+
+#undef SUPP
+ return carl9170_fw_tx_sequence(ar);
+}
+
+static struct carl9170fw_desc_head *
+carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len)
+{
+ int scan = 0, found = 0;
+
+ if (!carl9170fw_size_check(len)) {
+ dev_err(&ar->udev->dev, "firmware size is out of bound.\n");
+ return NULL;
+ }
+
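+ /*
+ * Byte-wise scan for the "OTAR" magic: "found" counts consecutive
+ * matching magic bytes and is reset on a mismatch; on success the
+ * descriptor head starts at the first matched byte.
+ */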
+ while (scan < len - sizeof(struct carl9170fw_desc_head)) {
+ if (fw_data[scan++] == otus_magic[found])
+ found++;
+ else
+ found = 0;
+
+ if (scan >= len)
+ break;
+
+ if (found == sizeof(otus_magic))
+ break;
+ }
+
+ if (found != sizeof(otus_magic))
+ return NULL;
+
+ return (void *)&fw_data[scan - found];
+}
+
+int carl9170_parse_firmware(struct ar9170 *ar)
+{
+ const struct carl9170fw_desc_head *fw_desc = NULL;
+ const struct firmware *fw = ar->fw.fw;
+ unsigned long header_offset = 0;
+ int err;
+
+ if (WARN_ON(!fw))
+ return -EINVAL;
+
+ fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size);
+
+ if (!fw_desc) {
+ dev_err(&ar->udev->dev, "unsupported firmware.\n");
+ return -ENODATA;
+ }
+
+ header_offset = (unsigned long)fw_desc - (unsigned long)fw->data;
+
+ err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset);
+ if (err) {
+ dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err);
+ return err;
+ }
+
+ ar->fw.desc = fw_desc;
+
+ carl9170_fw_info(ar);
+
+ err = carl9170_fw(ar, fw->data, fw->size);
+ if (err) {
+ dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
new file mode 100644
index 000000000..9111d4ffc
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -0,0 +1,326 @@
+/*
+ * Shared Atheros AR9170 Header
+ *
+ * Firmware command interface definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __CARL9170_SHARED_FWCMD_H
+#define __CARL9170_SHARED_FWCMD_H
+
+#define CARL9170_MAX_CMD_LEN 64
+#define CARL9170_MAX_CMD_PAYLOAD_LEN 60
+
+#define CARL9170FW_API_MIN_VER 1
+#define CARL9170FW_API_MAX_VER 1
+
+enum carl9170_cmd_oids {
+ CARL9170_CMD_RREG = 0x00,
+ CARL9170_CMD_WREG = 0x01,
+ CARL9170_CMD_ECHO = 0x02,
+ CARL9170_CMD_SWRST = 0x03,
+ CARL9170_CMD_REBOOT = 0x04,
+ CARL9170_CMD_BCN_CTRL = 0x05,
+ CARL9170_CMD_READ_TSF = 0x06,
+ CARL9170_CMD_RX_FILTER = 0x07,
+ CARL9170_CMD_WOL = 0x08,
+ CARL9170_CMD_TALLY = 0x09,
+
+ /* CAM */
+ CARL9170_CMD_EKEY = 0x10,
+ CARL9170_CMD_DKEY = 0x11,
+
+ /* RF / PHY */
+ CARL9170_CMD_FREQUENCY = 0x20,
+ CARL9170_CMD_RF_INIT = 0x21,
+ CARL9170_CMD_SYNTH = 0x22,
+ CARL9170_CMD_FREQ_START = 0x23,
+ CARL9170_CMD_PSM = 0x24,
+
+ /* Asynchronous command flag */
+ CARL9170_CMD_ASYNC_FLAG = 0x40,
+ CARL9170_CMD_WREG_ASYNC = (CARL9170_CMD_WREG |
+ CARL9170_CMD_ASYNC_FLAG),
+ CARL9170_CMD_REBOOT_ASYNC = (CARL9170_CMD_REBOOT |
+ CARL9170_CMD_ASYNC_FLAG),
+ CARL9170_CMD_BCN_CTRL_ASYNC = (CARL9170_CMD_BCN_CTRL |
+ CARL9170_CMD_ASYNC_FLAG),
+ CARL9170_CMD_PSM_ASYNC = (CARL9170_CMD_PSM |
+ CARL9170_CMD_ASYNC_FLAG),
+
+ /* responses and traps */
+ CARL9170_RSP_FLAG = 0xc0,
+ CARL9170_RSP_PRETBTT = 0xc0,
+ CARL9170_RSP_TXCOMP = 0xc1,
+ CARL9170_RSP_BEACON_CONFIG = 0xc2,
+ CARL9170_RSP_ATIM = 0xc3,
+ CARL9170_RSP_WATCHDOG = 0xc6,
+ CARL9170_RSP_TEXT = 0xca,
+ CARL9170_RSP_HEXDUMP = 0xcc,
+ CARL9170_RSP_RADAR = 0xcd,
+ CARL9170_RSP_GPIO = 0xce,
+ CARL9170_RSP_BOOT = 0xcf,
+};
+
+struct carl9170_set_key_cmd {
+ __le16 user;
+ __le16 keyId;
+ __le16 type;
+ u8 macAddr[6];
+ u32 key[4];
+} __packed __aligned(4);
+#define CARL9170_SET_KEY_CMD_SIZE 28
+
+struct carl9170_disable_key_cmd {
+ __le16 user;
+ __le16 padding;
+} __packed __aligned(4);
+#define CARL9170_DISABLE_KEY_CMD_SIZE 4
+
+struct carl9170_u32_list {
+ u32 vals[0];
+} __packed;
+
+struct carl9170_reg_list {
+ __le32 regs[0];
+} __packed;
+
+struct carl9170_write_reg {
+ struct {
+ __le32 addr;
+ __le32 val;
+ } regs[0] __packed;
+} __packed;
+
+#define CARL9170FW_PHY_HT_ENABLE 0x4
+#define CARL9170FW_PHY_HT_DYN2040 0x8
+#define CARL9170FW_PHY_HT_EXT_CHAN_OFF 0x3
+#define CARL9170FW_PHY_HT_EXT_CHAN_OFF_S 2
+
+struct carl9170_rf_init {
+ __le32 freq;
+ u8 ht_settings;
+ u8 padding2[3];
+ __le32 delta_slope_coeff_exp;
+ __le32 delta_slope_coeff_man;
+ __le32 delta_slope_coeff_exp_shgi;
+ __le32 delta_slope_coeff_man_shgi;
+ __le32 finiteLoopCount;
+} __packed;
+#define CARL9170_RF_INIT_SIZE 28
+
+struct carl9170_rf_init_result {
+ __le32 ret; /* AR9170_PHY_REG_AGC_CONTROL */
+} __packed;
+#define CARL9170_RF_INIT_RESULT_SIZE 4
+
+#define CARL9170_PSM_SLEEP 0x1000
+#define CARL9170_PSM_SOFTWARE 0
+#define CARL9170_PSM_WAKE 0 /* internally used. */
+#define CARL9170_PSM_COUNTER 0xfff
+#define CARL9170_PSM_COUNTER_S 0
+
+struct carl9170_psm {
+ __le32 state;
+} __packed;
+#define CARL9170_PSM_SIZE 4
+
+/*
+ * Note: If a bit in rx_filter is set, then the
+ * frames which match that condition are
+ * FILTERED/REMOVED/DISCARDED!
+ * (This can be a bit confusing, because some
+ * people expect it to work the exact opposite
+ * way, so watch out!)
+ */
+struct carl9170_rx_filter_cmd {
+ __le32 rx_filter;
+} __packed;
+#define CARL9170_RX_FILTER_CMD_SIZE 4
+
+#define CARL9170_RX_FILTER_BAD 0x01
+#define CARL9170_RX_FILTER_OTHER_RA 0x02
+#define CARL9170_RX_FILTER_DECRY_FAIL 0x04
+#define CARL9170_RX_FILTER_CTL_OTHER 0x08
+#define CARL9170_RX_FILTER_CTL_PSPOLL 0x10
+#define CARL9170_RX_FILTER_CTL_BACKR 0x20
+#define CARL9170_RX_FILTER_MGMT 0x40
+#define CARL9170_RX_FILTER_DATA 0x80
+#define CARL9170_RX_FILTER_EVERYTHING (~0)
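+/*
+ * For example (purely illustrative), a filter word of
+ * (CARL9170_RX_FILTER_BAD | CARL9170_RX_FILTER_CTL_OTHER) asks the
+ * firmware to drop corrupted frames and "other" control frames,
+ * while everything else still reaches the host.
+ */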
+
+struct carl9170_bcn_ctrl_cmd {
+ __le32 vif_id;
+ __le32 mode;
+ __le32 bcn_addr;
+ __le32 bcn_len;
+} __packed;
+#define CARL9170_BCN_CTRL_CMD_SIZE 16
+
+#define CARL9170_BCN_CTRL_DRAIN 0
+#define CARL9170_BCN_CTRL_CAB_TRIGGER 1
+
+struct carl9170_wol_cmd {
+ __le32 flags;
+ u8 mac[6];
+ u8 bssid[6];
+ __le32 null_interval;
+ __le32 free_for_use2;
+ __le32 mask;
+ u8 pattern[32];
+} __packed;
+
+#define CARL9170_WOL_CMD_SIZE 60
+
+#define CARL9170_WOL_DISCONNECT 1
+#define CARL9170_WOL_MAGIC_PKT 2
+
+struct carl9170_cmd_head {
+ union {
+ struct {
+ u8 len;
+ u8 cmd;
+ u8 seq;
+ u8 ext;
+ } __packed;
+
+ u32 hdr_data;
+ } __packed;
+} __packed;
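+/*
+ * The anonymous union above lets the 4-byte command header be
+ * accessed either field-by-field (len/cmd/seq/ext) or as a single
+ * 32-bit word (hdr_data), e.g. when the whole header is copied or
+ * compared in one go.
+ */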
+
+struct carl9170_cmd {
+ struct carl9170_cmd_head hdr;
+ union {
+ struct carl9170_set_key_cmd setkey;
+ struct carl9170_disable_key_cmd disablekey;
+ struct carl9170_u32_list echo;
+ struct carl9170_reg_list rreg;
+ struct carl9170_write_reg wreg;
+ struct carl9170_rf_init rf_init;
+ struct carl9170_psm psm;
+ struct carl9170_wol_cmd wol;
+ struct carl9170_bcn_ctrl_cmd bcn_ctrl;
+ struct carl9170_rx_filter_cmd rx_filter;
+ u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
+ } __packed;
+} __packed __aligned(4);
+
+#define CARL9170_TX_STATUS_QUEUE 3
+#define CARL9170_TX_STATUS_QUEUE_S 0
+#define CARL9170_TX_STATUS_RIX_S 2
+#define CARL9170_TX_STATUS_RIX (3 << CARL9170_TX_STATUS_RIX_S)
+#define CARL9170_TX_STATUS_TRIES_S 4
+#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S)
+#define CARL9170_TX_STATUS_SUCCESS 0x80
+
+#ifdef __CARL9170FW__
+/*
+ * NOTE:
+ * Both structs [carl9170_tx_status and _carl9170_tx_status]
+ * need to be "bit for bit" in sync.
+ */
+struct carl9170_tx_status {
+ /*
+ * Beware of compiler bugs in all gcc pre 4.4!
+ */
+
+ u8 cookie;
+ u8 queue:2;
+ u8 rix:2;
+ u8 tries:3;
+ u8 success:1;
+} __packed;
+#endif /* __CARL9170FW__ */
+
+struct _carl9170_tx_status {
+ /*
+ * This version should be immune to all alignment bugs.
+ */
+
+ u8 cookie;
+ u8 info;
+} __packed;
+#define CARL9170_TX_STATUS_SIZE 2
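+/*
+ * The "info" byte carries the same fields as the bitfield variant
+ * above; decoding it boils down to (illustrative only):
+ *	queue = (info & CARL9170_TX_STATUS_QUEUE) >> CARL9170_TX_STATUS_QUEUE_S;
+ *	tries = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
+ *	success = !!(info & CARL9170_TX_STATUS_SUCCESS);
+ */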
+
+#define CARL9170_RSP_TX_STATUS_NUM (CARL9170_MAX_CMD_PAYLOAD_LEN / \
+ sizeof(struct _carl9170_tx_status))
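+/* i.e. up to 30 tx status reports (60 / 2 bytes) fit into one response. */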
+
+#define CARL9170_TX_MAX_RATE_TRIES 7
+
+#define CARL9170_TX_MAX_RATES 4
+#define CARL9170_TX_MAX_RETRY_RATES (CARL9170_TX_MAX_RATES - 1)
+#define CARL9170_ERR_MAGIC "ERR:"
+#define CARL9170_BUG_MAGIC "BUG:"
+
+struct carl9170_gpio {
+ __le32 gpio;
+} __packed;
+#define CARL9170_GPIO_SIZE 4
+
+struct carl9170_tsf_rsp {
+ union {
+ __le32 tsf[2];
+ __le64 tsf_64;
+ } __packed;
+} __packed;
+#define CARL9170_TSF_RSP_SIZE 8
+
+struct carl9170_tally_rsp {
+ __le32 active;
+ __le32 cca;
+ __le32 tx_time;
+ __le32 rx_total;
+ __le32 rx_overrun;
+ __le32 tick;
+} __packed;
+
+struct carl9170_rsp {
+ struct carl9170_cmd_head hdr;
+
+ union {
+ struct carl9170_rf_init_result rf_init_res;
+ struct carl9170_u32_list rreg_res;
+ struct carl9170_u32_list echo;
+#ifdef __CARL9170FW__
+ struct carl9170_tx_status tx_status[0];
+#endif /* __CARL9170FW__ */
+ struct _carl9170_tx_status _tx_status[0];
+ struct carl9170_gpio gpio;
+ struct carl9170_tsf_rsp tsf;
+ struct carl9170_psm psm;
+ struct carl9170_tally_rsp tally;
+ u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
+ } __packed;
+} __packed __aligned(4);
+
+#endif /* __CARL9170_SHARED_FWCMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
new file mode 100644
index 000000000..66848d47c
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -0,0 +1,277 @@
+/*
+ * Shared CARL9170 Header
+ *
+ * Firmware descriptor format
+ *
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ */
+
+#ifndef __CARL9170_SHARED_FWDESC_H
+#define __CARL9170_SHARED_FWDESC_H
+
+/* NOTE: Don't mess with the order of the flags! */
+enum carl9170fw_feature_list {
+ /* Always set */
+ CARL9170FW_DUMMY_FEATURE,
+
+ /*
+ * Indicates that this image has a special boot block which
+ * prevents legacy drivers from driving the firmware.
+ */
+ CARL9170FW_MINIBOOT,
+
+ /* usb registers are initialized by the firmware */
+ CARL9170FW_USB_INIT_FIRMWARE,
+
+ /* command traps & notifications are sent through EP2 */
+ CARL9170FW_USB_RESP_EP2,
+
+ /* usb download (app -> fw) stream */
+ CARL9170FW_USB_DOWN_STREAM,
+
+ /* usb upload (fw -> app) stream */
+ CARL9170FW_USB_UP_STREAM,
+
+ /* unusable - reserved to flag non-functional debug firmware images */
+ CARL9170FW_UNUSABLE,
+
+ /* AR9170_CMD_RF_INIT, AR9170_CMD_FREQ_START, AR9170_CMD_FREQUENCY */
+ CARL9170FW_COMMAND_PHY,
+
+ /* AR9170_CMD_EKEY, AR9170_CMD_DKEY */
+ CARL9170FW_COMMAND_CAM,
+
+ /* Firmware has a software Content After Beacon Queueing mechanism */
+ CARL9170FW_WLANTX_CAB,
+
+ /* The firmware is capable of responding to incoming BAR frames */
+ CARL9170FW_HANDLE_BACK_REQ,
+
+ /* GPIO Interrupt | CARL9170_RSP_GPIO */
+ CARL9170FW_GPIO_INTERRUPT,
+
+ /* Firmware PSM support | CARL9170_CMD_PSM */
+ CARL9170FW_PSM,
+
+ /* Firmware RX filter | CARL9170_CMD_RX_FILTER */
+ CARL9170FW_RX_FILTER,
+
+ /* Wake up on WLAN */
+ CARL9170FW_WOL,
+
+ /* Firmware supports PSM in the 5 GHz band */
+ CARL9170FW_FIXED_5GHZ_PSM,
+
+ /* HW (ANI, CCA, MIB) tally counters */
+ CARL9170FW_HW_COUNTERS,
+
+ /* Firmware will pass BA when BARs are queued */
+ CARL9170FW_RX_BA_FILTER,
+
+ /* KEEP LAST */
+ __CARL9170FW_FEATURE_NUM
+};
+
+#define OTUS_MAGIC "OTAR"
+#define MOTD_MAGIC "MOTD"
+#define FIX_MAGIC "FIX\0"
+#define DBG_MAGIC "DBG\0"
+#define CHK_MAGIC "CHK\0"
+#define TXSQ_MAGIC "TXSQ"
+#define WOL_MAGIC "WOL\0"
+#define LAST_MAGIC "LAST"
+
+#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
+#define CARL9170FW_SET_MONTH(m) ((((m) - 1) % 12) * 31)
+#define CARL9170FW_SET_YEAR(y) (((y) - 10) * 372)
+
+#define CARL9170FW_GET_DAY(d) (((d) % 31) + 1)
+#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
+#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
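+/*
+ * Example: SET_YEAR(11) + SET_MONTH(6) + SET_DAY(30)
+ * = 1 * 372 + 5 * 31 + 29 = 556, and GET_YEAR(556) = 11,
+ * GET_MONTH(556) = 6, GET_DAY(556) = 30 recover the original
+ * values, so one integer holds the whole build date.
+ */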
+
+#define CARL9170FW_MAGIC_SIZE 4
+
+struct carl9170fw_desc_head {
+ u8 magic[CARL9170FW_MAGIC_SIZE];
+ __le16 length;
+ u8 min_ver;
+ u8 cur_ver;
+} __packed;
+#define CARL9170FW_DESC_HEAD_SIZE \
+ (sizeof(struct carl9170fw_desc_head))
+
+#define CARL9170FW_OTUS_DESC_MIN_VER 6
+#define CARL9170FW_OTUS_DESC_CUR_VER 7
+struct carl9170fw_otus_desc {
+ struct carl9170fw_desc_head head;
+ __le32 feature_set;
+ __le32 fw_address;
+ __le32 bcn_addr;
+ __le16 bcn_len;
+ __le16 miniboot_size;
+ __le16 tx_frag_len;
+ __le16 rx_max_frame_len;
+ u8 tx_descs;
+ u8 cmd_bufs;
+ u8 api_ver;
+ u8 vif_num;
+} __packed;
+#define CARL9170FW_OTUS_DESC_SIZE \
+ (sizeof(struct carl9170fw_otus_desc))
+
+#define CARL9170FW_MOTD_STRING_LEN 24
+#define CARL9170FW_MOTD_RELEASE_LEN 20
+#define CARL9170FW_MOTD_DESC_MIN_VER 1
+#define CARL9170FW_MOTD_DESC_CUR_VER 2
+struct carl9170fw_motd_desc {
+ struct carl9170fw_desc_head head;
+ __le32 fw_year_month_day;
+ char desc[CARL9170FW_MOTD_STRING_LEN];
+ char release[CARL9170FW_MOTD_RELEASE_LEN];
+} __packed;
+#define CARL9170FW_MOTD_DESC_SIZE \
+ (sizeof(struct carl9170fw_motd_desc))
+
+#define CARL9170FW_FIX_DESC_MIN_VER 1
+#define CARL9170FW_FIX_DESC_CUR_VER 2
+struct carl9170fw_fix_entry {
+ __le32 address;
+ __le32 mask;
+ __le32 value;
+} __packed;
+
+struct carl9170fw_fix_desc {
+ struct carl9170fw_desc_head head;
+ struct carl9170fw_fix_entry data[0];
+} __packed;
+#define CARL9170FW_FIX_DESC_SIZE \
+ (sizeof(struct carl9170fw_fix_desc))
+
+#define CARL9170FW_DBG_DESC_MIN_VER 1
+#define CARL9170FW_DBG_DESC_CUR_VER 3
+struct carl9170fw_dbg_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 bogoclock_addr;
+ __le32 counter_addr;
+ __le32 rx_total_addr;
+ __le32 rx_overrun_addr;
+ __le32 rx_filter;
+
+ /* Put your debugging definitions here */
+} __packed;
+#define CARL9170FW_DBG_DESC_SIZE \
+ (sizeof(struct carl9170fw_dbg_desc))
+
+#define CARL9170FW_CHK_DESC_MIN_VER 1
+#define CARL9170FW_CHK_DESC_CUR_VER 2
+struct carl9170fw_chk_desc {
+ struct carl9170fw_desc_head head;
+ __le32 fw_crc32;
+ __le32 hdr_crc32;
+} __packed;
+#define CARL9170FW_CHK_DESC_SIZE \
+ (sizeof(struct carl9170fw_chk_desc))
+
+#define CARL9170FW_TXSQ_DESC_MIN_VER 1
+#define CARL9170FW_TXSQ_DESC_CUR_VER 1
+struct carl9170fw_txsq_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE \
+ (sizeof(struct carl9170fw_txsq_desc))
+
+#define CARL9170FW_WOL_DESC_MIN_VER 1
+#define CARL9170FW_WOL_DESC_CUR_VER 1
+struct carl9170fw_wol_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 supported_triggers; /* CARL9170_WOL_ */
+} __packed;
+#define CARL9170FW_WOL_DESC_SIZE \
+ (sizeof(struct carl9170fw_wol_desc))
+
+#define CARL9170FW_LAST_DESC_MIN_VER 1
+#define CARL9170FW_LAST_DESC_CUR_VER 2
+struct carl9170fw_last_desc {
+ struct carl9170fw_desc_head head;
+} __packed;
+#define CARL9170FW_LAST_DESC_SIZE \
+ (sizeof(struct carl9170fw_last_desc))
+
+#define CARL9170FW_DESC_MAX_LENGTH 8192
+
+#define CARL9170FW_FILL_DESC(_magic, _length, _min_ver, _cur_ver) \
+ .head = { \
+ .magic = _magic, \
+ .length = cpu_to_le16(_length), \
+ .min_ver = _min_ver, \
+ .cur_ver = _cur_ver, \
+ }
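+/*
+ * Meant for static initializers, e.g. (purely illustrative):
+ *	struct carl9170fw_chk_desc chk = {
+ *		CARL9170FW_FILL_DESC(CHK_MAGIC,
+ *			sizeof(struct carl9170fw_chk_desc),
+ *			CARL9170FW_CHK_DESC_MIN_VER,
+ *			CARL9170FW_CHK_DESC_CUR_VER),
+ *	};
+ */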
+
+static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
+ u8 magic[CARL9170FW_MAGIC_SIZE],
+ __le16 length, u8 min_ver, u8 cur_ver)
+{
+ head->magic[0] = magic[0];
+ head->magic[1] = magic[1];
+ head->magic[2] = magic[2];
+ head->magic[3] = magic[3];
+
+ head->length = length;
+ head->min_ver = min_ver;
+ head->cur_ver = cur_ver;
+}
+
+#define carl9170fw_for_each_hdr(desc, fw_desc) \
+ for (desc = fw_desc; \
+ memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
+ le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
+ le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
+ desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
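+/*
+ * Usage sketch: walk all descriptors up to the LAST marker,
+ *	const struct carl9170fw_desc_head *pos;
+ *	carl9170fw_for_each_hdr(pos, fw_desc)
+ *		handle(pos);
+ * where handle() stands for whatever per-descriptor processing the
+ * caller needs. The loop also stops on an obviously bogus length
+ * field.
+ */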
+
+#define CHECK_HDR_VERSION(head, _min_ver) \
+ (((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver))
+
+static inline bool carl9170fw_supports(__le32 list, u8 feature)
+{
+ return le32_to_cpu(list) & BIT(feature);
+}
+
+static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
+ const u8 descid[CARL9170FW_MAGIC_SIZE],
+ u16 min_len, u8 compatible_revision)
+{
+ if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
+ descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
+ !CHECK_HDR_VERSION(head, compatible_revision) &&
+ (le16_to_cpu(head->length) >= min_len))
+ return true;
+
+ return false;
+}
+
+#define CARL9170FW_MIN_SIZE 32
+#define CARL9170FW_MAX_SIZE 16384
+
+static inline bool carl9170fw_size_check(unsigned int len)
+{
+ return (len <= CARL9170FW_MAX_SIZE && len >= CARL9170FW_MIN_SIZE);
+}
+
+#endif /* __CARL9170_SHARED_FWDESC_H */
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
new file mode 100644
index 000000000..0db874abd
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -0,0 +1,817 @@
+/*
+ * Shared Atheros AR9170 Header
+ *
+ * Register map, hardware-specific definitions
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __CARL9170_SHARED_HW_H
+#define __CARL9170_SHARED_HW_H
+
+/* High Speed UART */
+#define AR9170_UART_REG_BASE 0x1c0000
+
+/* Definitions of interrupt registers */
+#define AR9170_UART_REG_RX_BUFFER (AR9170_UART_REG_BASE + 0x000)
+#define AR9170_UART_REG_TX_HOLDING (AR9170_UART_REG_BASE + 0x004)
+#define AR9170_UART_REG_FIFO_CONTROL (AR9170_UART_REG_BASE + 0x010)
+#define AR9170_UART_FIFO_CTRL_RESET_RX_FIFO 0x02
+#define AR9170_UART_FIFO_CTRL_RESET_TX_FIFO 0x04
+
+#define AR9170_UART_REG_LINE_CONTROL (AR9170_UART_REG_BASE + 0x014)
+#define AR9170_UART_REG_MODEM_CONTROL (AR9170_UART_REG_BASE + 0x018)
+#define AR9170_UART_MODEM_CTRL_DTR_BIT 0x01
+#define AR9170_UART_MODEM_CTRL_RTS_BIT 0x02
+#define AR9170_UART_MODEM_CTRL_INTERNAL_LOOP_BACK 0x10
+#define AR9170_UART_MODEM_CTRL_AUTO_RTS 0x20
+#define AR9170_UART_MODEM_CTRL_AUTO_CTR 0x40
+
+#define AR9170_UART_REG_LINE_STATUS (AR9170_UART_REG_BASE + 0x01c)
+#define AR9170_UART_LINE_STS_RX_DATA_READY 0x01
+#define AR9170_UART_LINE_STS_RX_BUFFER_OVERRUN 0x02
+#define AR9170_UART_LINE_STS_RX_BREAK_IND 0x10
+#define AR9170_UART_LINE_STS_TX_FIFO_NEAR_EMPTY 0x20
+#define AR9170_UART_LINE_STS_TRANSMITTER_EMPTY 0x40
+
+#define AR9170_UART_REG_MODEM_STATUS (AR9170_UART_REG_BASE + 0x020)
+#define AR9170_UART_MODEM_STS_CTS_CHANGE 0x01
+#define AR9170_UART_MODEM_STS_DSR_CHANGE 0x02
+#define AR9170_UART_MODEM_STS_DCD_CHANGE 0x08
+#define AR9170_UART_MODEM_STS_CTS_COMPL 0x10
+#define AR9170_UART_MODEM_STS_DSR_COMPL 0x20
+#define AR9170_UART_MODEM_STS_DCD_COMPL 0x80
+
+#define AR9170_UART_REG_SCRATCH (AR9170_UART_REG_BASE + 0x024)
+#define AR9170_UART_REG_DIVISOR_LSB (AR9170_UART_REG_BASE + 0x028)
+#define AR9170_UART_REG_DIVISOR_MSB (AR9170_UART_REG_BASE + 0x02c)
+#define AR9170_UART_REG_WORD_RX_BUFFER (AR9170_UART_REG_BASE + 0x034)
+#define AR9170_UART_REG_WORD_TX_HOLDING (AR9170_UART_REG_BASE + 0x038)
+#define AR9170_UART_REG_FIFO_COUNT (AR9170_UART_REG_BASE + 0x03c)
+#define AR9170_UART_REG_REMAINDER (AR9170_UART_REG_BASE + 0x04c)
+
+/* Timer */
+#define AR9170_TIMER_REG_BASE 0x1c1000
+
+#define AR9170_TIMER_REG_WATCH_DOG (AR9170_TIMER_REG_BASE + 0x000)
+#define AR9170_TIMER_REG_TIMER0 (AR9170_TIMER_REG_BASE + 0x010)
+#define AR9170_TIMER_REG_TIMER1 (AR9170_TIMER_REG_BASE + 0x014)
+#define AR9170_TIMER_REG_TIMER2 (AR9170_TIMER_REG_BASE + 0x018)
+#define AR9170_TIMER_REG_TIMER3 (AR9170_TIMER_REG_BASE + 0x01c)
+#define AR9170_TIMER_REG_TIMER4 (AR9170_TIMER_REG_BASE + 0x020)
+#define AR9170_TIMER_REG_CONTROL (AR9170_TIMER_REG_BASE + 0x024)
+#define AR9170_TIMER_CTRL_DISABLE_CLOCK 0x100
+
+#define AR9170_TIMER_REG_INTERRUPT (AR9170_TIMER_REG_BASE + 0x028)
+#define AR9170_TIMER_INT_TIMER0 0x001
+#define AR9170_TIMER_INT_TIMER1 0x002
+#define AR9170_TIMER_INT_TIMER2 0x004
+#define AR9170_TIMER_INT_TIMER3 0x008
+#define AR9170_TIMER_INT_TIMER4 0x010
+#define AR9170_TIMER_INT_TICK_TIMER 0x100
+
+#define AR9170_TIMER_REG_TICK_TIMER (AR9170_TIMER_REG_BASE + 0x030)
+#define AR9170_TIMER_REG_CLOCK_LOW (AR9170_TIMER_REG_BASE + 0x040)
+#define AR9170_TIMER_REG_CLOCK_HIGH (AR9170_TIMER_REG_BASE + 0x044)
+
+#define AR9170_MAC_REG_BASE 0x1c3000
+
+#define AR9170_MAC_REG_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x500)
+#define AR9170_MAC_POWER_STATE_CTRL_RESET 0x20
+
+#define AR9170_MAC_REG_MAC_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x50c)
+
+#define AR9170_MAC_REG_INT_CTRL (AR9170_MAC_REG_BASE + 0x510)
+#define AR9170_MAC_INT_TXC BIT(0)
+#define AR9170_MAC_INT_RXC BIT(1)
+#define AR9170_MAC_INT_RETRY_FAIL BIT(2)
+#define AR9170_MAC_INT_WAKEUP BIT(3)
+#define AR9170_MAC_INT_ATIM BIT(4)
+#define AR9170_MAC_INT_DTIM BIT(5)
+#define AR9170_MAC_INT_CFG_BCN BIT(6)
+#define AR9170_MAC_INT_ABORT BIT(7)
+#define AR9170_MAC_INT_QOS BIT(8)
+#define AR9170_MAC_INT_MIMO_PS BIT(9)
+#define AR9170_MAC_INT_KEY_GEN BIT(10)
+#define AR9170_MAC_INT_DECRY_NOUSER BIT(11)
+#define AR9170_MAC_INT_RADAR BIT(12)
+#define AR9170_MAC_INT_QUIET_FRAME BIT(13)
+#define AR9170_MAC_INT_PRETBTT BIT(14)
+
+#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
+#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
+
+#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51c)
+#define AR9170_MAC_ATIM_PERIOD_S 0
+#define AR9170_MAC_ATIM_PERIOD 0x0000ffff
+
+#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
+#define AR9170_MAC_BCN_PERIOD_S 0
+#define AR9170_MAC_BCN_PERIOD 0x0000ffff
+#define AR9170_MAC_BCN_DTIM_S 16
+#define AR9170_MAC_BCN_DTIM 0x00ff0000
+#define AR9170_MAC_BCN_AP_MODE BIT(24)
+#define AR9170_MAC_BCN_IBSS_MODE BIT(25)
+#define AR9170_MAC_BCN_PWR_MGT BIT(26)
+#define AR9170_MAC_BCN_STA_PS BIT(27)
+
+#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
+#define AR9170_MAC_PRETBTT_S 0
+#define AR9170_MAC_PRETBTT 0x0000ffff
+#define AR9170_MAC_PRETBTT2_S 16
+#define AR9170_MAC_PRETBTT2 0xffff0000
+
+#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
+#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
+#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
+#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
+
+#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
+#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
+
+#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62c)
+
+#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
+#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
+#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
+#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
+#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
+#define AR9170_MAC_REG_AFTER_PNP (AR9170_MAC_REG_BASE + 0x648)
+#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64c)
+
+#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
+#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
+#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
+#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
+#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
+#define AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE BIT(2)
+#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
+#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
+
+#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
+#define AR9170_MAC_REG_MISC_684 (AR9170_MAC_REG_BASE + 0x684)
+#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
+
+#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
+#define AR9170_MAC_FTF_ASSOC_REQ BIT(0)
+#define AR9170_MAC_FTF_ASSOC_RESP BIT(1)
+#define AR9170_MAC_FTF_REASSOC_REQ BIT(2)
+#define AR9170_MAC_FTF_REASSOC_RESP BIT(3)
+#define AR9170_MAC_FTF_PRB_REQ BIT(4)
+#define AR9170_MAC_FTF_PRB_RESP BIT(5)
+#define AR9170_MAC_FTF_BIT6 BIT(6)
+#define AR9170_MAC_FTF_BIT7 BIT(7)
+#define AR9170_MAC_FTF_BEACON BIT(8)
+#define AR9170_MAC_FTF_ATIM BIT(9)
+#define AR9170_MAC_FTF_DEASSOC BIT(10)
+#define AR9170_MAC_FTF_AUTH BIT(11)
+#define AR9170_MAC_FTF_DEAUTH BIT(12)
+#define AR9170_MAC_FTF_BIT13 BIT(13)
+#define AR9170_MAC_FTF_BIT14 BIT(14)
+#define AR9170_MAC_FTF_BIT15 BIT(15)
+#define AR9170_MAC_FTF_BAR BIT(24)
+#define AR9170_MAC_FTF_BA BIT(25)
+#define AR9170_MAC_FTF_PSPOLL BIT(26)
+#define AR9170_MAC_FTF_RTS BIT(27)
+#define AR9170_MAC_FTF_CTS BIT(28)
+#define AR9170_MAC_FTF_ACK BIT(29)
+#define AR9170_MAC_FTF_CFE BIT(30)
+#define AR9170_MAC_FTF_CFE_ACK BIT(31)
+#define AR9170_MAC_FTF_DEFAULTS 0x0500ffff
+#define AR9170_MAC_FTF_MONITOR 0xff00ffff
+
+#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
+#define AR9170_MAC_REG_ACK_TPC (AR9170_MAC_REG_BASE + 0x694)
+#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
+#define AR9170_MAC_REG_RX_TIMEOUT_COUNT (AR9170_MAC_REG_BASE + 0x69c)
+#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6a0)
+#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6a4)
+#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6a8)
+#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6ac)
+#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6b0)
+#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6bc)
+#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
+#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
+#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
+#define AR9170_MAC_BACKOFF_CCA BIT(24)
+#define AR9170_MAC_BACKOFF_TX_PEX BIT(25)
+#define AR9170_MAC_BACKOFF_RX_PE BIT(26)
+#define AR9170_MAC_BACKOFF_MD_READY BIT(27)
+#define AR9170_MAC_BACKOFF_TX_PE BIT(28)
+
+#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
+
+#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
+
+#define AR9170_MAC_REG_CHANNEL_BUSY (AR9170_MAC_REG_BASE + 0x6e8)
+#define AR9170_MAC_REG_EXT_BUSY (AR9170_MAC_REG_BASE + 0x6ec)
+
+#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6f0)
+#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6f4)
+#define AR9170_MAC_REG_ACK_FC (AR9170_MAC_REG_BASE + 0x6f8)
+
+#define AR9170_MAC_REG_CAM_MODE (AR9170_MAC_REG_BASE + 0x700)
+#define AR9170_MAC_CAM_IBSS 0xe0
+#define AR9170_MAC_CAM_AP 0xa1
+#define AR9170_MAC_CAM_STA 0x2
+#define AR9170_MAC_CAM_AP_WDS 0x3
+#define AR9170_MAC_CAM_DEFAULTS (0xf << 24)
+#define AR9170_MAC_CAM_HOST_PENDING 0x80000000
+
+#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
+#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
+
+#define AR9170_MAC_REG_CAM_ADDR (AR9170_MAC_REG_BASE + 0x70c)
+#define AR9170_MAC_CAM_ADDR_WRITE 0x80000000
+#define AR9170_MAC_REG_CAM_DATA0 (AR9170_MAC_REG_BASE + 0x720)
+#define AR9170_MAC_REG_CAM_DATA1 (AR9170_MAC_REG_BASE + 0x724)
+#define AR9170_MAC_REG_CAM_DATA2 (AR9170_MAC_REG_BASE + 0x728)
+#define AR9170_MAC_REG_CAM_DATA3 (AR9170_MAC_REG_BASE + 0x72c)
+
+#define AR9170_MAC_REG_CAM_DBG0 (AR9170_MAC_REG_BASE + 0x730)
+#define AR9170_MAC_REG_CAM_DBG1 (AR9170_MAC_REG_BASE + 0x734)
+#define AR9170_MAC_REG_CAM_DBG2 (AR9170_MAC_REG_BASE + 0x738)
+#define AR9170_MAC_REG_CAM_STATE (AR9170_MAC_REG_BASE + 0x73c)
+#define AR9170_MAC_CAM_STATE_READ_PENDING 0x40000000
+#define AR9170_MAC_CAM_STATE_WRITE_PENDING 0x80000000
+
+#define AR9170_MAC_REG_CAM_TXKEY (AR9170_MAC_REG_BASE + 0x740)
+#define AR9170_MAC_REG_CAM_RXKEY (AR9170_MAC_REG_BASE + 0x750)
+
+#define AR9170_MAC_REG_CAM_TX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x760)
+#define AR9170_MAC_REG_CAM_RX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x770)
+#define AR9170_MAC_REG_CAM_TX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x780)
+#define AR9170_MAC_REG_CAM_RX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x790)
+
+#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xb00)
+#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xb04)
+#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xb08)
+#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xb0c)
+#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xb10)
+#define AR9170_MAC_REG_AC2_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xb14)
+#define AR9170_MAC_REG_AC4_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xb18)
+#define AR9170_MAC_REG_TXOP_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0xb1c)
+#define AR9170_MAC_REG_TXOP_ACK_INTERVAL (AR9170_MAC_REG_BASE + 0xb20)
+#define AR9170_MAC_REG_CONTENTION_POINT (AR9170_MAC_REG_BASE + 0xb24)
+#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xb28)
+#define AR9170_MAC_REG_TID_CFACK_CFEND_RATE (AR9170_MAC_REG_BASE + 0xb2c)
+#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xb30)
+#define AR9170_MAC_REG_TKIP_TSC (AR9170_MAC_REG_BASE + 0xb34)
+#define AR9170_MAC_REG_TXOP_DURATION (AR9170_MAC_REG_BASE + 0xb38)
+#define AR9170_MAC_REG_TX_QOS_THRESHOLD (AR9170_MAC_REG_BASE + 0xb3c)
+#define AR9170_MAC_REG_QOS_PRIORITY_VIRTUAL_CCA (AR9170_MAC_REG_BASE + 0xb40)
+#define AR9170_MAC_VIRTUAL_CCA_Q0 BIT(15)
+#define AR9170_MAC_VIRTUAL_CCA_Q1 BIT(16)
+#define AR9170_MAC_VIRTUAL_CCA_Q2 BIT(17)
+#define AR9170_MAC_VIRTUAL_CCA_Q3 BIT(18)
+#define AR9170_MAC_VIRTUAL_CCA_Q4 BIT(19)
+#define AR9170_MAC_VIRTUAL_CCA_ALL (0xf8000)
+
+#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xb44)
+#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xb48)
+
+#define AR9170_MAC_REG_AMPDU_COUNT (AR9170_MAC_REG_BASE + 0xb88)
+#define AR9170_MAC_REG_MPDU_COUNT (AR9170_MAC_REG_BASE + 0xb8c)
+
+#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xb9c)
+#define AR9170_MAC_AMPDU_FACTOR 0x7f0000
+#define AR9170_MAC_AMPDU_FACTOR_S 16
+#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xba0)
+#define AR9170_MAC_AMPDU_DENSITY 0x7
+#define AR9170_MAC_AMPDU_DENSITY_S 0
+
+#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xbb0)
+#define AR9170_MAC_FCS_SWFCS 0x1
+#define AR9170_MAC_FCS_FIFO_PROT 0x4
+
+#define AR9170_MAC_REG_RTS_CTS_TPC (AR9170_MAC_REG_BASE + 0xbb4)
+#define AR9170_MAC_REG_CFEND_QOSNULL_TPC (AR9170_MAC_REG_BASE + 0xbb8)
+
+#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xc00)
+#define AR9170_MAC_REG_RX_CONTROL (AR9170_MAC_REG_BASE + 0xc40)
+#define AR9170_MAC_RX_CTRL_DEAGG 0x1
+#define AR9170_MAC_RX_CTRL_SHORT_FILTER 0x2
+#define AR9170_MAC_RX_CTRL_SA_DA_SEARCH 0x20
+#define AR9170_MAC_RX_CTRL_PASS_TO_HOST BIT(28)
+#define AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER BIT(30)
+
+#define AR9170_MAC_REG_RX_CONTROL_1 (AR9170_MAC_REG_BASE + 0xc44)
+
+#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xc50)
+
+#define AR9170_MAC_REG_RX_MPDU (AR9170_MAC_REG_BASE + 0xca0)
+#define AR9170_MAC_REG_RX_DROPPED_MPDU (AR9170_MAC_REG_BASE + 0xca4)
+#define AR9170_MAC_REG_RX_DEL_MPDU (AR9170_MAC_REG_BASE + 0xca8)
+#define AR9170_MAC_REG_RX_PHY_MISC_ERROR (AR9170_MAC_REG_BASE + 0xcac)
+#define AR9170_MAC_REG_RX_PHY_XR_ERROR (AR9170_MAC_REG_BASE + 0xcb0)
+#define AR9170_MAC_REG_RX_PHY_OFDM_ERROR (AR9170_MAC_REG_BASE + 0xcb4)
+#define AR9170_MAC_REG_RX_PHY_CCK_ERROR (AR9170_MAC_REG_BASE + 0xcb8)
+#define AR9170_MAC_REG_RX_PHY_HT_ERROR (AR9170_MAC_REG_BASE + 0xcbc)
+#define AR9170_MAC_REG_RX_PHY_TOTAL (AR9170_MAC_REG_BASE + 0xcc0)
+
+#define AR9170_MAC_REG_DMA_TXQ_ADDR (AR9170_MAC_REG_BASE + 0xd00)
+#define AR9170_MAC_REG_DMA_TXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
+#define AR9170_MAC_REG_DMA_TXQ0_ADDR (AR9170_MAC_REG_BASE + 0xd00)
+#define AR9170_MAC_REG_DMA_TXQ0_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
+#define AR9170_MAC_REG_DMA_TXQ1_ADDR (AR9170_MAC_REG_BASE + 0xd08)
+#define AR9170_MAC_REG_DMA_TXQ1_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd0c)
+#define AR9170_MAC_REG_DMA_TXQ2_ADDR (AR9170_MAC_REG_BASE + 0xd10)
+#define AR9170_MAC_REG_DMA_TXQ2_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd14)
+#define AR9170_MAC_REG_DMA_TXQ3_ADDR (AR9170_MAC_REG_BASE + 0xd18)
+#define AR9170_MAC_REG_DMA_TXQ3_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd1c)
+#define AR9170_MAC_REG_DMA_TXQ4_ADDR (AR9170_MAC_REG_BASE + 0xd20)
+#define AR9170_MAC_REG_DMA_TXQ4_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd24)
+#define AR9170_MAC_REG_DMA_RXQ_ADDR (AR9170_MAC_REG_BASE + 0xd28)
+#define AR9170_MAC_REG_DMA_RXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd2c)
+
+#define AR9170_MAC_REG_DMA_TRIGGER (AR9170_MAC_REG_BASE + 0xd30)
+#define AR9170_DMA_TRIGGER_TXQ0 BIT(0)
+#define AR9170_DMA_TRIGGER_TXQ1 BIT(1)
+#define AR9170_DMA_TRIGGER_TXQ2 BIT(2)
+#define AR9170_DMA_TRIGGER_TXQ3 BIT(3)
+#define AR9170_DMA_TRIGGER_TXQ4 BIT(4)
+#define AR9170_DMA_TRIGGER_RXQ BIT(8)
+
+#define AR9170_MAC_REG_DMA_WLAN_STATUS (AR9170_MAC_REG_BASE + 0xd38)
+#define AR9170_MAC_REG_DMA_STATUS (AR9170_MAC_REG_BASE + 0xd3c)
+#define AR9170_MAC_REG_DMA_TXQ_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40)
+#define AR9170_MAC_REG_DMA_TXQ0_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40)
+#define AR9170_MAC_REG_DMA_TXQ1_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd44)
+#define AR9170_MAC_REG_DMA_TXQ2_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd48)
+#define AR9170_MAC_REG_DMA_TXQ3_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd4c)
+#define AR9170_MAC_REG_DMA_TXQ4_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd50)
+#define AR9170_MAC_REG_DMA_TXQ0Q1_LEN (AR9170_MAC_REG_BASE + 0xd54)
+#define AR9170_MAC_REG_DMA_TXQ2Q3_LEN (AR9170_MAC_REG_BASE + 0xd58)
+#define AR9170_MAC_REG_DMA_TXQ4_LEN (AR9170_MAC_REG_BASE + 0xd5c)
+
+#define AR9170_MAC_REG_DMA_TXQX_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd74)
+#define AR9170_MAC_REG_DMA_TXQX_FAIL_ADDR (AR9170_MAC_REG_BASE + 0xd78)
+#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xd7c)
+#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
+#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
+#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
+#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
+
+#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84)
+#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88)
+#define AR9170_MAC_BCN_LENGTH_MAX (512 - 32)
+
+#define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c)
+
+#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xd90)
+#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xd94)
+#define AR9170_BCN_CTRL_READY 0x01
+#define AR9170_BCN_CTRL_LOCK 0x02
+
+#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
+#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
+#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
+#define AR9170_MAC_BCN_HT1_HT_EN BIT(0)
+#define AR9170_MAC_BCN_HT1_GF_PMB BIT(1)
+#define AR9170_MAC_BCN_HT1_SP_EXP BIT(2)
+#define AR9170_MAC_BCN_HT1_TX_BF BIT(3)
+#define AR9170_MAC_BCN_HT1_PWR_CTRL_S 4
+#define AR9170_MAC_BCN_HT1_PWR_CTRL 0x70
+#define AR9170_MAC_BCN_HT1_TX_ANT1 BIT(7)
+#define AR9170_MAC_BCN_HT1_TX_ANT0 BIT(8)
+#define AR9170_MAC_BCN_HT1_NUM_LFT_S 9
+#define AR9170_MAC_BCN_HT1_NUM_LFT 0x600
+#define AR9170_MAC_BCN_HT1_BWC_20M_EXT BIT(16)
+#define AR9170_MAC_BCN_HT1_BWC_40M_SHARED BIT(17)
+#define AR9170_MAC_BCN_HT1_BWC_40M_DUP (BIT(16) | BIT(17))
+#define AR9170_MAC_BCN_HT1_BF_MCS_S 18
+#define AR9170_MAC_BCN_HT1_BF_MCS 0x1c0000
+#define AR9170_MAC_BCN_HT1_TPC_S 21
+#define AR9170_MAC_BCN_HT1_TPC 0x7e00000
+#define AR9170_MAC_BCN_HT1_CHAIN_MASK_S 27
+#define AR9170_MAC_BCN_HT1_CHAIN_MASK 0x38000000
+
+#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
+#define AR9170_MAC_BCN_HT2_MCS_S 0
+#define AR9170_MAC_BCN_HT2_MCS 0x7f
+#define AR9170_MAC_BCN_HT2_BW40 BIT(8)
+#define AR9170_MAC_BCN_HT2_SMOOTHING BIT(9)
+#define AR9170_MAC_BCN_HT2_SS BIT(10)
+#define AR9170_MAC_BCN_HT2_NSS BIT(11)
+#define AR9170_MAC_BCN_HT2_STBC_S 12
+#define AR9170_MAC_BCN_HT2_STBC 0x3000
+#define AR9170_MAC_BCN_HT2_ADV_COD BIT(14)
+#define AR9170_MAC_BCN_HT2_SGI BIT(15)
+#define AR9170_MAC_BCN_HT2_LEN_S 16
+#define AR9170_MAC_BCN_HT2_LEN 0xffff0000
+
+#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
+
+/* Random number generator */
+#define AR9170_RAND_REG_BASE 0x1d0000
+
+#define AR9170_RAND_REG_NUM (AR9170_RAND_REG_BASE + 0x000)
+#define AR9170_RAND_REG_MODE (AR9170_RAND_REG_BASE + 0x004)
+#define AR9170_RAND_MODE_MANUAL 0x000
+#define AR9170_RAND_MODE_FREE 0x001
+
+/* GPIO */
+#define AR9170_GPIO_REG_BASE 0x1d0100
+#define AR9170_GPIO_REG_PORT_TYPE (AR9170_GPIO_REG_BASE + 0x000)
+#define AR9170_GPIO_REG_PORT_DATA (AR9170_GPIO_REG_BASE + 0x004)
+#define AR9170_GPIO_PORT_LED_0 1
+#define AR9170_GPIO_PORT_LED_1 2
+/* WPS Button GPIO for TP-Link TL-WN821N */
+#define AR9170_GPIO_PORT_WPS_BUTTON_PRESSED 4
+
+/* Memory Controller */
+#define AR9170_MC_REG_BASE 0x1d1000
+
+#define AR9170_MC_REG_FLASH_WAIT_STATE (AR9170_MC_REG_BASE + 0x000)
+#define AR9170_MC_REG_SEEPROM_WP0 (AR9170_MC_REG_BASE + 0x400)
+#define AR9170_MC_REG_SEEPROM_WP1 (AR9170_MC_REG_BASE + 0x404)
+#define AR9170_MC_REG_SEEPROM_WP2 (AR9170_MC_REG_BASE + 0x408)
+
+/* Interrupt Controller */
+#define AR9170_MAX_INT_SRC 9
+#define AR9170_INT_REG_BASE 0x1d2000
+
+#define AR9170_INT_REG_FLAG (AR9170_INT_REG_BASE + 0x000)
+#define AR9170_INT_REG_FIQ_MASK (AR9170_INT_REG_BASE + 0x004)
+#define AR9170_INT_REG_IRQ_MASK (AR9170_INT_REG_BASE + 0x008)
+/* INT_REG_FLAG, INT_REG_FIQ_MASK and INT_REG_IRQ_MASK */
+#define AR9170_INT_FLAG_WLAN 0x001
+#define AR9170_INT_FLAG_PTAB_BIT 0x002
+#define AR9170_INT_FLAG_SE_BIT 0x004
+#define AR9170_INT_FLAG_UART_BIT 0x008
+#define AR9170_INT_FLAG_TIMER_BIT 0x010
+#define AR9170_INT_FLAG_EXT_BIT 0x020
+#define AR9170_INT_FLAG_SW_BIT 0x040
+#define AR9170_INT_FLAG_USB_BIT 0x080
+#define AR9170_INT_FLAG_ETHERNET_BIT 0x100
+
+#define AR9170_INT_REG_PRIORITY1 (AR9170_INT_REG_BASE + 0x00c)
+#define AR9170_INT_REG_PRIORITY2 (AR9170_INT_REG_BASE + 0x010)
+#define AR9170_INT_REG_PRIORITY3 (AR9170_INT_REG_BASE + 0x014)
+#define AR9170_INT_REG_EXT_INT_CONTROL (AR9170_INT_REG_BASE + 0x018)
+#define AR9170_INT_REG_SW_INT_CONTROL (AR9170_INT_REG_BASE + 0x01c)
+#define AR9170_INT_SW_INT_ENABLE 0x1
+
+#define AR9170_INT_REG_FIQ_ENCODE (AR9170_INT_REG_BASE + 0x020)
+#define AR9170_INT_INT_IRQ_ENCODE (AR9170_INT_REG_BASE + 0x024)
+
+/* Power Management */
+#define AR9170_PWR_REG_BASE 0x1d4000
+
+#define AR9170_PWR_REG_POWER_STATE (AR9170_PWR_REG_BASE + 0x000)
+
+#define AR9170_PWR_REG_RESET (AR9170_PWR_REG_BASE + 0x004)
+#define AR9170_PWR_RESET_COMMIT_RESET_MASK BIT(0)
+#define AR9170_PWR_RESET_WLAN_MASK BIT(1)
+#define AR9170_PWR_RESET_DMA_MASK BIT(2)
+#define AR9170_PWR_RESET_BRIDGE_MASK BIT(3)
+#define AR9170_PWR_RESET_AHB_MASK BIT(9)
+#define AR9170_PWR_RESET_BB_WARM_RESET BIT(10)
+#define AR9170_PWR_RESET_BB_COLD_RESET BIT(11)
+#define AR9170_PWR_RESET_ADDA_CLK_COLD_RESET BIT(12)
+#define AR9170_PWR_RESET_PLL BIT(13)
+#define AR9170_PWR_RESET_USB_PLL BIT(14)
+
+#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
+#define AR9170_PWR_CLK_AHB_40MHZ 0
+#define AR9170_PWR_CLK_AHB_20_22MHZ 1
+#define AR9170_PWR_CLK_AHB_40_44MHZ 2
+#define AR9170_PWR_CLK_AHB_80_88MHZ 3
+#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
+
+#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
+#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
+#define AR9170_PWR_PLL_ADDAC_DIV_S 2
+#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
+#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
+
+/* Faraday USB Controller */
+#define AR9170_USB_REG_BASE 0x1e1000
+
+#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
+#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
+#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
+#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
+#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
+#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
+#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
+
+#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
+#define AR9170_USB_DEVICE_ADDRESS_CONFIGURE BIT(7)
+
+#define AR9170_USB_REG_TEST (AR9170_USB_REG_BASE + 0x002)
+#define AR9170_USB_REG_PHY_TEST_SELECT (AR9170_USB_REG_BASE + 0x008)
+#define AR9170_USB_REG_CX_CONFIG_STATUS (AR9170_USB_REG_BASE + 0x00b)
+#define AR9170_USB_REG_EP0_DATA (AR9170_USB_REG_BASE + 0x00c)
+#define AR9170_USB_REG_EP0_DATA1 (AR9170_USB_REG_BASE + 0x00c)
+#define AR9170_USB_REG_EP0_DATA2 (AR9170_USB_REG_BASE + 0x00d)
+
+#define AR9170_USB_REG_INTR_MASK_BYTE_0 (AR9170_USB_REG_BASE + 0x011)
+#define AR9170_USB_REG_INTR_MASK_BYTE_1 (AR9170_USB_REG_BASE + 0x012)
+#define AR9170_USB_REG_INTR_MASK_BYTE_2 (AR9170_USB_REG_BASE + 0x013)
+#define AR9170_USB_REG_INTR_MASK_BYTE_3 (AR9170_USB_REG_BASE + 0x014)
+#define AR9170_USB_REG_INTR_MASK_BYTE_4 (AR9170_USB_REG_BASE + 0x015)
+#define AR9170_USB_INTR_DISABLE_OUT_INT (BIT(7) | BIT(6))
+
+#define AR9170_USB_REG_INTR_MASK_BYTE_5 (AR9170_USB_REG_BASE + 0x016)
+#define AR9170_USB_REG_INTR_MASK_BYTE_6 (AR9170_USB_REG_BASE + 0x017)
+#define AR9170_USB_INTR_DISABLE_IN_INT BIT(6)
+
+#define AR9170_USB_REG_INTR_MASK_BYTE_7 (AR9170_USB_REG_BASE + 0x018)
+
+#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
+
+#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
+#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
+#define AR9170_USB_INTR_SRC0_IN BIT(1)
+#define AR9170_USB_INTR_SRC0_OUT BIT(2)
+#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
+#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
+#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
+
+#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
+#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
+#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
+#define AR9170_USB_REG_INTR_SOURCE_4 (AR9170_USB_REG_BASE + 0x025)
+#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
+#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
+#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
+#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
+#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
+#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
+#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
+#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
+
+#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
+
+#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
+#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
+#define AR9170_USB_REG_EP2_MAP (AR9170_USB_REG_BASE + 0x031)
+#define AR9170_USB_REG_EP3_MAP (AR9170_USB_REG_BASE + 0x032)
+#define AR9170_USB_REG_EP4_MAP (AR9170_USB_REG_BASE + 0x033)
+#define AR9170_USB_REG_EP5_MAP (AR9170_USB_REG_BASE + 0x034)
+#define AR9170_USB_REG_EP6_MAP (AR9170_USB_REG_BASE + 0x035)
+#define AR9170_USB_REG_EP7_MAP (AR9170_USB_REG_BASE + 0x036)
+#define AR9170_USB_REG_EP8_MAP (AR9170_USB_REG_BASE + 0x037)
+#define AR9170_USB_REG_EP9_MAP (AR9170_USB_REG_BASE + 0x038)
+#define AR9170_USB_REG_EP10_MAP (AR9170_USB_REG_BASE + 0x039)
+
+#define AR9170_USB_REG_EP_IN_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x03f)
+#define AR9170_USB_EP_IN_TOGGLE 0x10
+
+#define AR9170_USB_REG_EP_IN_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x03e)
+
+#define AR9170_USB_REG_EP_OUT_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x05f)
+#define AR9170_USB_EP_OUT_TOGGLE 0x10
+
+#define AR9170_USB_REG_EP_OUT_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x05e)
+
+#define AR9170_USB_REG_EP3_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0ae)
+#define AR9170_USB_REG_EP3_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0be)
+#define AR9170_USB_REG_EP4_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0af)
+#define AR9170_USB_REG_EP4_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0bf)
+
+#define AR9170_USB_REG_FIFO_MAP (AR9170_USB_REG_BASE + 0x080)
+#define AR9170_USB_REG_FIFO0_MAP (AR9170_USB_REG_BASE + 0x080)
+#define AR9170_USB_REG_FIFO1_MAP (AR9170_USB_REG_BASE + 0x081)
+#define AR9170_USB_REG_FIFO2_MAP (AR9170_USB_REG_BASE + 0x082)
+#define AR9170_USB_REG_FIFO3_MAP (AR9170_USB_REG_BASE + 0x083)
+#define AR9170_USB_REG_FIFO4_MAP (AR9170_USB_REG_BASE + 0x084)
+#define AR9170_USB_REG_FIFO5_MAP (AR9170_USB_REG_BASE + 0x085)
+#define AR9170_USB_REG_FIFO6_MAP (AR9170_USB_REG_BASE + 0x086)
+#define AR9170_USB_REG_FIFO7_MAP (AR9170_USB_REG_BASE + 0x087)
+#define AR9170_USB_REG_FIFO8_MAP (AR9170_USB_REG_BASE + 0x088)
+#define AR9170_USB_REG_FIFO9_MAP (AR9170_USB_REG_BASE + 0x089)
+
+#define AR9170_USB_REG_FIFO_CONFIG (AR9170_USB_REG_BASE + 0x090)
+#define AR9170_USB_REG_FIFO0_CONFIG (AR9170_USB_REG_BASE + 0x090)
+#define AR9170_USB_REG_FIFO1_CONFIG (AR9170_USB_REG_BASE + 0x091)
+#define AR9170_USB_REG_FIFO2_CONFIG (AR9170_USB_REG_BASE + 0x092)
+#define AR9170_USB_REG_FIFO3_CONFIG (AR9170_USB_REG_BASE + 0x093)
+#define AR9170_USB_REG_FIFO4_CONFIG (AR9170_USB_REG_BASE + 0x094)
+#define AR9170_USB_REG_FIFO5_CONFIG (AR9170_USB_REG_BASE + 0x095)
+#define AR9170_USB_REG_FIFO6_CONFIG (AR9170_USB_REG_BASE + 0x096)
+#define AR9170_USB_REG_FIFO7_CONFIG (AR9170_USB_REG_BASE + 0x097)
+#define AR9170_USB_REG_FIFO8_CONFIG (AR9170_USB_REG_BASE + 0x098)
+#define AR9170_USB_REG_FIFO9_CONFIG (AR9170_USB_REG_BASE + 0x099)
+
+#define AR9170_USB_REG_EP3_DATA (AR9170_USB_REG_BASE + 0x0f8)
+#define AR9170_USB_REG_EP4_DATA (AR9170_USB_REG_BASE + 0x0fc)
+
+#define AR9170_USB_REG_FIFO_SIZE (AR9170_USB_REG_BASE + 0x100)
+#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
+#define AR9170_USB_DMA_CTL_ENABLE_TO_DEVICE BIT(0)
+#define AR9170_USB_DMA_CTL_ENABLE_FROM_DEVICE BIT(1)
+#define AR9170_USB_DMA_CTL_HIGH_SPEED BIT(2)
+#define AR9170_USB_DMA_CTL_UP_PACKET_MODE BIT(3)
+#define AR9170_USB_DMA_CTL_UP_STREAM_S 4
+#define AR9170_USB_DMA_CTL_UP_STREAM (BIT(4) | BIT(5))
+#define AR9170_USB_DMA_CTL_UP_STREAM_4K (0)
+#define AR9170_USB_DMA_CTL_UP_STREAM_8K BIT(4)
+#define AR9170_USB_DMA_CTL_UP_STREAM_16K BIT(5)
+#define AR9170_USB_DMA_CTL_UP_STREAM_32K (BIT(4) | BIT(5))
+#define AR9170_USB_DMA_CTL_DOWN_STREAM BIT(6)
+
+#define AR9170_USB_REG_DMA_STATUS (AR9170_USB_REG_BASE + 0x10c)
+#define AR9170_USB_DMA_STATUS_UP_IDLE BIT(8)
+#define AR9170_USB_DMA_STATUS_DN_IDLE BIT(16)
+
+#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
+#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
+#define AR9170_USB_WAKE_UP_WAKE BIT(0)
+
+#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
+#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
+
+/* PCI/USB to AHB Bridge */
+#define AR9170_PTA_REG_BASE 0x1e2000
+
+#define AR9170_PTA_REG_CMD (AR9170_PTA_REG_BASE + 0x000)
+#define AR9170_PTA_REG_PARAM1 (AR9170_PTA_REG_BASE + 0x004)
+#define AR9170_PTA_REG_PARAM2 (AR9170_PTA_REG_BASE + 0x008)
+#define AR9170_PTA_REG_PARAM3 (AR9170_PTA_REG_BASE + 0x00c)
+#define AR9170_PTA_REG_RSP (AR9170_PTA_REG_BASE + 0x010)
+#define AR9170_PTA_REG_STATUS1 (AR9170_PTA_REG_BASE + 0x014)
+#define AR9170_PTA_REG_STATUS2 (AR9170_PTA_REG_BASE + 0x018)
+#define AR9170_PTA_REG_STATUS3 (AR9170_PTA_REG_BASE + 0x01c)
+#define AR9170_PTA_REG_AHB_INT_FLAG (AR9170_PTA_REG_BASE + 0x020)
+#define AR9170_PTA_REG_AHB_INT_MASK (AR9170_PTA_REG_BASE + 0x024)
+#define AR9170_PTA_REG_AHB_INT_ACK (AR9170_PTA_REG_BASE + 0x028)
+#define AR9170_PTA_REG_AHB_SCRATCH1 (AR9170_PTA_REG_BASE + 0x030)
+#define AR9170_PTA_REG_AHB_SCRATCH2 (AR9170_PTA_REG_BASE + 0x034)
+#define AR9170_PTA_REG_AHB_SCRATCH3 (AR9170_PTA_REG_BASE + 0x038)
+#define AR9170_PTA_REG_AHB_SCRATCH4 (AR9170_PTA_REG_BASE + 0x03c)
+
+#define AR9170_PTA_REG_SHARE_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
+
+/*
+ * PCI to AHB Bridge
+ */
+
+#define AR9170_PTA_REG_INT_FLAG (AR9170_PTA_REG_BASE + 0x100)
+#define AR9170_PTA_INT_FLAG_DN 0x01
+#define AR9170_PTA_INT_FLAG_UP 0x02
+#define AR9170_PTA_INT_FLAG_CMD 0x04
+
+#define AR9170_PTA_REG_INT_MASK (AR9170_PTA_REG_BASE + 0x104)
+#define AR9170_PTA_REG_DN_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x108)
+#define AR9170_PTA_REG_DN_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x10c)
+#define AR9170_PTA_REG_UP_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x110)
+#define AR9170_PTA_REG_UP_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x114)
+#define AR9170_PTA_REG_DN_PEND_TIME (AR9170_PTA_REG_BASE + 0x118)
+#define AR9170_PTA_REG_UP_PEND_TIME (AR9170_PTA_REG_BASE + 0x11c)
+#define AR9170_PTA_REG_CONTROL (AR9170_PTA_REG_BASE + 0x120)
+#define AR9170_PTA_CTRL_4_BEAT_BURST 0x00
+#define AR9170_PTA_CTRL_8_BEAT_BURST 0x01
+#define AR9170_PTA_CTRL_16_BEAT_BURST 0x02
+#define AR9170_PTA_CTRL_LOOPBACK_MODE 0x10
+
+#define AR9170_PTA_REG_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
+#define AR9170_PTA_REG_MEM_ADDR (AR9170_PTA_REG_BASE + 0x128)
+#define AR9170_PTA_REG_DN_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x12c)
+#define AR9170_PTA_REG_UP_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x130)
+#define AR9170_PTA_REG_DMA_STATUS (AR9170_PTA_REG_BASE + 0x134)
+#define AR9170_PTA_REG_DN_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x138)
+#define AR9170_PTA_REG_DN_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x13c)
+#define AR9170_PTA_REG_UP_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x140)
+#define AR9170_PTA_REG_UP_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x144)
+#define AR9170_PTA_REG_DMA_MODE_CTRL (AR9170_PTA_REG_BASE + 0x148)
+#define AR9170_PTA_DMA_MODE_CTRL_RESET BIT(0)
+#define AR9170_PTA_DMA_MODE_CTRL_DISABLE_USB BIT(1)
+
+/* Protocol Controller Module */
+#define AR9170_MAC_REG_PC_REG_BASE (AR9170_MAC_REG_BASE + 0xe00)
+
+
+#define AR9170_NUM_LEDS 2
+
+/* CAM */
+#define AR9170_CAM_MAX_USER 64
+#define AR9170_CAM_MAX_KEY_LENGTH 16
+
+#define AR9170_SRAM_OFFSET 0x100000
+#define AR9170_SRAM_SIZE 0x18000
+
+#define AR9170_PRAM_OFFSET 0x200000
+#define AR9170_PRAM_SIZE 0x8000
+
+enum cpu_clock {
+ AHB_STATIC_40MHZ = 0,
+ AHB_GMODE_22MHZ = 1,
+ AHB_AMODE_20MHZ = 1,
+ AHB_GMODE_44MHZ = 2,
+ AHB_AMODE_40MHZ = 2,
+ AHB_GMODE_88MHZ = 3,
+ AHB_AMODE_80MHZ = 3
+};
+
+/* USB endpoints */
+enum ar9170_usb_ep {
+ /*
+ * Control EP is always EP 0 (USB SPEC)
+ *
+ * The weird thing is: the original firmware has a few
+ * comments that suggest that the actual EP numbers
+ * are in the 1 to 10 range?!
+ */
+ AR9170_USB_EP_CTRL = 0,
+
+ AR9170_USB_EP_TX,
+ AR9170_USB_EP_RX,
+ AR9170_USB_EP_IRQ,
+ AR9170_USB_EP_CMD,
+ AR9170_USB_NUM_EXTRA_EP = 4,
+
+ __AR9170_USB_NUM_EP,
+
+ __AR9170_USB_NUM_MAX_EP = 10
+};
+
+enum ar9170_usb_fifo {
+ __AR9170_USB_NUM_MAX_FIFO = 10
+};
+
+enum ar9170_tx_queues {
+ AR9170_TXQ0 = 0,
+ AR9170_TXQ1,
+ AR9170_TXQ2,
+ AR9170_TXQ3,
+ AR9170_TXQ_SPECIAL,
+
+ /* keep last */
+ __AR9170_NUM_TX_QUEUES = 5
+};
+
+#define AR9170_TX_STREAM_TAG 0x697e
+#define AR9170_RX_STREAM_TAG 0x4e00
+#define AR9170_RX_STREAM_MAX_SIZE 0xffff
+
+struct ar9170_stream {
+ __le16 length;
+ __le16 tag;
+
+ u8 payload[0];
+} __packed __aligned(4);
+#define AR9170_STREAM_LEN 4
+
+#define AR9170_MAX_ACKTABLE_ENTRIES 8
+#define AR9170_MAX_VIRTUAL_MAC 7
+
+#define AR9170_USB_EP_CTRL_MAX 64
+#define AR9170_USB_EP_TX_MAX 512
+#define AR9170_USB_EP_RX_MAX 512
+#define AR9170_USB_EP_IRQ_MAX 64
+#define AR9170_USB_EP_CMD_MAX 64
+
+/* Trigger PRETBTT interrupt 6 Kus earlier */
+#define CARL9170_PRETBTT_KUS 6
+
+#define AR5416_MAX_RATE_POWER 63
+
+#define SET_VAL(reg, value, newvalue) \
+ (value = ((value) & ~reg) | (((newvalue) << reg##_S) & reg))
+
+#define SET_CONSTVAL(reg, newvalue) \
+ (((newvalue) << reg##_S) & reg)
+
+#define MOD_VAL(reg, value, newvalue) \
+ (((value) & ~reg) | (((newvalue) << reg##_S) & reg))
+
+#define GET_VAL(reg, value) \
+ (((value) & reg) >> reg##_S)
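+
+/*
+ * Usage example (hypothetical field FOO with mask FOO = 0x00000ff0 and
+ * shift FOO_S = 4; not an actual register definition):
+ *   GET_VAL(FOO, 0x1230)      evaluates to 0x23
+ *   MOD_VAL(FOO, 0x1230, 0x5) evaluates to 0x1050
+ *   SET_VAL(FOO, val, 0x5)    stores that result back into val
+ */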
+
+#endif /* __CARL9170_SHARED_HW_H */
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
new file mode 100644
index 000000000..78dadc797
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -0,0 +1,190 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * LED handling
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "carl9170.h"
+#include "cmd.h"
+
+int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state)
+{
+ return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state);
+}
+
+int carl9170_led_init(struct ar9170 *ar)
+{
+ int err;
+
+ /* disable LEDs */
+ /* GPIO [0/1 mode: output, 2/3: input] */
+ err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
+ if (err)
+ goto out;
+
+ /* GPIO 0/1 value: off */
+ err = carl9170_led_set_state(ar, 0);
+
+out:
+ return err;
+}
+
+#ifdef CONFIG_CARL9170_LEDS
+static void carl9170_led_update(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
+ int i, tmp = 300, blink_delay = 1000;
+ u32 led_val = 0;
+ bool rerun = false;
+
+ if (!IS_ACCEPTING_CMD(ar))
+ return;
+
+ mutex_lock(&ar->mutex);
+ for (i = 0; i < AR9170_NUM_LEDS; i++) {
+ if (ar->leds[i].registered) {
+ if (ar->leds[i].last_state ||
+ ar->leds[i].toggled) {
+
+ if (ar->leds[i].toggled)
+ tmp = 70 + 200 / (ar->leds[i].toggled);
+
+ if (tmp < blink_delay)
+ blink_delay = tmp;
+
+ led_val |= 1 << i;
+ ar->leds[i].toggled = 0;
+ rerun = true;
+ }
+ }
+ }
+
+ carl9170_led_set_state(ar, led_val);
+ mutex_unlock(&ar->mutex);
+
+ if (!rerun)
+ return;
+
+ ieee80211_queue_delayed_work(ar->hw,
+ &ar->led_work,
+ msecs_to_jiffies(blink_delay));
+}
+
+static void carl9170_led_set_brightness(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct carl9170_led *arl = container_of(led, struct carl9170_led, l);
+ struct ar9170 *ar = arl->ar;
+
+ if (!arl->registered)
+ return;
+
+ if (arl->last_state != !!brightness) {
+ arl->toggled++;
+ arl->last_state = !!brightness;
+ }
+
+ if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
+ ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
+}
+
+static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
+ char *trigger)
+{
+ int err;
+
+ snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
+ "carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
+
+ ar->leds[i].ar = ar;
+ ar->leds[i].l.name = ar->leds[i].name;
+ ar->leds[i].l.brightness_set = carl9170_led_set_brightness;
+ ar->leds[i].l.brightness = 0;
+ ar->leds[i].l.default_trigger = trigger;
+
+ err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
+ &ar->leds[i].l);
+ if (err) {
+ wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
+ ar->leds[i].name, err);
+ } else {
+ ar->leds[i].registered = true;
+ }
+
+ return err;
+}
+
+void carl9170_led_unregister(struct ar9170 *ar)
+{
+ int i;
+
+ for (i = 0; i < AR9170_NUM_LEDS; i++)
+ if (ar->leds[i].registered) {
+ led_classdev_unregister(&ar->leds[i].l);
+ ar->leds[i].registered = false;
+ ar->leds[i].toggled = 0;
+ }
+
+ cancel_delayed_work_sync(&ar->led_work);
+}
+
+int carl9170_led_register(struct ar9170 *ar)
+{
+ int err;
+
+ INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update);
+
+ err = carl9170_led_register_led(ar, 0, "tx",
+ ieee80211_get_tx_led_name(ar->hw));
+ if (err)
+ goto fail;
+
+ if (ar->features & CARL9170_ONE_LED)
+ return 0;
+
+ err = carl9170_led_register_led(ar, 1, "assoc",
+ ieee80211_get_assoc_led_name(ar->hw));
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ carl9170_led_unregister(ar);
+ return err;
+}
+
+#endif /* CONFIG_CARL9170_LEDS */
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
new file mode 100644
index 000000000..a2f005703
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -0,0 +1,538 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * MAC programming
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include "carl9170.h"
+#include "cmd.h"
+
+int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
+{
+ u32 val;
+
+ if (conf_is_ht40(&ar->hw->conf))
+ val = 0x010a;
+ else {
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ val = 0x105;
+ else
+ val = 0x104;
+ }
+
+ return carl9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
+}
+
+int carl9170_set_rts_cts_rate(struct ar9170 *ar)
+{
+ u32 rts_rate, cts_rate;
+
+ if (conf_is_ht(&ar->hw->conf)) {
+ /* 12 mbit OFDM */
+ rts_rate = 0x1da;
+ cts_rate = 0x10a;
+ } else {
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ /* 11 mbit CCK */
+ rts_rate = 033;
+ cts_rate = 003;
+ } else {
+ /* 6 mbit OFDM */
+ rts_rate = 0x1bb;
+ cts_rate = 0x10b;
+ }
+ }
+
+ return carl9170_write_reg(ar, AR9170_MAC_REG_RTS_CTS_RATE,
+ rts_rate | (cts_rate) << 16);
+}
+
+int carl9170_set_slot_time(struct ar9170 *ar)
+{
+ struct ieee80211_vif *vif;
+ u32 slottime = 20;
+
+ rcu_read_lock();
+ vif = carl9170_get_main_vif(ar);
+ if (!vif) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
+ vif->bss_conf.use_short_slot)
+ slottime = 9;
+
+ rcu_read_unlock();
+
+ return carl9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
+ slottime << 10);
+}
+
+int carl9170_set_mac_rates(struct ar9170 *ar)
+{
+ struct ieee80211_vif *vif;
+ u32 basic, mandatory;
+
+ rcu_read_lock();
+ vif = carl9170_get_main_vif(ar);
+
+ if (!vif) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ basic = (vif->bss_conf.basic_rates & 0xf);
+ basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
+ rcu_read_unlock();
+
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
+ else
+ mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, basic);
+ carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, mandatory);
+ carl9170_regwrite_finish();
+
+ return carl9170_regwrite_result();
+}
+
+int carl9170_set_qos(struct ar9170 *ar)
+{
+ carl9170_regwrite_begin(ar);
+
+ carl9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
+ (ar->edcf[0].cw_max << 16));
+ carl9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
+ (ar->edcf[1].cw_max << 16));
+ carl9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
+ (ar->edcf[2].cw_max << 16));
+ carl9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
+ (ar->edcf[3].cw_max << 16));
+ carl9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
+ (ar->edcf[4].cw_max << 16));
+
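+	/*
+	 * Note: (aifs * 9 + 10) presumably converts the AIFSN slot count
+	 * into microseconds (9 us slot time plus 10 us SIFS) before the
+	 * per-AC values are packed into the two AIFS registers below.
+	 */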
+ carl9170_regwrite(AR9170_MAC_REG_AC2_AC1_AC0_AIFS,
+ ((ar->edcf[0].aifs * 9 + 10)) |
+ ((ar->edcf[1].aifs * 9 + 10) << 12) |
+ ((ar->edcf[2].aifs * 9 + 10) << 24));
+ carl9170_regwrite(AR9170_MAC_REG_AC4_AC3_AC2_AIFS,
+ ((ar->edcf[2].aifs * 9 + 10) >> 8) |
+ ((ar->edcf[3].aifs * 9 + 10) << 4) |
+ ((ar->edcf[4].aifs * 9 + 10) << 16));
+
+ carl9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
+ ar->edcf[0].txop | ar->edcf[1].txop << 16);
+ carl9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
+ ar->edcf[2].txop | ar->edcf[3].txop << 16 |
+ ar->edcf[4].txop << 24);
+
+ carl9170_regwrite_finish();
+
+ return carl9170_regwrite_result();
+}
+
+int carl9170_init_mac(struct ar9170 *ar)
+{
+ carl9170_regwrite_begin(ar);
+
+ /* switch MAC to OTUS interface */
+ carl9170_regwrite(0x1c3600, 0x3);
+
+ carl9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
+
+ carl9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0x0);
+
+ carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
+ AR9170_MAC_FTF_MONITOR);
+
+ /* enable MMIC */
+ carl9170_regwrite(AR9170_MAC_REG_SNIFFER,
+ AR9170_MAC_SNIFFER_DEFAULTS);
+
+ carl9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
+
+ carl9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
+ carl9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
+ carl9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
+
+ /* CF-END & CF-ACK rate => 24M OFDM */
+ carl9170_regwrite(AR9170_MAC_REG_TID_CFACK_CFEND_RATE, 0x59900000);
+
+ /* NAV protects ACK only (in TXOP) */
+ carl9170_regwrite(AR9170_MAC_REG_TXOP_DURATION, 0x201);
+
+ /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
+ /* OTUS set AM to 0x1 */
+ carl9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
+
+ carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
+
+ /* Aggregation MAX number and timeout */
+ carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
+ carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
+
+ carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
+ AR9170_MAC_FTF_DEFAULTS);
+
+ carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL,
+ AR9170_MAC_RX_CTRL_DEAGG |
+ AR9170_MAC_RX_CTRL_SHORT_FILTER);
+
+ /* rate sets */
+ carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
+ carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
+ carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x0030033);
+
+ /* MIMO response control */
+ carl9170_regwrite(AR9170_MAC_REG_ACK_TPC, 0x4003c1e);
+
+ carl9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
+
+ /* set PHY register read timeout (??) */
+ carl9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
+
+ /* Disable Rx TimeOut, workaround for BB. */
+ carl9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
+
+ /* Set WLAN DMA interrupt mode: generate int per packet */
+ carl9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
+
+ carl9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
+ AR9170_MAC_FCS_FIFO_PROT);
+
+ /* Disables the CF_END frame, undocumented register */
+ carl9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
+ 0x141e0f48);
+
+ /* reset group hash table */
+ carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, 0xffffffff);
+ carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, 0xffffffff);
+
+ /* disable PRETBTT interrupt */
+ carl9170_regwrite(AR9170_MAC_REG_PRETBTT, 0x0);
+ carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, 0x0);
+
+ carl9170_regwrite_finish();
+
+ return carl9170_regwrite_result();
+}
+
+static int carl9170_set_mac_reg(struct ar9170 *ar,
+ const u32 reg, const u8 *mac)
+{
+ static const u8 zero[ETH_ALEN] = { 0 };
+
+ if (!mac)
+ mac = zero;
+
+ carl9170_regwrite_begin(ar);
+
+ carl9170_regwrite(reg, get_unaligned_le32(mac));
+ carl9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
+
+ carl9170_regwrite_finish();
+
+ return carl9170_regwrite_result();
+}
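+
+/*
+ * Example (illustrative address only): for 00:11:22:33:44:55 the helper
+ * above writes 0x33221100 to 'reg' and 0x5544 to 'reg + 4', i.e. the
+ * address is laid out little-endian across the two registers.
+ */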
+
+int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
+ const u8 *mac)
+{
+ if (WARN_ON(id >= ar->fw.vif_num))
+ return -EINVAL;
+
+ return carl9170_set_mac_reg(ar,
+ AR9170_MAC_REG_ACK_TABLE + (id - 1) * 8, mac);
+}
+
+int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash)
+{
+ int err;
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32);
+ carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash);
+ carl9170_regwrite_finish();
+ err = carl9170_regwrite_result();
+ if (err)
+ return err;
+
+ ar->cur_mc_hash = mc_hash;
+ return 0;
+}
+
+int carl9170_set_operating_mode(struct ar9170 *ar)
+{
+ struct ieee80211_vif *vif;
+ struct ath_common *common = &ar->common;
+ u8 *mac_addr, *bssid;
+ u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
+ u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
+ AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
+ u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
+ AR9170_MAC_RX_CTRL_SHORT_FILTER;
+ u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
+ int err = 0;
+
+ rcu_read_lock();
+ vif = carl9170_get_main_vif(ar);
+
+ if (vif) {
+ mac_addr = common->macaddr;
+ bssid = common->curbssid;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ cam_mode |= AR9170_MAC_CAM_IBSS;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ cam_mode |= AR9170_MAC_CAM_AP;
+
+ /* iwlagn 802.11n STA Workaround */
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ break;
+ case NL80211_IFTYPE_WDS:
+ cam_mode |= AR9170_MAC_CAM_AP_WDS;
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ break;
+ case NL80211_IFTYPE_STATION:
+ cam_mode |= AR9170_MAC_CAM_STA;
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ break;
+ default:
+ WARN(1, "Unsupported operation mode %x\n", vif->type);
+ err = -EOPNOTSUPP;
+ break;
+ }
+ } else {
+ /*
+ * Enable monitor mode
+ *
+ * rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
+ * sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
+ *
+ * When the hardware is in SNIFFER_PROMISC mode,
+ * it generates spurious ACKs for every incoming
+ * frame. This confuses every peer in the
+ * vicinity and the network throughput will suffer
+ * badly.
+ *
+ * Hence, the hardware will be put into station
+ * mode and just the rx filters are disabled.
+ */
+ cam_mode |= AR9170_MAC_CAM_STA;
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ mac_addr = common->macaddr;
+ bssid = NULL;
+ }
+ rcu_read_unlock();
+
+ if (err)
+ return err;
+
+ if (ar->rx_software_decryption)
+ enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
+
+ if (ar->sniffer_enabled) {
+ enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
+ }
+
+ err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
+ if (err)
+ return err;
+
+ err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
+ if (err)
+ return err;
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
+ carl9170_regwrite(AR9170_MAC_REG_CAM_MODE, cam_mode);
+ carl9170_regwrite(AR9170_MAC_REG_ENCRYPTION, enc_mode);
+ carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL, rx_ctrl);
+ carl9170_regwrite_finish();
+
+ return carl9170_regwrite_result();
+}
+
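+/*
+ * Worked example for the clamp below: max_retry = 2 yields 0x22222
+ * (a 2 in each of the five nibbles), while any max_retry of 3 or more
+ * saturates at the 0x33333 cap.
+ */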
+int carl9170_set_hwretry_limit(struct ar9170 *ar, const unsigned int max_retry)
+{
+ u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
+
+ return carl9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
+}
+
+int carl9170_set_beacon_timers(struct ar9170 *ar)
+{
+ struct ieee80211_vif *vif;
+ u32 v = 0;
+ u32 pretbtt = 0;
+
+ rcu_read_lock();
+ vif = carl9170_get_main_vif(ar);
+
+ if (vif) {
+ struct carl9170_vif_info *mvif;
+ mvif = (void *) vif->drv_priv;
+
+ if (mvif->enable_beacon && !WARN_ON(!ar->beacon_enabled)) {
+ ar->global_beacon_int = vif->bss_conf.beacon_int /
+ ar->beacon_enabled;
+
+ SET_VAL(AR9170_MAC_BCN_DTIM, v,
+ vif->bss_conf.dtim_period);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ v |= AR9170_MAC_BCN_IBSS_MODE;
+ break;
+ case NL80211_IFTYPE_AP:
+ v |= AR9170_MAC_BCN_AP_MODE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ar->global_beacon_int = vif->bss_conf.beacon_int;
+
+ SET_VAL(AR9170_MAC_BCN_DTIM, v,
+ ar->hw->conf.ps_dtim_period);
+
+ v |= AR9170_MAC_BCN_STA_PS |
+ AR9170_MAC_BCN_PWR_MGT;
+ }
+
+ if (ar->global_beacon_int) {
+ if (ar->global_beacon_int < 15) {
+ rcu_read_unlock();
+ return -ERANGE;
+ }
+
+ ar->global_pretbtt = ar->global_beacon_int -
+ CARL9170_PRETBTT_KUS;
+ } else {
+ ar->global_pretbtt = 0;
+ }
+ } else {
+ ar->global_beacon_int = 0;
+ ar->global_pretbtt = 0;
+ }
+
+ rcu_read_unlock();
+
+ SET_VAL(AR9170_MAC_BCN_PERIOD, v, ar->global_beacon_int);
+ SET_VAL(AR9170_MAC_PRETBTT, pretbtt, ar->global_pretbtt);
+ SET_VAL(AR9170_MAC_PRETBTT2, pretbtt, ar->global_pretbtt);
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
+ carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
+
+int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
+ const u8 ktype, const u8 keyidx, const u8 *keydata,
+ const int keylen)
+{
+ struct carl9170_set_key_cmd key = { };
+ static const u8 bcast[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ mac = mac ? : bcast;
+
+ key.user = cpu_to_le16(id);
+ key.keyId = cpu_to_le16(keyidx);
+ key.type = cpu_to_le16(ktype);
+ memcpy(&key.macAddr, mac, ETH_ALEN);
+ if (keydata)
+ memcpy(&key.key, keydata, keylen);
+
+ return carl9170_exec_cmd(ar, CARL9170_CMD_EKEY,
+ sizeof(key), (u8 *)&key, 0, NULL);
+}
+
+int carl9170_disable_key(struct ar9170 *ar, const u8 id)
+{
+ struct carl9170_disable_key_cmd key = { };
+
+ key.user = cpu_to_le16(id);
+
+ return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY,
+ sizeof(key), (u8 *)&key, 0, NULL);
+}
+
+int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
+{
+ unsigned int power, chains;
+
+ if (ar->eeprom.tx_mask != 1)
+ chains = AR9170_TX_PHY_TXCHAIN_2;
+ else
+ chains = AR9170_TX_PHY_TXCHAIN_1;
+
+ switch (channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ power = ar->power_2G_ofdm[0] & 0x3f;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ power = ar->power_5G_leg[0] & 0x3f;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ power = min_t(unsigned int, power, ar->hw->conf.power_level * 2);
+
+ carl9170_regwrite_begin(ar);
+ carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
+ 0x3c1e | power << 20 | chains << 26);
+ carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
+ power << 5 | chains << 11 |
+ power << 21 | chains << 27);
+ carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
+ power << 5 | chains << 11 |
+ power << 21 | chains << 27);
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
new file mode 100644
index 000000000..f1455a04c
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -0,0 +1,2113 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * mac80211 interaction code
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include "hw.h"
+#include "carl9170.h"
+#include "cmd.h"
+
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
+
+int modparam_noht;
+module_param_named(noht, modparam_noht, int, S_IRUGO);
+MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
+
+#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate) | (_txpidx) << 4, \
+}
+
+struct ieee80211_rate __carl9170_ratetable[] = {
+ RATE(10, 0, 0, 0),
+ RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0xb, 0, 0),
+ RATE(90, 0xf, 0, 0),
+ RATE(120, 0xa, 0, 0),
+ RATE(180, 0xe, 0, 0),
+ RATE(240, 0x9, 0, 0),
+ RATE(360, 0xd, 1, 0),
+ RATE(480, 0x8, 2, 0),
+ RATE(540, 0xc, 3, 0),
+};
+#undef RATE
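+
+/*
+ * Note on the table above: hw_value packs the hardware rate code into
+ * the low nibble and the tx power index into bits 4-7; e.g. the
+ * 54 Mbit/s entry RATE(540, 0xc, 3, 0) ends up with hw_value 0x3c.
+ */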
+
+#define carl9170_g_ratetable (__carl9170_ratetable + 0)
+#define carl9170_g_ratetable_size 12
+#define carl9170_a_ratetable (__carl9170_ratetable + 4)
+#define carl9170_a_ratetable_size 8
+
+/*
+ * NB: The hw_value is used as an index into the carl9170_phy_freq_params
+ * array in phy.c so that we don't have to do frequency lookups!
+ */
+#define CHAN(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 18, /* XXX */ \
+}
+
+static struct ieee80211_channel carl9170_2ghz_chantable[] = {
+ CHAN(2412, 0),
+ CHAN(2417, 1),
+ CHAN(2422, 2),
+ CHAN(2427, 3),
+ CHAN(2432, 4),
+ CHAN(2437, 5),
+ CHAN(2442, 6),
+ CHAN(2447, 7),
+ CHAN(2452, 8),
+ CHAN(2457, 9),
+ CHAN(2462, 10),
+ CHAN(2467, 11),
+ CHAN(2472, 12),
+ CHAN(2484, 13),
+};
+
+static struct ieee80211_channel carl9170_5ghz_chantable[] = {
+ CHAN(4920, 14),
+ CHAN(4940, 15),
+ CHAN(4960, 16),
+ CHAN(4980, 17),
+ CHAN(5040, 18),
+ CHAN(5060, 19),
+ CHAN(5080, 20),
+ CHAN(5180, 21),
+ CHAN(5200, 22),
+ CHAN(5220, 23),
+ CHAN(5240, 24),
+ CHAN(5260, 25),
+ CHAN(5280, 26),
+ CHAN(5300, 27),
+ CHAN(5320, 28),
+ CHAN(5500, 29),
+ CHAN(5520, 30),
+ CHAN(5540, 31),
+ CHAN(5560, 32),
+ CHAN(5580, 33),
+ CHAN(5600, 34),
+ CHAN(5620, 35),
+ CHAN(5640, 36),
+ CHAN(5660, 37),
+ CHAN(5680, 38),
+ CHAN(5700, 39),
+ CHAN(5745, 40),
+ CHAN(5765, 41),
+ CHAN(5785, 42),
+ CHAN(5805, 43),
+ CHAN(5825, 44),
+ CHAN(5170, 45),
+ CHAN(5190, 46),
+ CHAN(5210, 47),
+ CHAN(5230, 48),
+};
+#undef CHAN
+
+#define CARL9170_HT_CAP \
+{ \
+ .ht_supported = true, \
+ .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
+ IEEE80211_HT_CAP_SGI_40 | \
+ IEEE80211_HT_CAP_DSSSCCK40 | \
+ IEEE80211_HT_CAP_SM_PS, \
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
+ .mcs = { \
+ .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
+ .rx_highest = cpu_to_le16(300), \
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
+ }, \
+}
+
+static struct ieee80211_supported_band carl9170_band_2GHz = {
+ .channels = carl9170_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
+ .bitrates = carl9170_g_ratetable,
+ .n_bitrates = carl9170_g_ratetable_size,
+ .ht_cap = CARL9170_HT_CAP,
+};
+
+static struct ieee80211_supported_band carl9170_band_5GHz = {
+ .channels = carl9170_5ghz_chantable,
+ .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
+ .bitrates = carl9170_a_ratetable,
+ .n_bitrates = carl9170_a_ratetable_size,
+ .ht_cap = CARL9170_HT_CAP,
+};
+
+static void carl9170_ampdu_gc(struct ar9170 *ar)
+{
+ struct carl9170_sta_tid *tid_info;
+ LIST_HEAD(tid_gc);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
+ spin_lock_bh(&ar->tx_ampdu_list_lock);
+ if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
+ tid_info->state = CARL9170_TID_STATE_KILLED;
+ list_del_rcu(&tid_info->list);
+ ar->tx_ampdu_list_len--;
+ list_add_tail(&tid_info->tmp_list, &tid_gc);
+ }
+ spin_unlock_bh(&ar->tx_ampdu_list_lock);
+
+ }
+ rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
+ rcu_read_unlock();
+
+ synchronize_rcu();
+
+ while (!list_empty(&tid_gc)) {
+ struct sk_buff *skb;
+ tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
+ tmp_list);
+
+ while ((skb = __skb_dequeue(&tid_info->queue)))
+ carl9170_tx_status(ar, skb, false);
+
+ list_del_init(&tid_info->tmp_list);
+ kfree(tid_info);
+ }
+}
+
+static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
+{
+ if (drop_queued) {
+ int i;
+
+ /*
+ * We can only drop frames which have not been uploaded
+ * to the device yet.
+ */
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ atomic_dec(&ar->tx_ampdu_upload);
+
+ carl9170_tx_status(ar, skb, false);
+ }
+ }
+ }
+
+ /* Wait for all other outstanding frames to timeout. */
+ if (atomic_read(&ar->tx_total_queued))
+ WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
+}
+
+static void carl9170_flush_ba(struct ar9170 *ar)
+{
+ struct sk_buff_head free;
+ struct carl9170_sta_tid *tid_info;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&free);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->tx_ampdu_list_lock);
+ list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
+ if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
+ tid_info->state = CARL9170_TID_STATE_SUSPEND;
+
+ spin_lock(&tid_info->lock);
+ while ((skb = __skb_dequeue(&tid_info->queue)))
+ __skb_queue_tail(&free, skb);
+ spin_unlock(&tid_info->lock);
+ }
+ }
+ spin_unlock_bh(&ar->tx_ampdu_list_lock);
+ rcu_read_unlock();
+
+ while ((skb = __skb_dequeue(&free)))
+ carl9170_tx_status(ar, skb, false);
+}
+
+static void carl9170_zap_queues(struct ar9170 *ar)
+{
+ struct carl9170_vif_info *cvif;
+ unsigned int i;
+
+ carl9170_ampdu_gc(ar);
+
+ carl9170_flush_ba(ar);
+ carl9170_flush(ar, true);
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ spin_lock_bh(&ar->tx_status[i].lock);
+ while (!skb_queue_empty(&ar->tx_status[i])) {
+ struct sk_buff *skb;
+
+ skb = skb_peek(&ar->tx_status[i]);
+ carl9170_tx_get_skb(skb);
+ spin_unlock_bh(&ar->tx_status[i].lock);
+ carl9170_tx_drop(ar, skb);
+ spin_lock_bh(&ar->tx_status[i].lock);
+ carl9170_tx_put_skb(skb);
+ }
+ spin_unlock_bh(&ar->tx_status[i].lock);
+ }
+
+ BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
+ BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
+ BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
+
+ /* reinitialize queues statistics */
+ memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
+ for (i = 0; i < ar->hw->queues; i++)
+ ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
+
+ for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
+ ar->mem_bitmap[i] = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
+ spin_lock_bh(&ar->beacon_lock);
+ dev_kfree_skb_any(cvif->beacon);
+ cvif->beacon = NULL;
+ spin_unlock_bh(&ar->beacon_lock);
+ }
+ rcu_read_unlock();
+
+ atomic_set(&ar->tx_ampdu_upload, 0);
+ atomic_set(&ar->tx_ampdu_scheduler, 0);
+ atomic_set(&ar->tx_total_pending, 0);
+ atomic_set(&ar->tx_total_queued, 0);
+ atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
+}
+
+#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
+do { \
+ queue.aifs = ai_fs; \
+ queue.cw_min = cwmin; \
+ queue.cw_max = cwmax; \
+ queue.txop = _txop; \
+} while (0)
+
+static int carl9170_op_start(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+ int err, i;
+
+ mutex_lock(&ar->mutex);
+
+ carl9170_zap_queues(ar);
+
+ /* reset QoS defaults */
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
+
+ ar->current_factor = ar->current_density = -1;
+ /* "The first key is unique." */
+ ar->usedkeys = 1;
+ ar->filter_state = 0;
+ ar->ps.last_action = jiffies;
+ ar->ps.last_slept = jiffies;
+ ar->erp_mode = CARL9170_ERP_AUTO;
+
+ /* Set "disable hw crypto offload" whenever the module parameter
+ * nohwcrypt is true or if the firmware does not support it.
+ */
+ ar->disable_offload = modparam_nohwcrypt |
+ ar->fw.disable_offload_fw;
+ ar->rx_software_decryption = ar->disable_offload;
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ ar->queue_stop_timeout[i] = jiffies;
+ ar->max_queue_stop_timeout[i] = 0;
+ }
+
+ atomic_set(&ar->mem_allocs, 0);
+
+ err = carl9170_usb_open(ar);
+ if (err)
+ goto out;
+
+ err = carl9170_init_mac(ar);
+ if (err)
+ goto out;
+
+ err = carl9170_set_qos(ar);
+ if (err)
+ goto out;
+
+ if (ar->fw.rx_filter) {
+ err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
+ CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
+ if (err)
+ goto out;
+ }
+
+ err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
+ AR9170_DMA_TRIGGER_RXQ);
+ if (err)
+ goto out;
+
+ /* Clear key-cache */
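+	/* (that is AR9170_CAM_MAX_USER pairwise entries plus four extra
+	 * slots, which are apparently used by carl9170_op_set_key below
+	 * for the default/group keys with key index 0-3)
+	 */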
+ for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
+ err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
+ 0, NULL, 0);
+ if (err)
+ goto out;
+
+ err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
+ 1, NULL, 0);
+ if (err)
+ goto out;
+
+ if (i < AR9170_CAM_MAX_USER) {
+ err = carl9170_disable_key(ar, i);
+ if (err)
+ goto out;
+ }
+ }
+
+ carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
+ round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
+
+ ieee80211_wake_queues(ar->hw);
+ err = 0;
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static void carl9170_cancel_worker(struct ar9170 *ar)
+{
+ cancel_delayed_work_sync(&ar->stat_work);
+ cancel_delayed_work_sync(&ar->tx_janitor);
+#ifdef CONFIG_CARL9170_LEDS
+ cancel_delayed_work_sync(&ar->led_work);
+#endif /* CONFIG_CARL9170_LEDS */
+ cancel_work_sync(&ar->ps_work);
+ cancel_work_sync(&ar->ping_work);
+ cancel_work_sync(&ar->ampdu_work);
+}
+
+static void carl9170_op_stop(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+
+ carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
+
+ ieee80211_stop_queues(ar->hw);
+
+ mutex_lock(&ar->mutex);
+ if (IS_ACCEPTING_CMD(ar)) {
+ RCU_INIT_POINTER(ar->beacon_iter, NULL);
+
+ carl9170_led_set_state(ar, 0);
+
+ /* stop DMA */
+ carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
+ carl9170_usb_stop(ar);
+ }
+
+ carl9170_zap_queues(ar);
+ mutex_unlock(&ar->mutex);
+
+ carl9170_cancel_worker(ar);
+}
+
+static void carl9170_restart_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ restart_work);
+ int err = -EIO;
+
+ ar->usedkeys = 0;
+ ar->filter_state = 0;
+ carl9170_cancel_worker(ar);
+
+ mutex_lock(&ar->mutex);
+ if (!ar->force_usb_reset) {
+ err = carl9170_usb_restart(ar);
+ if (net_ratelimit()) {
+ if (err)
+ dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
+ else
+ dev_info(&ar->udev->dev, "device restarted successfully.\n");
+ }
+ }
+ carl9170_zap_queues(ar);
+ mutex_unlock(&ar->mutex);
+
+ if (!err && !ar->force_usb_reset) {
+ ar->restart_counter++;
+ atomic_set(&ar->pending_restarts, 0);
+
+ ieee80211_restart_hw(ar->hw);
+ } else {
+ /*
+ * The reset was unsuccessful and the device seems to
+ * be dead. But there's still one option: a low-level
+ * usb subsystem reset...
+ */
+
+ carl9170_usb_reset(ar);
+ }
+}
+
+void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
+{
+ carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
+
+ /*
+ * Sometimes, an error can trigger several different reset events.
+ * By ignoring these *surplus* reset events, the device won't be
+ * killed again, right after it has recovered.
+ */
+ if (atomic_inc_return(&ar->pending_restarts) > 1) {
+ dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
+ return;
+ }
+
+ ieee80211_stop_queues(ar->hw);
+
+ dev_err(&ar->udev->dev, "restart device (%d)\n", r);
+
+ if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
+ !WARN_ON(r >= __CARL9170_RR_LAST))
+ ar->last_reason = r;
+
+ if (!ar->registered)
+ return;
+
+ if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
+ ar->force_usb_reset = true;
+
+ ieee80211_queue_work(ar->hw, &ar->restart_work);
+
+ /*
+ * At this point, the device instance might have vanished/disabled.
+	 * So, don't put any code which accesses the ar9170 struct
+ * without proper protection.
+ */
+}
+
+static void carl9170_ping_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
+ int err;
+
+ if (!IS_STARTED(ar))
+ return;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_echo_test(ar, 0xdeadbeef);
+ if (err)
+ carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
+ mutex_unlock(&ar->mutex);
+}
+
+static int carl9170_init_interface(struct ar9170 *ar,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = &ar->common;
+ int err;
+
+ if (!vif) {
+ WARN_ON_ONCE(IS_STARTED(ar));
+ return 0;
+ }
+
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
+
+	/* We have to fall back to software crypto whenever
+	 * the user chooses to participate in an IBSS. HW
+	 * offload for IBSS RSN is not supported by this driver.
+	 *
+	 * NOTE: If the previous main interface has already
+	 * disabled hw crypto offload, we have to keep this
+	 * previous disable_offload setting as it was.
+	 * Although ideally, we should notify mac80211 and tell
+	 * it to forget about any HW crypto offload for now.
+	 */
+ ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
+ (vif->type != NL80211_IFTYPE_AP));
+
+ /* While the driver supports HW offload in a single
+ * P2P client configuration, it doesn't support HW
+	 * offload in the favourite, concurrent P2P GO+CLIENT
+ * configuration. Hence, HW offload will always be
+ * disabled for P2P.
+ */
+ ar->disable_offload |= vif->p2p;
+
+ ar->rx_software_decryption = ar->disable_offload;
+
+ err = carl9170_set_operating_mode(ar);
+ return err;
+}
+
+static int carl9170_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
+ struct ieee80211_vif *main_vif, *old_main = NULL;
+ struct ar9170 *ar = hw->priv;
+ int vif_id = -1, err = 0;
+
+ mutex_lock(&ar->mutex);
+ rcu_read_lock();
+ if (vif_priv->active) {
+ /*
+ * Skip the interface structure initialization,
+ * if the vif survived the _restart call.
+ */
+ vif_id = vif_priv->id;
+ vif_priv->enable_beacon = false;
+
+ spin_lock_bh(&ar->beacon_lock);
+ dev_kfree_skb_any(vif_priv->beacon);
+ vif_priv->beacon = NULL;
+ spin_unlock_bh(&ar->beacon_lock);
+
+ goto init;
+ }
+
+	/* Because the AR9170 HW's MAC doesn't provide full support for
+	 * multiple, independent interfaces [of different operation modes],
+	 * we have to select ONE main interface [main mode of HW], but we
+	 * can have multiple slaves [AKA: entries in the ACK-table].
+ *
+ * The first (from HEAD/TOP) interface in the ar->vif_list is
+ * always the main intf. All following intfs in this list
+ * are considered to be slave intfs.
+ */
+ main_vif = carl9170_get_main_vif(ar);
+
+ if (main_vif) {
+ switch (main_vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+
+ /* P2P GO [master] use-case
+ * Because the P2P GO station is selected dynamically
+ * by all participating peers of a WIFI Direct network,
+			 * the driver has to be able to change the main interface
+ * operating mode on the fly.
+ */
+ if (main_vif->p2p && vif->p2p &&
+ vif->type == NL80211_IFTYPE_AP) {
+ old_main = main_vif;
+ break;
+ }
+
+ err = -EBUSY;
+ rcu_read_unlock();
+
+ goto unlock;
+
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_WDS) ||
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT))
+ break;
+
+ err = -EBUSY;
+ rcu_read_unlock();
+ goto unlock;
+
+ default:
+ rcu_read_unlock();
+ goto unlock;
+ }
+ }
+
+ vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
+
+ if (vif_id < 0) {
+ rcu_read_unlock();
+
+ err = -ENOSPC;
+ goto unlock;
+ }
+
+ BUG_ON(ar->vif_priv[vif_id].id != vif_id);
+
+ vif_priv->active = true;
+ vif_priv->id = vif_id;
+ vif_priv->enable_beacon = false;
+ ar->vifs++;
+ if (old_main) {
+		/* We end up in here if the main interface is being replaced.
+		 * Put the new main interface at the HEAD of the list; the
+		 * previous interface will automatically become second in line.
+		 */
+ list_add_rcu(&vif_priv->list, &ar->vif_list);
+ } else {
+		/* Add the new interface. If the list is empty, it will become
+		 * the main interface, otherwise it will be a slave.
+ */
+ list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
+ }
+ rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
+
+init:
+ main_vif = carl9170_get_main_vif(ar);
+
+ if (main_vif == vif) {
+ rcu_assign_pointer(ar->beacon_iter, vif_priv);
+ rcu_read_unlock();
+
+ if (old_main) {
+ struct carl9170_vif_info *old_main_priv =
+ (void *) old_main->drv_priv;
+ /* downgrade old main intf to slave intf.
+ * NOTE: We are no longer under rcu_read_lock.
+ * But we are still holding ar->mutex, so the
+ * vif data [id, addr] is safe.
+ */
+ err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
+ old_main->addr);
+ if (err)
+ goto unlock;
+ }
+
+ err = carl9170_init_interface(ar, vif);
+ if (err)
+ goto unlock;
+ } else {
+ rcu_read_unlock();
+ err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
+
+ if (err)
+ goto unlock;
+ }
+
+ if (ar->fw.tx_seq_table) {
+ err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+ 0);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ if (err && (vif_id >= 0)) {
+ vif_priv->active = false;
+ bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
+ ar->vifs--;
+ RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
+ list_del_rcu(&vif_priv->list);
+ mutex_unlock(&ar->mutex);
+ synchronize_rcu();
+ } else {
+ if (ar->vifs > 1)
+ ar->ps.off_override |= PS_OFF_VIF;
+
+ mutex_unlock(&ar->mutex);
+ }
+
+ return err;
+}
+
+static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
+ struct ieee80211_vif *main_vif;
+ struct ar9170 *ar = hw->priv;
+ unsigned int id;
+
+ mutex_lock(&ar->mutex);
+
+ if (WARN_ON_ONCE(!vif_priv->active))
+ goto unlock;
+
+ ar->vifs--;
+
+ rcu_read_lock();
+ main_vif = carl9170_get_main_vif(ar);
+
+ id = vif_priv->id;
+
+ vif_priv->active = false;
+ WARN_ON(vif_priv->enable_beacon);
+ vif_priv->enable_beacon = false;
+ list_del_rcu(&vif_priv->list);
+ RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
+
+ if (vif == main_vif) {
+ rcu_read_unlock();
+
+ if (ar->vifs) {
+ WARN_ON(carl9170_init_interface(ar,
+ carl9170_get_main_vif(ar)));
+ } else {
+ carl9170_set_operating_mode(ar);
+ }
+ } else {
+ rcu_read_unlock();
+
+ WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
+ }
+
+ carl9170_update_beacon(ar, false);
+ carl9170_flush_cab(ar, id);
+
+ spin_lock_bh(&ar->beacon_lock);
+ dev_kfree_skb_any(vif_priv->beacon);
+ vif_priv->beacon = NULL;
+ spin_unlock_bh(&ar->beacon_lock);
+
+ bitmap_release_region(&ar->vif_bitmap, id, 0);
+
+ carl9170_set_beacon_timers(ar);
+
+ if (ar->vifs == 1)
+ ar->ps.off_override &= ~PS_OFF_VIF;
+
+unlock:
+ mutex_unlock(&ar->mutex);
+
+ synchronize_rcu();
+}
+
+void carl9170_ps_check(struct ar9170 *ar)
+{
+ ieee80211_queue_work(ar->hw, &ar->ps_work);
+}
+
+/* caller must hold ar->mutex */
+static int carl9170_ps_update(struct ar9170 *ar)
+{
+ bool ps = false;
+ int err = 0;
+
+ if (!ar->ps.off_override)
+ ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
+
+ if (ps != ar->ps.state) {
+ err = carl9170_powersave(ar, ps);
+ if (err)
+ return err;
+
+ if (ar->ps.state && !ps) {
+ ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
+ ar->ps.last_action);
+ }
+
+ if (ps)
+ ar->ps.last_slept = jiffies;
+
+ ar->ps.last_action = jiffies;
+ ar->ps.state = ps;
+ }
+
+ return 0;
+}
+
+static void carl9170_ps_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ ps_work);
+ mutex_lock(&ar->mutex);
+ if (IS_STARTED(ar))
+ WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
+ mutex_unlock(&ar->mutex);
+}
+
+static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
+{
+ int err;
+
+ if (noise) {
+ err = carl9170_get_noisefloor(ar);
+ if (err)
+ return err;
+ }
+
+ if (ar->fw.hw_counters) {
+ err = carl9170_collect_tally(ar);
+ if (err)
+ return err;
+ }
+
+ if (flush)
+ memset(&ar->tally, 0, sizeof(ar->tally));
+
+ return 0;
+}
+
+static void carl9170_stat_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
+ int err;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_update_survey(ar, false, true);
+ mutex_unlock(&ar->mutex);
+
+ if (err)
+ return;
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
+ round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
+}
+
+static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0;
+
+ mutex_lock(&ar->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ err = carl9170_ps_update(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_SMPS) {
+ /* TODO */
+ err = 0;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ enum nl80211_channel_type channel_type =
+ cfg80211_get_chandef_type(&hw->conf.chandef);
+
+ /* adjust slot time for 5 GHz */
+ err = carl9170_set_slot_time(ar);
+ if (err)
+ goto out;
+
+ err = carl9170_update_survey(ar, true, false);
+ if (err)
+ goto out;
+
+ err = carl9170_set_channel(ar, hw->conf.chandef.chan,
+ channel_type);
+ if (err)
+ goto out;
+
+ err = carl9170_update_survey(ar, false, true);
+ if (err)
+ goto out;
+
+ err = carl9170_set_dyn_sifs_ack(ar);
+ if (err)
+ goto out;
+
+ err = carl9170_set_rts_cts_rate(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+}
+
+static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct netdev_hw_addr *ha;
+ u64 mchash;
+
+ /* always get broadcast frames */
+ mchash = 1ULL << (0xff >> 2);
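+	/* i.e. bit 0xff >> 2 = 63; in general each address selects the bit
+	 * given by the top six bits of its last octet (addr[5] >> 2).
+	 */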
+
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ mchash |= 1ULL << (ha->addr[5] >> 2);
+
+ return mchash;
+}
+
+static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct ar9170 *ar = hw->priv;
+
+ /* mask supported flags */
+ *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
+
+ if (!IS_ACCEPTING_CMD(ar))
+ return;
+
+ mutex_lock(&ar->mutex);
+
+ ar->filter_state = *new_flags;
+ /*
+ * We can support more by setting the sniffer bit and
+ * then checking the error flags, later.
+ */
+
+ if (*new_flags & FIF_ALLMULTI)
+ multicast = ~0ULL;
+
+ if (multicast != ar->cur_mc_hash)
+ WARN_ON(carl9170_update_multicast(ar, multicast));
+
+ if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
+ ar->sniffer_enabled = !!(*new_flags &
+ (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
+
+ WARN_ON(carl9170_set_operating_mode(ar));
+ }
+
+ if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
+ u32 rx_filter = 0;
+
+ if (!ar->fw.ba_filter)
+ rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
+
+ if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
+ rx_filter |= CARL9170_RX_FILTER_BAD;
+
+ if (!(*new_flags & FIF_CONTROL))
+ rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
+
+ if (!(*new_flags & FIF_PSPOLL))
+ rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
+
+ if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
+ rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
+ rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
+ }
+
+ WARN_ON(carl9170_rx_filter(ar, rx_filter));
+ }
+
+ mutex_unlock(&ar->mutex);
+}
+
+
+static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ath_common *common = &ar->common;
+ int err = 0;
+ struct carl9170_vif_info *vif_priv;
+ struct ieee80211_vif *main_vif;
+
+ mutex_lock(&ar->mutex);
+ vif_priv = (void *) vif->drv_priv;
+ main_vif = carl9170_get_main_vif(ar);
+ if (WARN_ON(!main_vif))
+ goto out;
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ struct carl9170_vif_info *iter;
+ int i = 0;
+
+ vif_priv->enable_beacon = bss_conf->enable_beacon;
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &ar->vif_list, list) {
+ if (iter->active && iter->enable_beacon)
+ i++;
+
+ }
+ rcu_read_unlock();
+
+ ar->beacon_enabled = i;
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ err = carl9170_update_beacon(ar, false);
+ if (err)
+ goto out;
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_INT)) {
+
+ if (main_vif != vif) {
+ bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
+ bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
+ }
+
+		/*
+		 * Broadcast/CAB traffic is held back until after the DTIM
+		 * beacon, so an overly long beacon_int * dtim_period could
+		 * trip the stuck-queue watchdog. Therefore a hard limit for
+		 * the broadcast traffic should prevent false alarms.
+		 */
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ (bss_conf->beacon_int * bss_conf->dtim_period >=
+ (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = carl9170_set_beacon_timers(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ /* TODO */
+ err = 0;
+ if (err)
+ goto out;
+ }
+
+ if (main_vif != vif)
+ goto out;
+
+ /*
+ * The following settings can only be changed by the
+ * master interface.
+ */
+
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ err = carl9170_set_operating_mode(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ ar->common.curaid = bss_conf->aid;
+ err = carl9170_set_beacon_timers(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ err = carl9170_set_slot_time(ar);
+ if (err)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ err = carl9170_set_mac_rates(ar);
+ if (err)
+ goto out;
+ }
+
+out:
+ WARN_ON_ONCE(err && IS_STARTED(ar));
+ mutex_unlock(&ar->mutex);
+}
+
+static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar9170 *ar = hw->priv;
+ struct carl9170_tsf_rsp tsf;
+ int err;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
+ 0, NULL, sizeof(tsf), &tsf);
+ mutex_unlock(&ar->mutex);
+ if (WARN_ON(err))
+ return 0;
+
+ return le64_to_cpu(tsf.tsf_64);
+}
+
+static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ar9170 *ar = hw->priv;
+ int err = 0, i;
+ u8 ktype;
+
+ if (ar->disable_offload || !vif)
+ return -EOPNOTSUPP;
+
+ /* Fall back to software encryption whenever the driver is connected
+ * to more than one network.
+ *
+ * This is very unfortunate, because some machines cannot handle
+	 * the high throughput speed in 802.11n networks.
+ */
+
+ if (!is_main_vif(ar, vif)) {
+ mutex_lock(&ar->mutex);
+ goto err_softw;
+ }
+
+ /*
+	 * While the hardware supports a *catch-all* key for offloading
+	 * group-key en-/de-cryption, the way the hardware decides which
+	 * keyId maps to which key remains a mystery...
+ */
+ if ((vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_ADHOC) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ ktype = AR9170_ENC_ALG_WEP64;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ ktype = AR9170_ENC_ALG_WEP128;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ktype = AR9170_ENC_ALG_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ ktype = AR9170_ENC_ALG_AESCCMP;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ar->mutex);
+ if (cmd == SET_KEY) {
+ if (!IS_STARTED(ar)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ sta = NULL;
+
+ i = 64 + key->keyidx;
+ } else {
+ for (i = 0; i < 64; i++)
+ if (!(ar->usedkeys & BIT(i)))
+ break;
+ if (i == 64)
+ goto err_softw;
+ }
+
+ key->hw_key_idx = i;
+
+ err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
+ ktype, 0, key->key,
+ min_t(u8, 16, key->keylen));
+ if (err)
+ goto out;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ err = carl9170_upload_key(ar, i, sta ? sta->addr :
+ NULL, ktype, 1,
+ key->key + 16, 16);
+ if (err)
+ goto out;
+
+ /*
+			 * the hardware is not capable of generating the MMIC
+			 * for fragmented frames!
+ */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ }
+
+ if (i < 64)
+ ar->usedkeys |= BIT(i);
+
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ } else {
+ if (!IS_STARTED(ar)) {
+ /* The device is gone... together with the key ;-) */
+ err = 0;
+ goto out;
+ }
+
+ if (key->hw_key_idx < 64) {
+ ar->usedkeys &= ~BIT(key->hw_key_idx);
+ } else {
+ err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
+ AR9170_ENC_ALG_NONE, 0,
+ NULL, 0);
+ if (err)
+ goto out;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ err = carl9170_upload_key(ar, key->hw_key_idx,
+ NULL,
+ AR9170_ENC_ALG_NONE,
+ 1, NULL, 0);
+ if (err)
+ goto out;
+ }
+
+ }
+
+ err = carl9170_disable_key(ar, key->hw_key_idx);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->mutex);
+ return err;
+
+err_softw:
+ if (!ar->rx_software_decryption) {
+ ar->rx_software_decryption = true;
+ carl9170_set_operating_mode(ar);
+ }
+ mutex_unlock(&ar->mutex);
+ return -ENOSPC;
+}
+
+static int carl9170_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
+ unsigned int i;
+
+ atomic_set(&sta_info->pending_frames, 0);
+
+ if (sta->ht_cap.ht_supported) {
+ if (sta->ht_cap.ampdu_density > 6) {
+ /*
+			 * HW does not support a 16us AMPDU density.
+			 * No HT-Xmit for this station.
+ */
+
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
+
+ sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
+ sta_info->ht_sta = true;
+ }
+
+ return 0;
+}
+
+static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ar9170 *ar = hw->priv;
+ struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
+ unsigned int i;
+ bool cleanup = false;
+
+ if (sta->ht_cap.ht_supported) {
+
+ sta_info->ht_sta = false;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
+ struct carl9170_sta_tid *tid_info;
+
+ tid_info = rcu_dereference(sta_info->agg[i]);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
+
+ if (!tid_info)
+ continue;
+
+ spin_lock_bh(&ar->tx_ampdu_list_lock);
+ if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
+ tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
+ spin_unlock_bh(&ar->tx_ampdu_list_lock);
+ cleanup = true;
+ }
+ rcu_read_unlock();
+
+ if (cleanup)
+ carl9170_ampdu_gc(ar);
+ }
+
+ return 0;
+}
+
+static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+{
+ struct ar9170 *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->mutex);
+ if (queue < ar->hw->queues) {
+ memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
+ ret = carl9170_set_qos(ar);
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static void carl9170_ampdu_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ ampdu_work);
+
+ if (!IS_STARTED(ar))
+ return;
+
+ mutex_lock(&ar->mutex);
+ carl9170_ampdu_gc(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn, u8 buf_size)
+{
+ struct ar9170 *ar = hw->priv;
+ struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
+ struct carl9170_sta_tid *tid_info;
+
+ if (modparam_noht)
+ return -EOPNOTSUPP;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ if (!sta_info->ht_sta)
+ return -EOPNOTSUPP;
+
+ tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
+ GFP_ATOMIC);
+ if (!tid_info)
+ return -ENOMEM;
+
+ tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
+ tid_info->state = CARL9170_TID_STATE_PROGRESS;
+ tid_info->tid = tid;
+ tid_info->max = sta_info->ampdu_max_len;
+ tid_info->sta = sta;
+ tid_info->vif = vif;
+
+ INIT_LIST_HEAD(&tid_info->list);
+ INIT_LIST_HEAD(&tid_info->tmp_list);
+ skb_queue_head_init(&tid_info->queue);
+ spin_lock_init(&tid_info->lock);
+
+ spin_lock_bh(&ar->tx_ampdu_list_lock);
+ ar->tx_ampdu_list_len++;
+ list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
+ rcu_assign_pointer(sta_info->agg[tid], tid_info);
+ spin_unlock_bh(&ar->tx_ampdu_list_lock);
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ rcu_read_lock();
+ tid_info = rcu_dereference(sta_info->agg[tid]);
+ if (tid_info) {
+ spin_lock_bh(&ar->tx_ampdu_list_lock);
+ if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
+ tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
+ spin_unlock_bh(&ar->tx_ampdu_list_lock);
+ }
+
+ RCU_INIT_POINTER(sta_info->agg[tid], NULL);
+ rcu_read_unlock();
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ieee80211_queue_work(ar->hw, &ar->ampdu_work);
+ break;
+
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ rcu_read_lock();
+ tid_info = rcu_dereference(sta_info->agg[tid]);
+
+ sta_info->stats[tid].clear = true;
+ sta_info->stats[tid].req = false;
+
+ if (tid_info) {
+ bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
+ tid_info->state = CARL9170_TID_STATE_IDLE;
+ }
+ rcu_read_unlock();
+
+ if (WARN_ON_ONCE(!tid_info))
+ return -EFAULT;
+
+ break;
+
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* Handled by hardware */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CARL9170_WPC
+static int carl9170_register_wps_button(struct ar9170 *ar)
+{
+ struct input_dev *input;
+ int err;
+
+ if (!(ar->features & CARL9170_WPS_BUTTON))
+ return 0;
+
+ input = input_allocate_device();
+ if (!input)
+ return -ENOMEM;
+
+ snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
+ wiphy_name(ar->hw->wiphy));
+
+ snprintf(ar->wps.phys, sizeof(ar->wps.phys),
+ "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
+
+ input->name = ar->wps.name;
+ input->phys = ar->wps.phys;
+ input->id.bustype = BUS_USB;
+ input->dev.parent = &ar->hw->wiphy->dev;
+
+ input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
+
+ err = input_register_device(input);
+ if (err) {
+ input_free_device(input);
+ return err;
+ }
+
+ ar->wps.pbc = input;
+ return 0;
+}
+#endif /* CONFIG_CARL9170_WPC */
+
+#ifdef CONFIG_CARL9170_HWRNG
+static int carl9170_rng_get(struct ar9170 *ar)
+{
+
+#define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
+#define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
+
+ static const __le32 rng_load[RW] = {
+ [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
+
+ u32 buf[RW];
+
+ unsigned int i, off = 0, transfer, count;
+ int err;
+
+ BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
+
+ if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+ return -EAGAIN;
+
+ count = ARRAY_SIZE(ar->rng.cache);
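+	/*
+	 * Refill the whole cache: each RREG command below reads the
+	 * hardware random number register RW times in one transfer.
+	 */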
+ while (count) {
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
+ RB, (u8 *) rng_load,
+ RB, (u8 *) buf);
+ if (err)
+ return err;
+
+ transfer = min_t(unsigned int, count, RW);
+ for (i = 0; i < transfer; i++)
+ ar->rng.cache[off + i] = buf[i];
+
+ off += transfer;
+ count -= transfer;
+ }
+
+ ar->rng.cache_idx = 0;
+
+#undef RW
+#undef RB
+ return 0;
+}
+
+static int carl9170_rng_read(struct hwrng *rng, u32 *data)
+{
+ struct ar9170 *ar = (struct ar9170 *)rng->priv;
+ int ret = -EIO;
+
+ mutex_lock(&ar->mutex);
+ if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
+ ret = carl9170_rng_get(ar);
+ if (ret) {
+ mutex_unlock(&ar->mutex);
+ return ret;
+ }
+ }
+
+ *data = ar->rng.cache[ar->rng.cache_idx++];
+ mutex_unlock(&ar->mutex);
+
+ return sizeof(u16);
+}
+
+static void carl9170_unregister_hwrng(struct ar9170 *ar)
+{
+ if (ar->rng.initialized) {
+ hwrng_unregister(&ar->rng.rng);
+ ar->rng.initialized = false;
+ }
+}
+
+static int carl9170_register_hwrng(struct ar9170 *ar)
+{
+ int err;
+
+ snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
+ "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
+ ar->rng.rng.name = ar->rng.name;
+ ar->rng.rng.data_read = carl9170_rng_read;
+ ar->rng.rng.priv = (unsigned long)ar;
+
+ if (WARN_ON(ar->rng.initialized))
+ return -EALREADY;
+
+ err = hwrng_register(&ar->rng.rng);
+ if (err) {
+ dev_err(&ar->udev->dev, "Failed to register the random "
+ "number generator (%d)\n", err);
+ return err;
+ }
+
+ ar->rng.initialized = true;
+
+ err = carl9170_rng_get(ar);
+ if (err) {
+ carl9170_unregister_hwrng(ar);
+ return err;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_CARL9170_HWRNG */
+
+static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ieee80211_channel *chan;
+ struct ieee80211_supported_band *band;
+ int err, b, i;
+
+ chan = ar->channel;
+ if (!chan)
+ return -ENODEV;
+
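+	/* refresh the counters of the currently tuned channel on demand */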
+ if (idx == chan->hw_value) {
+ mutex_lock(&ar->mutex);
+ err = carl9170_update_survey(ar, false, true);
+ mutex_unlock(&ar->mutex);
+ if (err)
+ return err;
+ }
+
+ for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ band = ar->hw->wiphy->bands[b];
+
+ if (!band)
+ continue;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].hw_value == idx) {
+ chan = &band->channels[i];
+ goto found;
+ }
+ }
+ }
+ return -ENOENT;
+
+found:
+ memcpy(survey, &ar->survey[idx], sizeof(*survey));
+
+ survey->channel = chan;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+
+ if (ar->channel == chan)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ if (ar->fw.hw_counters) {
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_TX;
+ }
+
+ return 0;
+}
+
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ar9170 *ar = hw->priv;
+ unsigned int vid;
+
+ mutex_lock(&ar->mutex);
+ for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
+ carl9170_flush_cab(ar, vid);
+
+ carl9170_flush(ar, drop);
+ mutex_unlock(&ar->mutex);
+}
+
+static int carl9170_op_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ar9170 *ar = hw->priv;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->dot11ACKFailureCount = ar->tx_ack_failures;
+ stats->dot11FCSErrorCount = ar->tx_fcs_errors;
+ return 0;
+}
+
+static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ sta_info->sleeping = true;
+ if (atomic_read(&sta_info->pending_frames))
+ ieee80211_sta_block_awake(hw, sta, true);
+ break;
+
+ case STA_NOTIFY_AWAKE:
+ sta_info->sleeping = false;
+ break;
+ }
+}
+
+static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+
+ return !!atomic_read(&ar->tx_total_queued);
+}
+
+static const struct ieee80211_ops carl9170_ops = {
+ .start = carl9170_op_start,
+ .stop = carl9170_op_stop,
+ .tx = carl9170_op_tx,
+ .flush = carl9170_op_flush,
+ .add_interface = carl9170_op_add_interface,
+ .remove_interface = carl9170_op_remove_interface,
+ .config = carl9170_op_config,
+ .prepare_multicast = carl9170_op_prepare_multicast,
+ .configure_filter = carl9170_op_configure_filter,
+ .conf_tx = carl9170_op_conf_tx,
+ .bss_info_changed = carl9170_op_bss_info_changed,
+ .get_tsf = carl9170_op_get_tsf,
+ .set_key = carl9170_op_set_key,
+ .sta_add = carl9170_op_sta_add,
+ .sta_remove = carl9170_op_sta_remove,
+ .sta_notify = carl9170_op_sta_notify,
+ .get_survey = carl9170_op_get_survey,
+ .get_stats = carl9170_op_get_stats,
+ .ampdu_action = carl9170_op_ampdu_action,
+ .tx_frames_pending = carl9170_tx_frames_pending,
+};
+
+void *carl9170_alloc(size_t priv_size)
+{
+ struct ieee80211_hw *hw;
+ struct ar9170 *ar;
+ struct sk_buff *skb;
+ int i;
+
+ /*
+	 * This buffer is used for rx stream reconstruction.
+	 * Under heavy load, this device (or the transport layer?)
+ * tends to split the streams into separate rx descriptors.
+ */
+
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err_nomem;
+
+ hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
+ if (!hw)
+ goto err_nomem;
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->rx_failover = skb;
+
+ memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
+ ar->rx_has_plcp = false;
+
+ /*
+ * Here's a hidden pitfall!
+ *
+ * All 4 AC queues work perfectly well under _legacy_ operation.
+	 * However, as soon as aggregation is enabled, the traffic flow
+	 * gets very bumpy. Therefore, we have to _switch_ to a
+ * software AC with a single HW queue.
+ */
+ hw->queues = __AR9170_NUM_TXQ;
+
+ mutex_init(&ar->mutex);
+ spin_lock_init(&ar->beacon_lock);
+ spin_lock_init(&ar->cmd_lock);
+ spin_lock_init(&ar->tx_stats_lock);
+ spin_lock_init(&ar->tx_ampdu_list_lock);
+ spin_lock_init(&ar->mem_lock);
+ spin_lock_init(&ar->state_lock);
+ atomic_set(&ar->pending_restarts, 0);
+ ar->vifs = 0;
+ for (i = 0; i < ar->hw->queues; i++) {
+ skb_queue_head_init(&ar->tx_status[i]);
+ skb_queue_head_init(&ar->tx_pending[i]);
+
+ INIT_LIST_HEAD(&ar->bar_list[i]);
+ spin_lock_init(&ar->bar_list_lock[i]);
+ }
+ INIT_WORK(&ar->ps_work, carl9170_ps_work);
+ INIT_WORK(&ar->ping_work, carl9170_ping_work);
+ INIT_WORK(&ar->restart_work, carl9170_restart_work);
+ INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
+ INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
+ INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
+ INIT_LIST_HEAD(&ar->tx_ampdu_list);
+ rcu_assign_pointer(ar->tx_ampdu_iter,
+ (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
+
+ bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
+ INIT_LIST_HEAD(&ar->vif_list);
+ init_completion(&ar->tx_flush);
+
+ /* firmware decides which modes we support */
+ hw->wiphy->interface_modes = 0;
+
+ hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
+ IEEE80211_HW_SUPPORTS_RC_TABLE |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+
+ if (!modparam_noht) {
+ /*
+		 * See the comment above for why we allow the user
+		 * to disable HT via a module parameter.
+ */
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ }
+
+ hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
+ hw->sta_data_size = sizeof(struct carl9170_sta_info);
+ hw->vif_data_size = sizeof(struct carl9170_vif_info);
+
+ hw->max_rates = CARL9170_TX_MAX_RATES;
+ hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
+
+ for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
+ ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
+
+ return ar;
+
+err_nomem:
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int carl9170_read_eeprom(struct ar9170 *ar)
+{
+#define RW 8 /* number of words to read at once */
+#define RB (sizeof(u32) * RW)
+ u8 *eeprom = (void *)&ar->eeprom;
+ __le32 offsets[RW];
+ int i, j, err;
+
+ BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
+
+ BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
+#ifndef __CHECKER__
+ /* don't want to handle trailing remains */
+ BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
+#endif
+
+ for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
+ for (j = 0; j < RW; j++)
+ offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
+ RB * i + 4 * j);
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
+ RB, (u8 *) &offsets,
+ RB, eeprom + RB * i);
+ if (err)
+ return err;
+ }
+
+#undef RW
+#undef RB
+ return 0;
+}
+
+static int carl9170_parse_eeprom(struct ar9170 *ar)
+{
+ struct ath_regulatory *regulatory = &ar->common.regulatory;
+ unsigned int rx_streams, tx_streams, tx_params = 0;
+ int bands = 0;
+ int chans = 0;
+
+ if (ar->eeprom.length == cpu_to_le16(0xffff))
+ return -ENODATA;
+
+ rx_streams = hweight8(ar->eeprom.rx_mask);
+ tx_streams = hweight8(ar->eeprom.tx_mask);
+
+ if (rx_streams != tx_streams) {
+ tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
+
+ WARN_ON(!(tx_streams >= 1 && tx_streams <=
+ IEEE80211_HT_MCS_TX_MAX_STREAMS));
+
+		tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+
+ carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
+ carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
+ }
+
+ if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &carl9170_band_2GHz;
+ chans += carl9170_band_2GHz.n_channels;
+ bands++;
+ }
+ if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
+ ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &carl9170_band_5GHz;
+ chans += carl9170_band_5GHz.n_channels;
+ bands++;
+ }
+
+ if (!bands)
+ return -EINVAL;
+
+ ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
+ if (!ar->survey)
+ return -ENOMEM;
+ ar->num_channels = chans;
+
+ regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
+
+ /* second part of wiphy init */
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
+
+ return 0;
+}
+
+static void carl9170_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ar9170 *ar = hw->priv;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
+}
+
+int carl9170_register(struct ar9170 *ar)
+{
+ struct ath_regulatory *regulatory = &ar->common.regulatory;
+ int err = 0, i;
+
+ if (WARN_ON(ar->mem_bitmap))
+ return -EINVAL;
+
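+	/* allocation bitmap for the device's (firmware) memory blocks */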
+ ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ar->mem_bitmap)
+ return -ENOMEM;
+
+ /* try to read EEPROM, init MAC addr */
+ err = carl9170_read_eeprom(ar);
+ if (err)
+ return err;
+
+ err = carl9170_parse_eeprom(ar);
+ if (err)
+ return err;
+
+ err = ath_regd_init(regulatory, ar->hw->wiphy,
+ carl9170_reg_notifier);
+ if (err)
+ return err;
+
+ if (modparam_noht) {
+ carl9170_band_2GHz.ht_cap.ht_supported = false;
+ carl9170_band_5GHz.ht_cap.ht_supported = false;
+ }
+
+ for (i = 0; i < ar->fw.vif_num; i++) {
+ ar->vif_priv[i].id = i;
+ ar->vif_priv[i].vif = NULL;
+ }
+
+ err = ieee80211_register_hw(ar->hw);
+ if (err)
+ return err;
+
+ /* mac80211 interface is now registered */
+ ar->registered = true;
+
+ if (!ath_is_world_regd(regulatory))
+ regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
+
+#ifdef CONFIG_CARL9170_DEBUGFS
+ carl9170_debugfs_register(ar);
+#endif /* CONFIG_CARL9170_DEBUGFS */
+
+ err = carl9170_led_init(ar);
+ if (err)
+ goto err_unreg;
+
+#ifdef CONFIG_CARL9170_LEDS
+ err = carl9170_led_register(ar);
+ if (err)
+ goto err_unreg;
+#endif /* CONFIG_CARL9170_LEDS */
+
+#ifdef CONFIG_CARL9170_WPC
+ err = carl9170_register_wps_button(ar);
+ if (err)
+ goto err_unreg;
+#endif /* CONFIG_CARL9170_WPC */
+
+#ifdef CONFIG_CARL9170_HWRNG
+ err = carl9170_register_hwrng(ar);
+ if (err)
+ goto err_unreg;
+#endif /* CONFIG_CARL9170_HWRNG */
+
+ dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
+ wiphy_name(ar->hw->wiphy));
+
+ return 0;
+
+err_unreg:
+ carl9170_unregister(ar);
+ return err;
+}
+
+void carl9170_unregister(struct ar9170 *ar)
+{
+ if (!ar->registered)
+ return;
+
+ ar->registered = false;
+
+#ifdef CONFIG_CARL9170_LEDS
+ carl9170_led_unregister(ar);
+#endif /* CONFIG_CARL9170_LEDS */
+
+#ifdef CONFIG_CARL9170_DEBUGFS
+ carl9170_debugfs_unregister(ar);
+#endif /* CONFIG_CARL9170_DEBUGFS */
+
+#ifdef CONFIG_CARL9170_WPC
+ if (ar->wps.pbc) {
+ input_unregister_device(ar->wps.pbc);
+ ar->wps.pbc = NULL;
+ }
+#endif /* CONFIG_CARL9170_WPC */
+
+#ifdef CONFIG_CARL9170_HWRNG
+ carl9170_unregister_hwrng(ar);
+#endif /* CONFIG_CARL9170_HWRNG */
+
+ carl9170_cancel_worker(ar);
+ cancel_work_sync(&ar->restart_work);
+
+ ieee80211_unregister_hw(ar->hw);
+}
+
+void carl9170_free(struct ar9170 *ar)
+{
+ WARN_ON(ar->registered);
+ WARN_ON(IS_INITIALIZED(ar));
+
+ kfree_skb(ar->rx_failover);
+ ar->rx_failover = NULL;
+
+ kfree(ar->mem_bitmap);
+ ar->mem_bitmap = NULL;
+
+ kfree(ar->survey);
+ ar->survey = NULL;
+
+ mutex_destroy(&ar->mutex);
+
+ ieee80211_free_hw(ar->hw);
+}
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
new file mode 100644
index 000000000..dca6df13f
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -0,0 +1,1729 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * PHY and RF code
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/bitrev.h>
+#include "carl9170.h"
+#include "cmd.h"
+#include "phy.h"
+
+static int carl9170_init_power_cal(struct ar9170 *ar)
+{
+ carl9170_regwrite_begin(ar);
+
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE_MAX, 0x7f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE1, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE2, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE3, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE4, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE5, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE6, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE7, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE8, 0x3f3f3f3f);
+ carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE9, 0x3f3f3f3f);
+
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
+
+struct carl9170_phy_init {
+ u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
+};
+
+static struct carl9170_phy_init ar5416_phy_init[] = {
+ { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
+ { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
+ { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
+ { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
+ { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
+ { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
+ { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
+ { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
+ { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
+ { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
+ { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
+ { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
+ { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
+ { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
+ { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
+ { 0x1c5850, 0x6c48b4e4, 0x6d48b4e4, 0x6d48b0e4, 0x6c48b0e4, },
+ { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
+ { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
+ { 0x1c585c, 0x31395c5e, 0x3139605e, 0x3139605e, 0x31395c5e, },
+ { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
+ { 0x1c5864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
+ { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
+ { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
+ { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
+ { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
+ { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
+ { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
+ { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
+ { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
+ { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
+ { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
+ { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
+ { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
+ { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
+ { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
+ { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
+ { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
+ { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
+ { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
+ { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
+ { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
+ { 0x1c59bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
+ { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
+ { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
+ { 0x1c59c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, },
+ { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
+ { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
+ { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
+ { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
+ { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
+ { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
+ { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
+ { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
+ { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
+ { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
+ { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
+ { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
+ { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
+ { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
+ { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
+ { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
+ { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
+ { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
+ { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
+ { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
+ { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
+ { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
+ { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
+ { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
+ { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
+ { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
+ { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
+ { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
+ { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
+ { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
+ { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
+ { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
+ { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
+ { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
+ { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
+ { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
+ { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
+ { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
+ { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
+ { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
+ { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
+ { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
+ { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
+ { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
+ { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
+ { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
+ { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
+ { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
+ { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
+ { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
+ { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
+ { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
+ { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
+ { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
+ { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
+ { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
+ { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
+ { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
+ { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
+ { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
+ { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
+ { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
+ { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
+ { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
+ { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
+ { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
+ { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
+ { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
+ { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
+ { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
+ { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
+ { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
+ { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
+ { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
+ { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
+ { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
+ { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
+ { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
+ { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
+ { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
+ { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
+ { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
+ { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
+ { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
+ { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
+ { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
+ { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
+ { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
+ { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
+ { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
+ { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
+ { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
+ { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
+ { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
+ { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
+ { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
+ { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
+ { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
+ { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
+ { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
+ { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
+ { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
+ { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
+ { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
+ { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
+ { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
+ { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
+ { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
+ { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
+ { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
+ { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+ { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
+ { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
+ { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
+ { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
+ { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
+ { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
+ { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
+ { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
+ { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
+ { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
+ { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
+ { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
+ { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
+ { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
+ { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
+ { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
+ { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
+ { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
+ { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
+ { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
+ { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
+ { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
+ { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
+ { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
+ { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
+ { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+ { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
+ { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
+ { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
+ { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
+ { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
+/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
+ { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
+ { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
+ { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
+ { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
+ { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
+ { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
+ { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
+ { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
+ { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
+ { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
+ { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
+ { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
+ { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
+ { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
+ { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
+ { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
+};
+
+/*
+ * Look up a register in ar5416_phy_init[] and return its initialization value
+ * for the given band and bandwidth. Returns 0 if the register is not found.
+ */
+static u32 carl9170_def_val(u32 reg, bool is_2ghz, bool is_40mhz)
+{
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
+ if (ar5416_phy_init[i].reg != reg)
+ continue;
+
+ if (is_2ghz) {
+ if (is_40mhz)
+ return ar5416_phy_init[i]._2ghz_40;
+ else
+ return ar5416_phy_init[i]._2ghz_20;
+ } else {
+ if (is_40mhz)
+ return ar5416_phy_init[i]._5ghz_40;
+ else
+ return ar5416_phy_init[i]._5ghz_20;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Initialize some PHY registers from the EEPROM values in modal_header[],
+ * according to the given band and bandwidth.
+ */
+static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
+ bool is_2ghz, bool is_40mhz)
+{
+ static const u8 xpd2pd[16] = {
+ 0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
+ 0x2, 0x3, 0x7, 0x2, 0xb, 0x2, 0x2, 0x2
+ };
+	/* pointer to the modal_header according to band */
+ struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
+ u32 val;
+
+ carl9170_regwrite_begin(ar);
+
+ /* ant common control (index 0) */
+ carl9170_regwrite(AR9170_PHY_REG_SWITCH_COM,
+ le32_to_cpu(m->antCtrlCommon));
+
+ /* ant control chain 0 (index 1) */
+ carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_0,
+ le32_to_cpu(m->antCtrlChain[0]));
+
+ /* ant control chain 2 (index 2) */
+ carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_2,
+ le32_to_cpu(m->antCtrlChain[1]));
+
+ /* SwSettle (index 3) */
+ if (!is_40mhz) {
+ val = carl9170_def_val(AR9170_PHY_REG_SETTLING,
+ is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_SETTLING_SWITCH, val, m->switchSettling);
+ carl9170_regwrite(AR9170_PHY_REG_SETTLING, val);
+ }
+
+ /* adcDesired, pdaDesired (index 4) */
+ val = carl9170_def_val(AR9170_PHY_REG_DESIRED_SZ, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_DESIRED_SZ_PGA, val, m->pgaDesiredSize);
+ SET_VAL(AR9170_PHY_DESIRED_SZ_ADC, val, m->adcDesiredSize);
+ carl9170_regwrite(AR9170_PHY_REG_DESIRED_SZ, val);
+
+ /* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
+ val = carl9170_def_val(AR9170_PHY_REG_RF_CTL4, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF, val, m->txEndToXpaOff);
+ SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF, val, m->txEndToXpaOff);
+ SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAB_ON, val, m->txFrameToXpaOn);
+ SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAA_ON, val, m->txFrameToXpaOn);
+ carl9170_regwrite(AR9170_PHY_REG_RF_CTL4, val);
+
+ /* TxEndToRxOn (index 6) */
+ val = carl9170_def_val(AR9170_PHY_REG_RF_CTL3, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON, val, m->txEndToRxOn);
+ carl9170_regwrite(AR9170_PHY_REG_RF_CTL3, val);
+
+ /* thresh62 (index 7) */
+ val = carl9170_def_val(0x1c8864, is_2ghz, is_40mhz);
+ val = (val & ~0x7f000) | (m->thresh62 << 12);
+ carl9170_regwrite(0x1c8864, val);
+
+ /* tx/rx attenuation chain 0 (index 8) */
+ val = carl9170_def_val(AR9170_PHY_REG_RXGAIN, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[0]);
+ carl9170_regwrite(AR9170_PHY_REG_RXGAIN, val);
+
+ /* tx/rx attenuation chain 2 (index 9) */
+ val = carl9170_def_val(AR9170_PHY_REG_RXGAIN_CHAIN_2,
+ is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[1]);
+ carl9170_regwrite(AR9170_PHY_REG_RXGAIN_CHAIN_2, val);
+
+ /* tx/rx margin chain 0 (index 10) */
+ val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[0]);
+ /* bsw margin chain 0 for 5GHz only */
+ if (!is_2ghz)
+ SET_VAL(AR9170_PHY_GAIN_2GHZ_BSW_MARGIN, val, m->bswMargin[0]);
+ carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ, val);
+
+ /* tx/rx margin chain 2 (index 11) */
+ val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2,
+ is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[1]);
+ carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2, val);
+
+ /* iqCall, iqCallq chain 0 (index 12) */
+ val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(0),
+ is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[0]);
+ SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[0]);
+ carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(0), val);
+
+ /* iqCall, iqCallq chain 2 (index 13) */
+ val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(2),
+ is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[1]);
+ SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[1]);
+ carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(2), val);
+
+ /* xpd gain mask (index 14) */
+ val = carl9170_def_val(AR9170_PHY_REG_TPCRG1, is_2ghz, is_40mhz);
+ SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_1, val,
+ xpd2pd[m->xpdGain & 0xf] & 3);
+ SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_2, val,
+ xpd2pd[m->xpdGain & 0xf] >> 2);
+ carl9170_regwrite(AR9170_PHY_REG_TPCRG1, val);
+
+ carl9170_regwrite(AR9170_PHY_REG_RX_CHAINMASK, ar->eeprom.rx_mask);
+ carl9170_regwrite(AR9170_PHY_REG_CAL_CHAINMASK, ar->eeprom.rx_mask);
+
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
+
+static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+{
+ int i, err;
+ u32 val;
+ bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+ bool is_40mhz = conf_is_ht40(&ar->hw->conf);
+
+ carl9170_regwrite_begin(ar);
+
+ for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
+ if (is_40mhz) {
+ if (is_2ghz)
+ val = ar5416_phy_init[i]._2ghz_40;
+ else
+ val = ar5416_phy_init[i]._5ghz_40;
+ } else {
+ if (is_2ghz)
+ val = ar5416_phy_init[i]._2ghz_20;
+ else
+ val = ar5416_phy_init[i]._5ghz_20;
+ }
+
+ carl9170_regwrite(ar5416_phy_init[i].reg, val);
+ }
+
+ carl9170_regwrite_finish();
+ err = carl9170_regwrite_result();
+ if (err)
+ return err;
+
+ err = carl9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
+ if (err)
+ return err;
+
+ err = carl9170_init_power_cal(ar);
+ if (err)
+ return err;
+
+ if (!ar->fw.hw_counters) {
+ err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC,
+ is_2ghz ? 0x5163 : 0x5143);
+ }
+
+ return err;
+}
+
+struct carl9170_rf_initvals {
+ u32 reg, _5ghz, _2ghz;
+};
+
+static struct carl9170_rf_initvals carl9170_rf_initval[] = {
+ /* bank 0 */
+ { 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
+ { 0x1c58e0, 0x02008020, 0x02008020},
+ /* bank 1 */
+ { 0x1c58b0, 0x02108421, 0x02108421},
+ { 0x1c58ec, 0x00000008, 0x00000008},
+ /* bank 2 */
+ { 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
+ { 0x1c58e0, 0x00000420, 0x00000420},
+ /* bank 3 */
+ { 0x1c58f0, 0x01400018, 0x01c00018},
+ /* bank 4 */
+ { 0x1c58b0, 0x000001a1, 0x000001a1},
+ { 0x1c58e8, 0x00000001, 0x00000001},
+ /* bank 5 */
+ { 0x1c58b0, 0x00000013, 0x00000013},
+ { 0x1c58e4, 0x00000002, 0x00000002},
+ /* bank 6 */
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00002c00, 0x00002c00},
+ { 0x1c58b0, 0x00004800, 0x00004800},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00006000, 0x00006000},
+ { 0x1c58b0, 0x00001000, 0x00001000},
+ { 0x1c58b0, 0x00004000, 0x00004000},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00087c00, 0x00087c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00005400, 0x00005400},
+ { 0x1c58b0, 0x00000c00, 0x00000c00},
+ { 0x1c58b0, 0x00001800, 0x00001800},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00006c00, 0x00006c00},
+ { 0x1c58b0, 0x00007c00, 0x00007c00},
+ { 0x1c58b0, 0x00002c00, 0x00002c00},
+ { 0x1c58b0, 0x00003c00, 0x00003c00},
+ { 0x1c58b0, 0x00003800, 0x00003800},
+ { 0x1c58b0, 0x00001c00, 0x00001c00},
+ { 0x1c58b0, 0x00000800, 0x00000800},
+ { 0x1c58b0, 0x00000408, 0x00000408},
+ { 0x1c58b0, 0x00004c15, 0x00004c15},
+ { 0x1c58b0, 0x00004188, 0x00004188},
+ { 0x1c58b0, 0x0000201e, 0x0000201e},
+ { 0x1c58b0, 0x00010408, 0x00010408},
+ { 0x1c58b0, 0x00000801, 0x00000801},
+ { 0x1c58b0, 0x00000c08, 0x00000c08},
+ { 0x1c58b0, 0x0000181e, 0x0000181e},
+ { 0x1c58b0, 0x00001016, 0x00001016},
+ { 0x1c58b0, 0x00002800, 0x00002800},
+ { 0x1c58b0, 0x00004010, 0x00004010},
+ { 0x1c58b0, 0x0000081c, 0x0000081c},
+ { 0x1c58b0, 0x00000115, 0x00000115},
+ { 0x1c58b0, 0x00000015, 0x00000015},
+ { 0x1c58b0, 0x00000066, 0x00000066},
+ { 0x1c58b0, 0x0000001c, 0x0000001c},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000004, 0x00000004},
+ { 0x1c58b0, 0x00000015, 0x00000015},
+ { 0x1c58b0, 0x0000001f, 0x0000001f},
+ { 0x1c58e0, 0x00000000, 0x00000400},
+ /* bank 7 */
+ { 0x1c58b0, 0x000000a0, 0x000000a0},
+ { 0x1c58b0, 0x00000000, 0x00000000},
+ { 0x1c58b0, 0x00000040, 0x00000040},
+ { 0x1c58f0, 0x0000001c, 0x0000001c},
+};
+
+static int carl9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
+{
+ int err, i;
+
+ carl9170_regwrite_begin(ar);
+
+ for (i = 0; i < ARRAY_SIZE(carl9170_rf_initval); i++)
+ carl9170_regwrite(carl9170_rf_initval[i].reg,
+ band5ghz ? carl9170_rf_initval[i]._5ghz
+ : carl9170_rf_initval[i]._2ghz);
+
+ carl9170_regwrite_finish();
+ err = carl9170_regwrite_result();
+ if (err)
+ wiphy_err(ar->hw->wiphy, "rf init failed\n");
+
+ return err;
+}
+
+struct carl9170_phy_freq_params {
+ u8 coeff_exp;
+ u16 coeff_man;
+ u8 coeff_exp_shgi;
+ u16 coeff_man_shgi;
+};
+
+enum carl9170_bw {
+ CARL9170_BW_20,
+ CARL9170_BW_40_BELOW,
+ CARL9170_BW_40_ABOVE,
+
+ __CARL9170_NUM_BW,
+};
+
+struct carl9170_phy_freq_entry {
+ u16 freq;
+ struct carl9170_phy_freq_params params[__CARL9170_NUM_BW];
+};
+
+/* NB: must be in sync with channel tables in main! */
+static const struct carl9170_phy_freq_entry carl9170_phy_freq_params[] = {
+/*
+ * freq,
+ * 20MHz,
+ * 40MHz (below),
+ * 40MHz (above),
+ */
+ { 2412, {
+ { 3, 21737, 3, 19563, },
+ { 3, 21827, 3, 19644, },
+ { 3, 21647, 3, 19482, },
+ } },
+ { 2417, {
+ { 3, 21692, 3, 19523, },
+ { 3, 21782, 3, 19604, },
+ { 3, 21602, 3, 19442, },
+ } },
+ { 2422, {
+ { 3, 21647, 3, 19482, },
+ { 3, 21737, 3, 19563, },
+ { 3, 21558, 3, 19402, },
+ } },
+ { 2427, {
+ { 3, 21602, 3, 19442, },
+ { 3, 21692, 3, 19523, },
+ { 3, 21514, 3, 19362, },
+ } },
+ { 2432, {
+ { 3, 21558, 3, 19402, },
+ { 3, 21647, 3, 19482, },
+ { 3, 21470, 3, 19323, },
+ } },
+ { 2437, {
+ { 3, 21514, 3, 19362, },
+ { 3, 21602, 3, 19442, },
+ { 3, 21426, 3, 19283, },
+ } },
+ { 2442, {
+ { 3, 21470, 3, 19323, },
+ { 3, 21558, 3, 19402, },
+ { 3, 21382, 3, 19244, },
+ } },
+ { 2447, {
+ { 3, 21426, 3, 19283, },
+ { 3, 21514, 3, 19362, },
+ { 3, 21339, 3, 19205, },
+ } },
+ { 2452, {
+ { 3, 21382, 3, 19244, },
+ { 3, 21470, 3, 19323, },
+ { 3, 21295, 3, 19166, },
+ } },
+ { 2457, {
+ { 3, 21339, 3, 19205, },
+ { 3, 21426, 3, 19283, },
+ { 3, 21252, 3, 19127, },
+ } },
+ { 2462, {
+ { 3, 21295, 3, 19166, },
+ { 3, 21382, 3, 19244, },
+ { 3, 21209, 3, 19088, },
+ } },
+ { 2467, {
+ { 3, 21252, 3, 19127, },
+ { 3, 21339, 3, 19205, },
+ { 3, 21166, 3, 19050, },
+ } },
+ { 2472, {
+ { 3, 21209, 3, 19088, },
+ { 3, 21295, 3, 19166, },
+ { 3, 21124, 3, 19011, },
+ } },
+ { 2484, {
+ { 3, 21107, 3, 18996, },
+ { 3, 21192, 3, 19073, },
+ { 3, 21022, 3, 18920, },
+ } },
+ { 4920, {
+ { 4, 21313, 4, 19181, },
+ { 4, 21356, 4, 19220, },
+ { 4, 21269, 4, 19142, },
+ } },
+ { 4940, {
+ { 4, 21226, 4, 19104, },
+ { 4, 21269, 4, 19142, },
+ { 4, 21183, 4, 19065, },
+ } },
+ { 4960, {
+ { 4, 21141, 4, 19027, },
+ { 4, 21183, 4, 19065, },
+ { 4, 21098, 4, 18988, },
+ } },
+ { 4980, {
+ { 4, 21056, 4, 18950, },
+ { 4, 21098, 4, 18988, },
+ { 4, 21014, 4, 18912, },
+ } },
+ { 5040, {
+ { 4, 20805, 4, 18725, },
+ { 4, 20846, 4, 18762, },
+ { 4, 20764, 4, 18687, },
+ } },
+ { 5060, {
+ { 4, 20723, 4, 18651, },
+ { 4, 20764, 4, 18687, },
+ { 4, 20682, 4, 18614, },
+ } },
+ { 5080, {
+ { 4, 20641, 4, 18577, },
+ { 4, 20682, 4, 18614, },
+ { 4, 20601, 4, 18541, },
+ } },
+ { 5180, {
+ { 4, 20243, 4, 18219, },
+ { 4, 20282, 4, 18254, },
+ { 4, 20204, 4, 18183, },
+ } },
+ { 5200, {
+ { 4, 20165, 4, 18148, },
+ { 4, 20204, 4, 18183, },
+ { 4, 20126, 4, 18114, },
+ } },
+ { 5220, {
+ { 4, 20088, 4, 18079, },
+ { 4, 20126, 4, 18114, },
+ { 4, 20049, 4, 18044, },
+ } },
+ { 5240, {
+ { 4, 20011, 4, 18010, },
+ { 4, 20049, 4, 18044, },
+ { 4, 19973, 4, 17976, },
+ } },
+ { 5260, {
+ { 4, 19935, 4, 17941, },
+ { 4, 19973, 4, 17976, },
+ { 4, 19897, 4, 17907, },
+ } },
+ { 5280, {
+ { 4, 19859, 4, 17873, },
+ { 4, 19897, 4, 17907, },
+ { 4, 19822, 4, 17840, },
+ } },
+ { 5300, {
+ { 4, 19784, 4, 17806, },
+ { 4, 19822, 4, 17840, },
+ { 4, 19747, 4, 17772, },
+ } },
+ { 5320, {
+ { 4, 19710, 4, 17739, },
+ { 4, 19747, 4, 17772, },
+ { 4, 19673, 4, 17706, },
+ } },
+ { 5500, {
+ { 4, 19065, 4, 17159, },
+ { 4, 19100, 4, 17190, },
+ { 4, 19030, 4, 17127, },
+ } },
+ { 5520, {
+ { 4, 18996, 4, 17096, },
+ { 4, 19030, 4, 17127, },
+ { 4, 18962, 4, 17065, },
+ } },
+ { 5540, {
+ { 4, 18927, 4, 17035, },
+ { 4, 18962, 4, 17065, },
+ { 4, 18893, 4, 17004, },
+ } },
+ { 5560, {
+ { 4, 18859, 4, 16973, },
+ { 4, 18893, 4, 17004, },
+ { 4, 18825, 4, 16943, },
+ } },
+ { 5580, {
+ { 4, 18792, 4, 16913, },
+ { 4, 18825, 4, 16943, },
+ { 4, 18758, 4, 16882, },
+ } },
+ { 5600, {
+ { 4, 18725, 4, 16852, },
+ { 4, 18758, 4, 16882, },
+ { 4, 18691, 4, 16822, },
+ } },
+ { 5620, {
+ { 4, 18658, 4, 16792, },
+ { 4, 18691, 4, 16822, },
+ { 4, 18625, 4, 16762, },
+ } },
+ { 5640, {
+ { 4, 18592, 4, 16733, },
+ { 4, 18625, 4, 16762, },
+ { 4, 18559, 4, 16703, },
+ } },
+ { 5660, {
+ { 4, 18526, 4, 16673, },
+ { 4, 18559, 4, 16703, },
+ { 4, 18493, 4, 16644, },
+ } },
+ { 5680, {
+ { 4, 18461, 4, 16615, },
+ { 4, 18493, 4, 16644, },
+ { 4, 18428, 4, 16586, },
+ } },
+ { 5700, {
+ { 4, 18396, 4, 16556, },
+ { 4, 18428, 4, 16586, },
+ { 4, 18364, 4, 16527, },
+ } },
+ { 5745, {
+ { 4, 18252, 4, 16427, },
+ { 4, 18284, 4, 16455, },
+ { 4, 18220, 4, 16398, },
+ } },
+ { 5765, {
+ { 4, 18189, 5, 32740, },
+ { 4, 18220, 4, 16398, },
+ { 4, 18157, 5, 32683, },
+ } },
+ { 5785, {
+ { 4, 18126, 5, 32626, },
+ { 4, 18157, 5, 32683, },
+ { 4, 18094, 5, 32570, },
+ } },
+ { 5805, {
+ { 4, 18063, 5, 32514, },
+ { 4, 18094, 5, 32570, },
+ { 4, 18032, 5, 32458, },
+ } },
+ { 5825, {
+ { 4, 18001, 5, 32402, },
+ { 4, 18032, 5, 32458, },
+ { 4, 17970, 5, 32347, },
+ } },
+ { 5170, {
+ { 4, 20282, 4, 18254, },
+ { 4, 20321, 4, 18289, },
+ { 4, 20243, 4, 18219, },
+ } },
+ { 5190, {
+ { 4, 20204, 4, 18183, },
+ { 4, 20243, 4, 18219, },
+ { 4, 20165, 4, 18148, },
+ } },
+ { 5210, {
+ { 4, 20126, 4, 18114, },
+ { 4, 20165, 4, 18148, },
+ { 4, 20088, 4, 18079, },
+ } },
+ { 5230, {
+ { 4, 20049, 4, 18044, },
+ { 4, 20088, 4, 18079, },
+ { 4, 20011, 4, 18010, },
+ } },
+};
+
+static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
+ u32 freq, enum carl9170_bw bw)
+{
+ int err;
+ u32 d0, d1, td0, td1, fd0, fd1;
+ u8 chansel;
+ u8 refsel0 = 1, refsel1 = 0;
+ u8 lf_synth = 0;
+
+ switch (bw) {
+ case CARL9170_BW_40_ABOVE:
+ freq += 10;
+ break;
+ case CARL9170_BW_40_BELOW:
+ freq -= 10;
+ break;
+ case CARL9170_BW_20:
+ break;
+ default:
+ BUG();
+ return -ENOSYS;
+ }
+
+ if (band5ghz) {
+ if (freq % 10) {
+ chansel = (freq - 4800) / 5;
+ } else {
+ chansel = ((freq - 4800) / 10) * 2;
+ refsel0 = 0;
+ refsel1 = 1;
+ }
+ chansel = bitrev8(chansel);
+ } else {
+ if (freq == 2484) {
+ chansel = 10 + (freq - 2274) / 5;
+ lf_synth = 1;
+ } else
+ chansel = 16 + (freq - 2272) / 5;
+ chansel *= 4;
+ chansel = bitrev8(chansel);
+ }
+
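+	/*
+	 * Split the synthesizer control word (d0) and the channel select
+	 * word (d1) over two RF writes: fd0 carries the lower five bits
+	 * of each, fd1 the remaining upper bits.
+	 */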
+ d1 = chansel;
+ d0 = 0x21 |
+ refsel0 << 3 |
+ refsel1 << 2 |
+ lf_synth << 1;
+ td0 = d0 & 0x1f;
+ td1 = d1 & 0x1f;
+ fd0 = td1 << 5 | td0;
+
+ td0 = (d0 >> 5) & 0x7;
+ td1 = (d1 >> 5) & 0x7;
+ fd1 = td1 << 5 | td0;
+
+ carl9170_regwrite_begin(ar);
+
+ carl9170_regwrite(0x1c58b0, fd0);
+ carl9170_regwrite(0x1c58e8, fd1);
+
+ carl9170_regwrite_finish();
+ err = carl9170_regwrite_result();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct carl9170_phy_freq_params *
+carl9170_get_hw_dyn_params(struct ieee80211_channel *channel,
+ enum carl9170_bw bw)
+{
+ unsigned int chanidx = 0;
+ u16 freq = 2412;
+
+ if (channel) {
+ chanidx = channel->hw_value;
+ freq = channel->center_freq;
+ }
+
+ BUG_ON(chanidx >= ARRAY_SIZE(carl9170_phy_freq_params));
+
+ BUILD_BUG_ON(__CARL9170_NUM_BW != 3);
+
+ WARN_ON(carl9170_phy_freq_params[chanidx].freq != freq);
+
+ return &carl9170_phy_freq_params[chanidx].params[bw];
+}
+
+static int carl9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
+{
+ int idx = nfreqs - 2;
+
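+	/* pick the lower pier of the interval which contains f */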
+ while (idx >= 0) {
+ if (f >= freqs[idx])
+ return idx;
+ idx--;
+ }
+
+ return 0;
+}
+
+static s32 carl9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ /* nothing to interpolate, it's horizontal */
+ if (y2 == y1)
+ return y1;
+
+ /* check if we hit one of the edges */
+ if (x == x1)
+ return y1;
+ if (x == x2)
+ return y2;
+
+ /* x1 == x2 is bad, hopefully == x */
+ if (x2 == x1)
+ return y1;
+
+ return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
+}
+
+static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
+{
+#define SHIFT 8
+ s32 y;
+
+ y = carl9170_interpolate_s32(x << SHIFT, x1 << SHIFT,
+ y1 << SHIFT, x2 << SHIFT, y2 << SHIFT);
+
+ /*
+ * XXX: unwrap this expression
+ * Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
+ * Can we rely on the compiler to optimise away the div?
+ */
+ return (y >> SHIFT) + ((y & (1 << (SHIFT - 1))) >> (SHIFT - 1));
+#undef SHIFT
+}
+
+static u8 carl9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
+{
+ int i;
+
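+	/* find the segment of x_array which contains x */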
+ for (i = 0; i < 3; i++) {
+ if (x <= x_array[i + 1])
+ break;
+ }
+
+ return carl9170_interpolate_u8(x, x_array[i], y_array[i],
+ x_array[i + 1], y_array[i + 1]);
+}
+
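+/*
+ * Load the frequency dependent calibration data: interpolate the
+ * per-chain power/VPD curves between the two closest EEPROM
+ * calibration piers and write the resulting calibration table
+ * into the PHY for each chain.
+ */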
+static int carl9170_set_freq_cal_data(struct ar9170 *ar,
+ struct ieee80211_channel *channel)
+{
+ u8 *cal_freq_pier;
+ u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
+ u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
+ int chain, idx, i;
+ u32 phy_data = 0;
+ u8 f, tmp;
+
+ switch (channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ f = channel->center_freq - 2300;
+ cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
+ i = AR5416_NUM_2G_CAL_PIERS - 1;
+ break;
+
+ case IEEE80211_BAND_5GHZ:
+ f = (channel->center_freq - 4800) / 5;
+ cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
+ i = AR5416_NUM_5G_CAL_PIERS - 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (; i >= 0; i--) {
+ if (cal_freq_pier[i] != 0xff)
+ break;
+ }
+ if (i < 0)
+ return -EINVAL;
+
+ idx = carl9170_find_freq_idx(i, cal_freq_pier, f);
+
+ carl9170_regwrite_begin(ar);
+
+ for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
+ for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
+ struct ar9170_calibration_data_per_freq *cal_pier_data;
+ int j;
+
+ switch (channel->band) {
+ case IEEE80211_BAND_2GHZ:
+ cal_pier_data = &ar->eeprom.
+ cal_pier_data_2G[chain][idx];
+ break;
+
+ case IEEE80211_BAND_5GHZ:
+ cal_pier_data = &ar->eeprom.
+ cal_pier_data_5G[chain][idx];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (j = 0; j < 2; j++) {
+ vpds[j][i] = carl9170_interpolate_u8(f,
+ cal_freq_pier[idx],
+ cal_pier_data->vpd_pdg[j][i],
+ cal_freq_pier[idx + 1],
+ cal_pier_data[1].vpd_pdg[j][i]);
+
+ pwrs[j][i] = carl9170_interpolate_u8(f,
+ cal_freq_pier[idx],
+ cal_pier_data->pwr_pdg[j][i],
+ cal_freq_pier[idx + 1],
+ cal_pier_data[1].pwr_pdg[j][i]) / 2;
+ }
+ }
+
+ for (i = 0; i < 76; i++) {
+ if (i < 25) {
+ tmp = carl9170_interpolate_val(i, &pwrs[0][0],
+ &vpds[0][0]);
+ } else {
+ tmp = carl9170_interpolate_val(i - 12,
+ &pwrs[1][0],
+ &vpds[1][0]);
+ }
+
+ phy_data |= tmp << ((i & 3) << 3);
+ if ((i & 3) == 3) {
+ carl9170_regwrite(0x1c6280 + chain * 0x1000 +
+ (i & ~3), phy_data);
+ phy_data = 0;
+ }
+ }
+
+ for (i = 19; i < 32; i++)
+ carl9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
+ 0x0);
+ }
+
+ carl9170_regwrite_finish();
+ return carl9170_regwrite_result();
+}
+
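+/*
+ * Look up the conformance test band edges for the given frequency:
+ * an exact channel match wins, otherwise a lower edge with the
+ * "inband" flag set applies; without any match the maximum rate
+ * power is returned.
+ */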
+static u8 carl9170_get_max_edge_power(struct ar9170 *ar,
+ u32 freq, struct ar9170_calctl_edges edges[])
+{
+ int i;
+ u8 rc = AR5416_MAX_RATE_POWER;
+ u8 f;
+ if (freq < 3000)
+ f = freq - 2300;
+ else
+ f = (freq - 4800) / 5;
+
+ for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
+ if (edges[i].channel == 0xff)
+ break;
+ if (f == edges[i].channel) {
+ /* exact freq match */
+ rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
+ break;
+ }
+ if (i > 0 && f < edges[i].channel) {
+ if (f > edges[i - 1].channel &&
+ edges[i - 1].power_flags &
+ AR9170_CALCTL_EDGE_FLAGS) {
+ /* lower channel has the inband flag set */
+ rc = edges[i - 1].power_flags &
+ ~AR9170_CALCTL_EDGE_FLAGS;
+ }
+ break;
+ }
+ }
+
+ if (i == AR5416_NUM_BAND_EDGES) {
+ if (f > edges[i - 1].channel &&
+ edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
+ /* lower channel has the inband flag set */
+ rc = edges[i - 1].power_flags &
+ ~AR9170_CALCTL_EDGE_FLAGS;
+ }
+ }
+ return rc;
+}
+
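+/*
+ * Build the heavy clip bitmap: the upper nibble is set for 40 MHz
+ * operation, the lower nibble when the frequency sits exactly on a
+ * band edge that does not have the "inband" flag set.
+ */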
+static u8 carl9170_get_heavy_clip(struct ar9170 *ar, u32 freq,
+ enum carl9170_bw bw, struct ar9170_calctl_edges edges[])
+{
+ u8 f;
+ int i;
+ u8 rc = 0;
+
+ if (freq < 3000)
+ f = freq - 2300;
+ else
+ f = (freq - 4800) / 5;
+
+ if (bw == CARL9170_BW_40_BELOW || bw == CARL9170_BW_40_ABOVE)
+ rc |= 0xf0;
+
+ for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
+ if (edges[i].channel == 0xff)
+ break;
+ if (f == edges[i].channel) {
+ if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
+ rc |= 0x0f;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * calculate the conformance test limits and the heavy clip parameter
+ * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
+ */
+static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
+{
+ u8 ctl_grp; /* CTL group */
+ u8 ctl_idx; /* CTL index */
+ int i, j;
+ struct ctl_modes {
+ u8 ctl_mode;
+ u8 max_power;
+ u8 *pwr_cal_data;
+ int pwr_cal_len;
+ } *modes;
+
+ /*
+ * order is relevant in the mode_list_*: we fall back to the
+ * lower indices if any mode is missing from the EEPROM.
+ */
+ struct ctl_modes mode_list_2ghz[] = {
+ { CTL_11B, 0, ar->power_2G_cck, 4 },
+ { CTL_11G, 0, ar->power_2G_ofdm, 4 },
+ { CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
+ { CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
+ };
+ struct ctl_modes mode_list_5ghz[] = {
+ { CTL_11A, 0, ar->power_5G_leg, 4 },
+ { CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
+ { CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
+ };
+ int nr_modes;
+
+#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
+
+ ar->heavy_clip = 0;
+
+ /*
+ * TODO: investigate the differences between OTUS'
+ * hpreg.c::zfHpGetRegulatoryDomain() and
+ * ath/regd.c::ath_regd_get_band_ctl() -
+ * e.g. for FCC3_WORLD the OTUS procedure
+ * always returns CTL_FCC, while the one in ath/ delivers
+ * CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
+ */
+ ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
+ ar->hw->conf.chandef.chan->band);
+
+ /* ctl group not found - either invalid band (NO_CTL) or ww roaming */
+ if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
+ ctl_grp = CTL_FCC;
+
+ if (ctl_grp != CTL_FCC)
+ /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
+ return;
+
+ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ modes = mode_list_2ghz;
+ nr_modes = ARRAY_SIZE(mode_list_2ghz);
+ } else {
+ modes = mode_list_5ghz;
+ nr_modes = ARRAY_SIZE(mode_list_5ghz);
+ }
+
+ for (i = 0; i < nr_modes; i++) {
+ u8 c = ctl_grp | modes[i].ctl_mode;
+ for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
+ if (c == ar->eeprom.ctl_index[ctl_idx])
+ break;
+ if (ctl_idx < AR5416_NUM_CTLS) {
+ int f_off = 0;
+
+ /*
+ * determine heavy clip parameter
+ * from the 11G edges array
+ */
+ if (modes[i].ctl_mode == CTL_11G) {
+ ar->heavy_clip =
+ carl9170_get_heavy_clip(ar,
+ freq, bw, EDGES(ctl_idx, 1));
+ }
+
+ /* adjust freq for 40MHz */
+ if (modes[i].ctl_mode == CTL_2GHT40 ||
+ modes[i].ctl_mode == CTL_5GHT40) {
+ if (bw == CARL9170_BW_40_BELOW)
+ f_off = -10;
+ else
+ f_off = 10;
+ }
+
+ modes[i].max_power =
+ carl9170_get_max_edge_power(ar,
+ freq + f_off, EDGES(ctl_idx, 1));
+
+ /*
+ * TODO: check if the regulatory max. power is
+ * controlled by cfg80211 for DFS.
+ * (hpmain applies it to max_power itself for DFS freq)
+ */
+
+ } else {
+ /*
+ * Workaround in otus driver, hpmain.c, line 3906:
+ * if no data for 5GHT20 is found, take the
+ * legacy 5G value. We extend this here to fall back
+ * from any other HT* or 11G, too.
+ */
+ int k = i;
+
+ modes[i].max_power = AR5416_MAX_RATE_POWER;
+ while (k-- > 0) {
+ if (modes[k].max_power !=
+ AR5416_MAX_RATE_POWER) {
+ modes[i].max_power = modes[k].max_power;
+ break;
+ }
+ }
+ }
+
+ /* apply max power to pwr_cal_data (ar->power_*) */
+ for (j = 0; j < modes[i].pwr_cal_len; j++) {
+ modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
+ modes[i].max_power);
+ }
+ }
+
+ if (ar->heavy_clip & 0xf0) {
+ ar->power_2G_ht40[0]--;
+ ar->power_2G_ht40[1]--;
+ ar->power_2G_ht40[2]--;
+ }
+ if (ar->heavy_clip & 0xf) {
+ ar->power_2G_ht20[0]++;
+ ar->power_2G_ht20[1]++;
+ ar->power_2G_ht20[2]++;
+ }
+
+#undef EDGES
+}
+
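+/*
+ * Interpolate the EEPROM target power tables (legacy and HT) for
+ * the current frequency into ar->power_* and then apply the
+ * conformance test limits on top of them.
+ */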
+static void carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
+ enum carl9170_bw bw)
+{
+ struct ar9170_calibration_target_power_legacy *ctpl;
+ struct ar9170_calibration_target_power_ht *ctph;
+ u8 *ctpres;
+ int ntargets;
+ int idx, i, n;
+ u8 f;
+ u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
+
+ if (freq < 3000)
+ f = freq - 2300;
+ else
+ f = (freq - 4800) / 5;
+
+ /*
+ * cycle through the various modes
+ *
+ * legacy modes first: 5G, 2G CCK, 2G OFDM
+ */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0: /* 5 GHz legacy */
+ ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_leg;
+ break;
+ case 1: /* 2.4 GHz CCK */
+ ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
+ ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
+ ctpres = ar->power_2G_cck;
+ break;
+ case 2: /* 2.4 GHz OFDM */
+ ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ofdm;
+ break;
+ default:
+ BUG();
+ }
+
+ for (n = 0; n < ntargets; n++) {
+ if (ctpl[n].freq == 0xff)
+ break;
+ pwr_freqs[n] = ctpl[n].freq;
+ }
+ ntargets = n;
+ idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
+ for (n = 0; n < 4; n++)
+ ctpres[n] = carl9170_interpolate_u8(f,
+ ctpl[idx + 0].freq, ctpl[idx + 0].power[n],
+ ctpl[idx + 1].freq, ctpl[idx + 1].power[n]);
+ }
+
+ /* HT modes now: 5G HT20, 5G HT40, 2G HT20, 2G HT40 */
+ for (i = 0; i < 4; i++) {
+ switch (i) {
+ case 0: /* 5 GHz HT 20 */
+ ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_ht20;
+ break;
+ case 1: /* 5 GHz HT 40 */
+ ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
+ ntargets = AR5416_NUM_5G_TARGET_PWRS;
+ ctpres = ar->power_5G_ht40;
+ break;
+ case 2: /* 2.4 GHz HT 20 */
+ ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ht20;
+ break;
+ case 3: /* 2.4 GHz HT 40 */
+ ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
+ ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
+ ctpres = ar->power_2G_ht40;
+ break;
+ default:
+ BUG();
+ }
+
+ for (n = 0; n < ntargets; n++) {
+ if (ctph[n].freq == 0xff)
+ break;
+ pwr_freqs[n] = ctph[n].freq;
+ }
+ ntargets = n;
+ idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
+ for (n = 0; n < 8; n++)
+ ctpres[n] = carl9170_interpolate_u8(f,
+ ctph[idx + 0].freq, ctph[idx + 0].power[n],
+ ctph[idx + 1].freq, ctph[idx + 1].power[n]);
+ }
+
+ /* calc. conformance test limits and apply to ar->power*[] */
+ carl9170_calc_ctl(ar, freq, bw);
+}
+
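+/*
+ * Read the (extension) channel CCA minimum power registers of both
+ * receive chains and cache the sign-extended noise floor values in
+ * ar->noise[].
+ */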
+int carl9170_get_noisefloor(struct ar9170 *ar)
+{
+ static const u32 phy_regs[] = {
+ AR9170_PHY_REG_CCA, AR9170_PHY_REG_CH2_CCA,
+ AR9170_PHY_REG_EXT_CCA, AR9170_PHY_REG_CH2_EXT_CCA };
+ u32 phy_res[ARRAY_SIZE(phy_regs)];
+ int err, i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(phy_regs) != ARRAY_SIZE(ar->noise));
+
+ err = carl9170_read_mreg(ar, ARRAY_SIZE(phy_regs), phy_regs, phy_res);
+ if (err)
+ return err;
+
+ for (i = 0; i < 2; i++) {
+ ar->noise[i] = sign_extend32(GET_VAL(
+ AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
+
+ ar->noise[i + 2] = sign_extend32(GET_VAL(
+ AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
+ }
+
+ if (ar->channel)
+ ar->survey[ar->channel->hw_value].noise = ar->noise[0];
+
+ return 0;
+}
+
+static enum carl9170_bw nl80211_to_carl(enum nl80211_channel_type type)
+{
+ switch (type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ return CARL9170_BW_20;
+ case NL80211_CHAN_HT40MINUS:
+ return CARL9170_BW_40_BELOW;
+ case NL80211_CHAN_HT40PLUS:
+ return CARL9170_BW_40_ABOVE;
+ default:
+ BUG();
+ }
+}
+
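+/*
+ * Full channel switch sequence: cold-reset the baseband, reload the
+ * PHY and RF initvals, program the bank 4 synthesizer, load the
+ * frequency and power calibration data and finally hand the new
+ * settings to the firmware via CARL9170_CMD_RF_INIT.
+ */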
+int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
+ enum nl80211_channel_type _bw)
+{
+ const struct carl9170_phy_freq_params *freqpar;
+ struct carl9170_rf_init_result rf_res;
+ struct carl9170_rf_init rf;
+ u32 tmp, offs = 0, new_ht = 0;
+ int err;
+ enum carl9170_bw bw;
+ struct ieee80211_channel *old_channel = NULL;
+
+ bw = nl80211_to_carl(_bw);
+
+ if (conf_is_ht(&ar->hw->conf))
+ new_ht |= CARL9170FW_PHY_HT_ENABLE;
+
+ if (conf_is_ht40(&ar->hw->conf))
+ new_ht |= CARL9170FW_PHY_HT_DYN2040;
+
+ /* may be NULL at first setup */
+ if (ar->channel) {
+ old_channel = ar->channel;
+ ar->channel = NULL;
+ }
+
+ /* cold reset BB/ADDA */
+ err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET,
+ AR9170_PWR_RESET_BB_COLD_RESET);
+ if (err)
+ return err;
+
+ err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
+ if (err)
+ return err;
+
+ err = carl9170_init_phy(ar, channel->band);
+ if (err)
+ return err;
+
+ err = carl9170_init_rf_banks_0_7(ar,
+ channel->band == IEEE80211_BAND_5GHZ);
+ if (err)
+ return err;
+
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL);
+ if (err)
+ return err;
+
+ err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
+ 0x200);
+ if (err)
+ return err;
+
+ err = carl9170_init_rf_bank4_pwr(ar,
+ channel->band == IEEE80211_BAND_5GHZ,
+ channel->center_freq, bw);
+ if (err)
+ return err;
+
+ tmp = AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 |
+ AR9170_PHY_TURBO_FC_HT_EN;
+
+ switch (bw) {
+ case CARL9170_BW_20:
+ break;
+ case CARL9170_BW_40_BELOW:
+ tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
+ AR9170_PHY_TURBO_FC_SHORT_GI_40;
+ offs = 3;
+ break;
+ case CARL9170_BW_40_ABOVE:
+ tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
+ AR9170_PHY_TURBO_FC_SHORT_GI_40 |
+ AR9170_PHY_TURBO_FC_DYN2040_PRI_CH;
+ offs = 1;
+ break;
+ default:
+ BUG();
+ return -ENOSYS;
+ }
+
+ if (ar->eeprom.tx_mask != 1)
+ tmp |= AR9170_PHY_TURBO_FC_WALSH;
+
+ err = carl9170_write_reg(ar, AR9170_PHY_REG_TURBO, tmp);
+ if (err)
+ return err;
+
+ err = carl9170_set_freq_cal_data(ar, channel);
+ if (err)
+ return err;
+
+ carl9170_set_power_cal(ar, channel->center_freq, bw);
+
+ err = carl9170_set_mac_tpc(ar, channel);
+ if (err)
+ return err;
+
+ freqpar = carl9170_get_hw_dyn_params(channel, bw);
+
+ rf.ht_settings = new_ht;
+ if (conf_is_ht40(&ar->hw->conf))
+ SET_VAL(CARL9170FW_PHY_HT_EXT_CHAN_OFF, rf.ht_settings, offs);
+
+ rf.freq = cpu_to_le32(channel->center_freq * 1000);
+ rf.delta_slope_coeff_exp = cpu_to_le32(freqpar->coeff_exp);
+ rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man);
+ rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi);
+ rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi);
+ rf.finiteLoopCount = cpu_to_le32(2000);
+ err = carl9170_exec_cmd(ar, CARL9170_CMD_RF_INIT, sizeof(rf), &rf,
+ sizeof(rf_res), &rf_res);
+ if (err)
+ return err;
+
+ err = le32_to_cpu(rf_res.ret);
+ if (err != 0) {
+ ar->chan_fail++;
+ ar->total_chan_fail++;
+
+ wiphy_err(ar->hw->wiphy, "channel change: %d -> %d "
+ "failed (%d).\n", old_channel ?
+ old_channel->center_freq : -1, channel->center_freq,
+ err);
+
+ if (ar->chan_fail > 3) {
+ /* We have tried very hard to change to _another_
+ * channel and we've failed to do so!
+ * Chances are that the PHY/RF is no longer
+ * operable (due to corruptions/fatal events/bugs?)
+ * and we need to reset at a higher level.
+ */
+ carl9170_restart(ar, CARL9170_RR_TOO_MANY_PHY_ERRORS);
+ return 0;
+ }
+
+ err = carl9170_set_channel(ar, channel, _bw);
+ if (err)
+ return err;
+ } else {
+ ar->chan_fail = 0;
+ }
+
+ if (ar->heavy_clip) {
+ err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
+ 0x200 | ar->heavy_clip);
+ if (err) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "failed to set "
+ "heavy clip\n");
+ }
+
+ return err;
+ }
+ }
+
+ ar->channel = channel;
+ ar->ht_settings = new_ht;
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
new file mode 100644
index 000000000..024fb42bc
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -0,0 +1,564 @@
+/*
+ * Shared Atheros AR9170 Header
+ *
+ * PHY register map
+ *
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __CARL9170_SHARED_PHY_H
+#define __CARL9170_SHARED_PHY_H
+
+#define AR9170_PHY_REG_BASE (0x1bc000 + 0x9800)
+#define AR9170_PHY_REG(_n) (AR9170_PHY_REG_BASE + \
+ ((_n) << 2))
+
+#define AR9170_PHY_REG_TEST (AR9170_PHY_REG_BASE + 0x0000)
+#define AR9170_PHY_TEST_AGC_CLR 0x10000000
+#define AR9170_PHY_TEST_RFSILENT_BB 0x00002000
+
+#define AR9170_PHY_REG_TURBO (AR9170_PHY_REG_BASE + 0x0004)
+#define AR9170_PHY_TURBO_FC_TURBO_MODE 0x00000001
+#define AR9170_PHY_TURBO_FC_TURBO_SHORT 0x00000002
+#define AR9170_PHY_TURBO_FC_DYN2040_EN 0x00000004
+#define AR9170_PHY_TURBO_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR9170_PHY_TURBO_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR9170_PHY_TURBO_FC_DYN2040_EXT_CH 0x00000020
+#define AR9170_PHY_TURBO_FC_HT_EN 0x00000040
+#define AR9170_PHY_TURBO_FC_SHORT_GI_40 0x00000080
+#define AR9170_PHY_TURBO_FC_WALSH 0x00000100
+#define AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 0x00000200
+#define AR9170_PHY_TURBO_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR9170_PHY_REG_TEST2 (AR9170_PHY_REG_BASE + 0x0008)
+
+#define AR9170_PHY_REG_TIMING2 (AR9170_PHY_REG_BASE + 0x0010)
+#define AR9170_PHY_TIMING2_USE_FORCE 0x00001000
+#define AR9170_PHY_TIMING2_FORCE 0x00000fff
+#define AR9170_PHY_TIMING2_FORCE_S 0
+
+#define AR9170_PHY_REG_TIMING3 (AR9170_PHY_REG_BASE + 0x0014)
+#define AR9170_PHY_TIMING3_DSC_EXP 0x0001e000
+#define AR9170_PHY_TIMING3_DSC_EXP_S 13
+#define AR9170_PHY_TIMING3_DSC_MAN 0xfffe0000
+#define AR9170_PHY_TIMING3_DSC_MAN_S 17
+
+#define AR9170_PHY_REG_CHIP_ID (AR9170_PHY_REG_BASE + 0x0018)
+#define AR9170_PHY_CHIP_ID_REV_0 0x80
+#define AR9170_PHY_CHIP_ID_REV_1 0x81
+#define AR9170_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR9170_PHY_REG_ACTIVE (AR9170_PHY_REG_BASE + 0x001c)
+#define AR9170_PHY_ACTIVE_EN 0x00000001
+#define AR9170_PHY_ACTIVE_DIS 0x00000000
+
+#define AR9170_PHY_REG_RF_CTL2 (AR9170_PHY_REG_BASE + 0x0024)
+#define AR9170_PHY_RF_CTL2_TX_END_DATA_START 0x000000ff
+#define AR9170_PHY_RF_CTL2_TX_END_DATA_START_S 0
+#define AR9170_PHY_RF_CTL2_TX_END_PA_ON 0x0000ff00
+#define AR9170_PHY_RF_CTL2_TX_END_PA_ON_S 8
+
+#define AR9170_PHY_REG_RF_CTL3 (AR9170_PHY_REG_BASE + 0x0028)
+#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON 0x00ff0000
+#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON_S 16
+
+#define AR9170_PHY_REG_ADC_CTL (AR9170_PHY_REG_BASE + 0x002c)
+#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR9170_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR9170_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR9170_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR9170_PHY_REG_ADC_SERIAL_CTL (AR9170_PHY_REG_BASE + 0x0030)
+#define AR9170_PHY_ADC_SCTL_SEL_INTERNAL_ADDAC 0x00000000
+#define AR9170_PHY_ADC_SCTL_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR9170_PHY_REG_RF_CTL4 (AR9170_PHY_REG_BASE + 0x0034)
+#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF 0xff000000
+#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00ff0000
+#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000ff00
+#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000ff
+#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR9170_PHY_REG_TSTDAC_CONST (AR9170_PHY_REG_BASE + 0x003c)
+
+#define AR9170_PHY_REG_SETTLING (AR9170_PHY_REG_BASE + 0x0044)
+#define AR9170_PHY_SETTLING_SWITCH 0x00003f80
+#define AR9170_PHY_SETTLING_SWITCH_S 7
+
+#define AR9170_PHY_REG_RXGAIN (AR9170_PHY_REG_BASE + 0x0048)
+#define AR9170_PHY_REG_RXGAIN_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2048)
+#define AR9170_PHY_RXGAIN_TXRX_ATTEN 0x0003f000
+#define AR9170_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR9170_PHY_RXGAIN_TXRX_RF_MAX 0x007c0000
+#define AR9170_PHY_RXGAIN_TXRX_RF_MAX_S 18
+
+#define AR9170_PHY_REG_DESIRED_SZ (AR9170_PHY_REG_BASE + 0x0050)
+#define AR9170_PHY_DESIRED_SZ_ADC 0x000000ff
+#define AR9170_PHY_DESIRED_SZ_ADC_S 0
+#define AR9170_PHY_DESIRED_SZ_PGA 0x0000ff00
+#define AR9170_PHY_DESIRED_SZ_PGA_S 8
+#define AR9170_PHY_DESIRED_SZ_TOT_DES 0x0ff00000
+#define AR9170_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR9170_PHY_REG_FIND_SIG (AR9170_PHY_REG_BASE + 0x0058)
+#define AR9170_PHY_FIND_SIG_FIRSTEP 0x0003f000
+#define AR9170_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR9170_PHY_FIND_SIG_FIRPWR 0x03fc0000
+#define AR9170_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR9170_PHY_REG_AGC_CTL1 (AR9170_PHY_REG_BASE + 0x005c)
+#define AR9170_PHY_AGC_CTL1_COARSE_LOW 0x00007f80
+#define AR9170_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR9170_PHY_AGC_CTL1_COARSE_HIGH 0x003f8000
+#define AR9170_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR9170_PHY_REG_AGC_CONTROL (AR9170_PHY_REG_BASE + 0x0060)
+#define AR9170_PHY_AGC_CONTROL_CAL 0x00000001
+#define AR9170_PHY_AGC_CONTROL_NF 0x00000002
+#define AR9170_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
+#define AR9170_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
+#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
+
+#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
+#define AR9170_PHY_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CCA_MIN_PWR_S 19
+#define AR9170_PHY_CCA_THRESH62 0x0007f000
+#define AR9170_PHY_CCA_THRESH62_S 12
+
+#define AR9170_PHY_REG_SFCORR (AR9170_PHY_REG_BASE + 0x0068)
+#define AR9170_PHY_SFCORR_M2COUNT_THR 0x0000001f
+#define AR9170_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR9170_PHY_SFCORR_M1_THRESH 0x00fe0000
+#define AR9170_PHY_SFCORR_M1_THRESH_S 17
+#define AR9170_PHY_SFCORR_M2_THRESH 0x7f000000
+#define AR9170_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR9170_PHY_REG_SFCORR_LOW (AR9170_PHY_REG_BASE + 0x006c)
+#define AR9170_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003f00
+#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001fc000
+#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0fe00000
+#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR9170_PHY_REG_SLEEP_CTR_CONTROL (AR9170_PHY_REG_BASE + 0x0070)
+#define AR9170_PHY_REG_SLEEP_CTR_LIMIT (AR9170_PHY_REG_BASE + 0x0074)
+#define AR9170_PHY_REG_SLEEP_SCAL (AR9170_PHY_REG_BASE + 0x0078)
+
+#define AR9170_PHY_REG_PLL_CTL (AR9170_PHY_REG_BASE + 0x007c)
+#define AR9170_PHY_PLL_CTL_40 0xaa
+#define AR9170_PHY_PLL_CTL_40_5413 0x04
+#define AR9170_PHY_PLL_CTL_44 0xab
+#define AR9170_PHY_PLL_CTL_44_2133 0xeb
+#define AR9170_PHY_PLL_CTL_40_2133 0xea
+
+#define AR9170_PHY_REG_BIN_MASK_1 (AR9170_PHY_REG_BASE + 0x0100)
+#define AR9170_PHY_REG_BIN_MASK_2 (AR9170_PHY_REG_BASE + 0x0104)
+#define AR9170_PHY_REG_BIN_MASK_3 (AR9170_PHY_REG_BASE + 0x0108)
+#define AR9170_PHY_REG_MASK_CTL (AR9170_PHY_REG_BASE + 0x010c)
+
+/* analogue power on time (100ns) */
+#define AR9170_PHY_REG_RX_DELAY (AR9170_PHY_REG_BASE + 0x0114)
+#define AR9170_PHY_REG_SEARCH_START_DELAY (AR9170_PHY_REG_BASE + 0x0118)
+#define AR9170_PHY_RX_DELAY_DELAY 0x00003fff
+
+#define AR9170_PHY_REG_TIMING_CTRL4(_i) (AR9170_PHY_REG_BASE + \
+ (0x0120 + ((_i) << 12)))
+#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01f
+#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7e0
+#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR9170_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xf000
+#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR9170_PHY_TIMING_CTRL4_DO_IQCAL 0x10000
+#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR9170_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR9170_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR9170_PHY_REG_TIMING5 (AR9170_PHY_REG_BASE + 0x0124)
+#define AR9170_PHY_TIMING5_CYCPWR_THR1 0x000000fe
+#define AR9170_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR9170_PHY_REG_POWER_TX_RATE1 (AR9170_PHY_REG_BASE + 0x0134)
+#define AR9170_PHY_REG_POWER_TX_RATE2 (AR9170_PHY_REG_BASE + 0x0138)
+#define AR9170_PHY_REG_POWER_TX_RATE_MAX (AR9170_PHY_REG_BASE + 0x013c)
+#define AR9170_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR9170_PHY_REG_FRAME_CTL (AR9170_PHY_REG_BASE + 0x0144)
+#define AR9170_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR9170_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR9170_PHY_REG_SPUR_REG (AR9170_PHY_REG_BASE + 0x014c)
+#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL (0xff << 18)
+#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+#define AR9170_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT (0xff << 9)
+#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR9170_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7f
+#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR9170_PHY_REG_RADAR_EXT (AR9170_PHY_REG_BASE + 0x0140)
+#define AR9170_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR9170_PHY_REG_RADAR_0 (AR9170_PHY_REG_BASE + 0x0154)
+#define AR9170_PHY_RADAR_0_ENA 0x00000001
+#define AR9170_PHY_RADAR_0_FFT_ENA 0x80000000
+/* inband pulse threshold */
+#define AR9170_PHY_RADAR_0_INBAND 0x0000003e
+#define AR9170_PHY_RADAR_0_INBAND_S 1
+/* pulse RSSI threshold */
+#define AR9170_PHY_RADAR_0_PRSSI 0x00000fc0
+#define AR9170_PHY_RADAR_0_PRSSI_S 6
+/* pulse height threshold */
+#define AR9170_PHY_RADAR_0_HEIGHT 0x0003f000
+#define AR9170_PHY_RADAR_0_HEIGHT_S 12
+/* radar RSSI threshold */
+#define AR9170_PHY_RADAR_0_RRSSI 0x00fc0000
+#define AR9170_PHY_RADAR_0_RRSSI_S 18
+/* radar FIR power threshold */
+#define AR9170_PHY_RADAR_0_FIRPWR 0x7f000000
+#define AR9170_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR9170_PHY_REG_RADAR_1 (AR9170_PHY_REG_BASE + 0x0158)
+#define AR9170_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR9170_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR9170_PHY_RADAR_1_RELPWR_THRESH 0x003f0000
+#define AR9170_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR9170_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR9170_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR9170_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR9170_PHY_RADAR_1_RELSTEP_THRESH 0x00001f00
+#define AR9170_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR9170_PHY_RADAR_1_MAXLEN 0x000000ff
+#define AR9170_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR9170_PHY_REG_SWITCH_CHAIN_0 (AR9170_PHY_REG_BASE + 0x0160)
+#define AR9170_PHY_REG_SWITCH_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2160)
+
+#define AR9170_PHY_REG_SWITCH_COM (AR9170_PHY_REG_BASE + 0x0164)
+
+#define AR9170_PHY_REG_CCA_THRESHOLD (AR9170_PHY_REG_BASE + 0x0168)
+
+#define AR9170_PHY_REG_SIGMA_DELTA (AR9170_PHY_REG_BASE + 0x016c)
+#define AR9170_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR9170_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR9170_PHY_SIGMA_DELTA_FILT2 0x000000f8
+#define AR9170_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR9170_PHY_SIGMA_DELTA_FILT1 0x00001f00
+#define AR9170_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000
+#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR9170_PHY_REG_RESTART (AR9170_PHY_REG_BASE + 0x0170)
+#define AR9170_PHY_RESTART_DIV_GC 0x001c0000
+#define AR9170_PHY_RESTART_DIV_GC_S 18
+
+#define AR9170_PHY_REG_RFBUS_REQ (AR9170_PHY_REG_BASE + 0x017c)
+#define AR9170_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR9170_PHY_REG_TIMING7 (AR9170_PHY_REG_BASE + 0x0180)
+#define AR9170_PHY_REG_TIMING8 (AR9170_PHY_REG_BASE + 0x0184)
+#define AR9170_PHY_TIMING8_PILOT_MASK_2 0x000fffff
+#define AR9170_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR9170_PHY_REG_BIN_MASK2_1 (AR9170_PHY_REG_BASE + 0x0188)
+#define AR9170_PHY_REG_BIN_MASK2_2 (AR9170_PHY_REG_BASE + 0x018c)
+#define AR9170_PHY_REG_BIN_MASK2_3 (AR9170_PHY_REG_BASE + 0x0190)
+#define AR9170_PHY_REG_BIN_MASK2_4 (AR9170_PHY_REG_BASE + 0x0194)
+#define AR9170_PHY_BIN_MASK2_4_MASK_4 0x00003fff
+#define AR9170_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR9170_PHY_REG_TIMING9 (AR9170_PHY_REG_BASE + 0x0198)
+#define AR9170_PHY_REG_TIMING10 (AR9170_PHY_REG_BASE + 0x019c)
+#define AR9170_PHY_TIMING10_PILOT_MASK_2 0x000fffff
+#define AR9170_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR9170_PHY_REG_TIMING11 (AR9170_PHY_REG_BASE + 0x01a0)
+#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE 0x000fffff
+#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR9170_PHY_TIMING11_SPUR_FREQ_SD 0x3ff00000
+#define AR9170_PHY_TIMING11_SPUR_FREQ_SD_S 20
+#define AR9170_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR9170_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR9170_PHY_REG_RX_CHAINMASK (AR9170_PHY_REG_BASE + 0x01a4)
+#define AR9170_PHY_REG_NEW_ADC_DC_GAIN_CORR(_i) (AR9170_PHY_REG_BASE + \
+ 0x01b4 + ((_i) << 12))
+#define AR9170_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR9170_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR9170_PHY_REG_MULTICHAIN_GAIN_CTL (AR9170_PHY_REG_BASE + 0x01ac)
+#define AR9170_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR9170_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR9170_PHY_9285_ANT_DIV_CTL_S 24
+#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR9170_PHY_9285_ANT_DIV_LNA1 2
+#define AR9170_PHY_9285_ANT_DIV_LNA2 1
+#define AR9170_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
+#define AR9170_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
+#define AR9170_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR9170_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define AR9170_PHY_REG_EXT_CCA0 (AR9170_PHY_REG_BASE + 0x01b8)
+#define AR9170_PHY_REG_EXT_CCA0_THRESH62 0x000000ff
+#define AR9170_PHY_REG_EXT_CCA0_THRESH62_S 0
+
+#define AR9170_PHY_REG_EXT_CCA (AR9170_PHY_REG_BASE + 0x01bc)
+#define AR9170_PHY_EXT_CCA_CYCPWR_THR1 0x0000fe00
+#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
+#define AR9170_PHY_EXT_CCA_THRESH62_S 16
+#define AR9170_PHY_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_EXT_CCA_MIN_PWR_S 23
+
+#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
+#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
+#define AR9170_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR9170_PHY_SFCORR_EXT_M2_THRESH 0x00003f80
+#define AR9170_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001fc000
+#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0fe00000
+#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR9170_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR9170_PHY_REG_HALFGI (AR9170_PHY_REG_BASE + 0x01d0)
+#define AR9170_PHY_HALFGI_DSC_MAN 0x0007fff0
+#define AR9170_PHY_HALFGI_DSC_MAN_S 4
+#define AR9170_PHY_HALFGI_DSC_EXP 0x0000000f
+#define AR9170_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR9170_PHY_REG_CHANNEL_MASK_01_30 (AR9170_PHY_REG_BASE + 0x01d4)
+#define AR9170_PHY_REG_CHANNEL_MASK_31_60 (AR9170_PHY_REG_BASE + 0x01d8)
+
+#define AR9170_PHY_REG_CHAN_INFO_MEMORY (AR9170_PHY_REG_BASE + 0x01dc)
+#define AR9170_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR9170_PHY_REG_HEAVY_CLIP_ENABLE (AR9170_PHY_REG_BASE + 0x01e0)
+#define AR9170_PHY_REG_HEAVY_CLIP_FACTOR_RIFS (AR9170_PHY_REG_BASE + 0x01ec)
+#define AR9170_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR9170_PHY_REG_CALMODE (AR9170_PHY_REG_BASE + 0x01f0)
+#define AR9170_PHY_CALMODE_IQ 0x00000000
+#define AR9170_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR9170_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR9170_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR9170_PHY_REG_REFCLKDLY (AR9170_PHY_REG_BASE + 0x01f4)
+#define AR9170_PHY_REG_REFCLKPD (AR9170_PHY_REG_BASE + 0x01f8)
+
+
+#define AR9170_PHY_REG_CAL_MEAS_0(_i) (AR9170_PHY_REG_BASE + \
+ 0x0410 + ((_i) << 12))
+#define AR9170_PHY_REG_CAL_MEAS_1(_i) (AR9170_PHY_REG_BASE + \
+ 0x0414 + ((_i) << 12))
+#define AR9170_PHY_REG_CAL_MEAS_2(_i) (AR9170_PHY_REG_BASE + \
+ 0x0418 + ((_i) << 12))
+#define AR9170_PHY_REG_CAL_MEAS_3(_i) (AR9170_PHY_REG_BASE + \
+ 0x041c + ((_i) << 12))
+
+#define AR9170_PHY_REG_CURRENT_RSSI (AR9170_PHY_REG_BASE + 0x041c)
+
+#define AR9170_PHY_REG_RFBUS_GRANT (AR9170_PHY_REG_BASE + 0x0420)
+#define AR9170_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR9170_PHY_REG_CHAN_INFO_GAIN_DIFF (AR9170_PHY_REG_BASE + 0x04f4)
+#define AR9170_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR9170_PHY_REG_CHAN_INFO_GAIN (AR9170_PHY_REG_BASE + 0x04fc)
+
+#define AR9170_PHY_REG_MODE (AR9170_PHY_REG_BASE + 0x0a00)
+#define AR9170_PHY_MODE_ASYNCFIFO 0x80
+#define AR9170_PHY_MODE_AR2133 0x08
+#define AR9170_PHY_MODE_AR5111 0x00
+#define AR9170_PHY_MODE_AR5112 0x08
+#define AR9170_PHY_MODE_DYNAMIC 0x04
+#define AR9170_PHY_MODE_RF2GHZ 0x02
+#define AR9170_PHY_MODE_RF5GHZ 0x00
+#define AR9170_PHY_MODE_CCK 0x01
+#define AR9170_PHY_MODE_OFDM 0x00
+#define AR9170_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR9170_PHY_REG_CCK_TX_CTRL (AR9170_PHY_REG_BASE + 0x0a04)
+#define AR9170_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000c
+#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR9170_PHY_REG_CCK_DETECT (AR9170_PHY_REG_BASE + 0x0a08)
+#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003f
+#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001fc0
+#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR9170_PHY_REG_GAIN_2GHZ (AR9170_PHY_REG_BASE + 0x0a0c)
+#define AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2a0c)
+#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00fc0000
+#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003c00
+#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001f
+#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003e0000
+#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001f000
+#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000fc0
+#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003f
+#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR9170_PHY_REG_CCK_RXCTRL4 (AR9170_PHY_REG_BASE + 0x0a1c)
+#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01f80000
+#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR9170_PHY_REG_DAG_CTRLCCK (AR9170_PHY_REG_BASE + 0x0a28)
+#define AR9170_REG_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR9170_REG_DAG_CTRLCCK_RSSI_THR 0x0001fc00
+#define AR9170_REG_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR9170_PHY_REG_FORCE_CLKEN_CCK (AR9170_PHY_REG_BASE + 0x0a2c)
+#define AR9170_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR9170_PHY_REG_POWER_TX_RATE3 (AR9170_PHY_REG_BASE + 0x0a34)
+#define AR9170_PHY_REG_POWER_TX_RATE4 (AR9170_PHY_REG_BASE + 0x0a38)
+
+#define AR9170_PHY_REG_SCRM_SEQ_XR (AR9170_PHY_REG_BASE + 0x0a3c)
+#define AR9170_PHY_REG_HEADER_DETECT_XR (AR9170_PHY_REG_BASE + 0x0a40)
+#define AR9170_PHY_REG_CHIRP_DETECTED_XR (AR9170_PHY_REG_BASE + 0x0a44)
+#define AR9170_PHY_REG_BLUETOOTH (AR9170_PHY_REG_BASE + 0x0a54)
+
+#define AR9170_PHY_REG_TPCRG1 (AR9170_PHY_REG_BASE + 0x0a58)
+#define AR9170_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR9170_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR9170_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR9170_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR9170_PHY_TPCRG1_PD_GAIN_2 0x000c0000
+#define AR9170_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR9170_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR9170_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR9170_PHY_REG_TX_PWRCTRL4 (AR9170_PHY_REG_BASE + 0x0a64)
+#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001fe
+#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR9170_PHY_REG_ANALOG_SWAP (AR9170_PHY_REG_BASE + 0x0a68)
+#define AR9170_PHY_ANALOG_SWAP_AB 0x0001
+#define AR9170_PHY_ANALOG_SWAP_ALT_CHAIN 0x00000040
+
+#define AR9170_PHY_REG_TPCRG5 (AR9170_PHY_REG_BASE + 0x0a6c)
+#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000f
+#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003f0
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000fc00
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003f0000
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0fc00000
+#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+#define AR9170_PHY_REG_TX_PWRCTRL6_0 (AR9170_PHY_REG_BASE + 0x0a70)
+#define AR9170_PHY_REG_TX_PWRCTRL6_1 (AR9170_PHY_REG_BASE + 0x1a70)
+#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR9170_PHY_REG_TX_PWRCTRL7 (AR9170_PHY_REG_BASE + 0x0a74)
+#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01f80000
+#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR9170_PHY_REG_TX_PWRCTRL9 (AR9170_PHY_REG_BASE + 0x0a7c)
+#define AR9170_PHY_TX_DESIRED_SCALE_CCK 0x00007c00
+#define AR9170_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR9170_PHY_REG_TX_GAIN_TBL1 (AR9170_PHY_REG_BASE + 0x0b00)
+#define AR9170_PHY_TX_GAIN 0x0007f000
+#define AR9170_PHY_TX_GAIN_S 12
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR9170_PHY_REG_CL_CAL_CTL (AR9170_PHY_REG_BASE + 0x0b58)
+#define AR9170_PHY_CL_CAL_ENABLE 0x00000002
+#define AR9170_PHY_CL_CAL_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR9170_PHY_REG_POWER_TX_RATE5 (AR9170_PHY_REG_BASE + 0x0b8c)
+#define AR9170_PHY_REG_POWER_TX_RATE6 (AR9170_PHY_REG_BASE + 0x0b90)
+
+#define AR9170_PHY_REG_CH0_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x0b98)
+#define AR9170_PHY_REG_CH1_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x1b98)
+#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP 0x0000fc00
+#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR9170_PHY_REG_CAL_CHAINMASK (AR9170_PHY_REG_BASE + 0x0b9c)
+#define AR9170_PHY_REG_VIT_MASK2_M_46_61 (AR9170_PHY_REG_BASE + 0x0ba0)
+#define AR9170_PHY_REG_MASK2_M_31_45 (AR9170_PHY_REG_BASE + 0x0ba4)
+#define AR9170_PHY_REG_MASK2_M_16_30 (AR9170_PHY_REG_BASE + 0x0ba8)
+#define AR9170_PHY_REG_MASK2_M_00_15 (AR9170_PHY_REG_BASE + 0x0bac)
+#define AR9170_PHY_REG_PILOT_MASK_01_30 (AR9170_PHY_REG_BASE + 0x0bb0)
+#define AR9170_PHY_REG_PILOT_MASK_31_60 (AR9170_PHY_REG_BASE + 0x0bb4)
+#define AR9170_PHY_REG_MASK2_P_15_01 (AR9170_PHY_REG_BASE + 0x0bb8)
+#define AR9170_PHY_REG_MASK2_P_30_16 (AR9170_PHY_REG_BASE + 0x0bbc)
+#define AR9170_PHY_REG_MASK2_P_45_31 (AR9170_PHY_REG_BASE + 0x0bc0)
+#define AR9170_PHY_REG_MASK2_P_61_45 (AR9170_PHY_REG_BASE + 0x0bc4)
+#define AR9170_PHY_REG_POWER_TX_SUB (AR9170_PHY_REG_BASE + 0x0bc8)
+#define AR9170_PHY_REG_POWER_TX_RATE7 (AR9170_PHY_REG_BASE + 0x0bcc)
+#define AR9170_PHY_REG_POWER_TX_RATE8 (AR9170_PHY_REG_BASE + 0x0bd0)
+#define AR9170_PHY_REG_POWER_TX_RATE9 (AR9170_PHY_REG_BASE + 0x0bd4)
+#define AR9170_PHY_REG_XPA_CFG (AR9170_PHY_REG_BASE + 0x0bd8)
+#define AR9170_PHY_FORCE_XPA_CFG 0x000000001
+#define AR9170_PHY_FORCE_XPA_CFG_S 0
+
+#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
+#define AR9170_PHY_CH1_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CH1_CCA_MIN_PWR_S 19
+
+#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
+#define AR9170_PHY_CH2_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CH2_CCA_MIN_PWR_S 19
+
+#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
+#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR_S 23
+
+#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
+#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR_S 23
+
+#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
new file mode 100644
index 000000000..924135b8e
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -0,0 +1,1012 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * 802.11 & command trap routines
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <net/mac80211.h>
+#include "carl9170.h"
+#include "hw.h"
+#include "cmd.h"
+
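+/*
+ * Print firmware debug messages and restart the device if too many
+ * firmware error markers or a firmware BUG marker have been seen.
+ */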
+static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len)
+{
+ bool restart = false;
+ enum carl9170_restart_reasons reason = CARL9170_RR_NO_REASON;
+
+ if (len > 3) {
+ if (memcmp(buf, CARL9170_ERR_MAGIC, 3) == 0) {
+ ar->fw.err_counter++;
+ if (ar->fw.err_counter > 3) {
+ restart = true;
+ reason = CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS;
+ }
+ }
+
+ if (memcmp(buf, CARL9170_BUG_MAGIC, 3) == 0) {
+ ar->fw.bug_counter++;
+ restart = true;
+ reason = CARL9170_RR_FATAL_FIRMWARE_ERROR;
+ }
+ }
+
+ wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf);
+
+ if (restart)
+ carl9170_restart(ar, reason);
+}
+
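+/*
+ * Track the firmware's power-save state transitions and record for
+ * how long the device slept once it wakes up again.
+ */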
+static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp)
+{
+ u32 ps;
+ bool new_ps;
+
+ ps = le32_to_cpu(rsp->psm.state);
+
+ new_ps = (ps & CARL9170_PSM_COUNTER) != CARL9170_PSM_WAKE;
+ if (ar->ps.state != new_ps) {
+ if (!new_ps) {
+ ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
+ ar->ps.last_action);
+ }
+
+ ar->ps.last_action = jiffies;
+
+ ar->ps.state = new_ps;
+ }
+}
+
+static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq)
+{
+ if (ar->cmd_seq < -1)
+ return 0;
+
+ /*
+ * Initialize Counter
+ */
+ if (ar->cmd_seq < 0)
+ ar->cmd_seq = seq;
+
+ /*
+ * The sequence is strictly monotonically increasing and it never skips!
+ *
+ * Therefore we can safely assume that whenever we receive an
+ * unexpected sequence number, we have lost some valuable data.
+ */
+ if (seq != ar->cmd_seq) {
+ int count;
+
+ count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs;
+
+ wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! "
+ "w:%d g:%d\n", count, ar->cmd_seq, seq);
+
+ carl9170_restart(ar, CARL9170_RR_LOST_RSP);
+ return -EIO;
+ }
+
+ ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs;
+ return 0;
+}
+
+static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
+{
+ /*
+ * Some commands may have a variable response length
+ * and we cannot predict the correct length in advance.
+ * So we only check if we provided enough space for the data.
+ */
+ if (unlikely(ar->readlen != (len - 4))) {
+ dev_warn(&ar->udev->dev, "received invalid command response:"
+ "got %d, instead of %d\n", len - 4, ar->readlen);
+ print_hex_dump_bytes("carl9170 cmd:", DUMP_PREFIX_OFFSET,
+ ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f);
+ print_hex_dump_bytes("carl9170 rsp:", DUMP_PREFIX_OFFSET,
+ buffer, len);
+ /*
+ * Do not complete. The command times out,
+ * and we get a stack trace from there.
+ */
+ carl9170_restart(ar, CARL9170_RR_INVALID_RSP);
+ }
+
+ spin_lock(&ar->cmd_lock);
+ if (ar->readbuf) {
+ if (len >= 4)
+ memcpy(ar->readbuf, buffer + 4, len - 4);
+
+ ar->readbuf = NULL;
+ }
+ complete(&ar->cmd_wait);
+ spin_unlock(&ar->cmd_lock);
+}
+
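+/*
+ * Dispatch a buffer from the firmware: plain command responses are
+ * handed to the command completion handler, everything else is
+ * treated as an asynchronous event/trap.
+ */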
+void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
+{
+ struct carl9170_rsp *cmd = buf;
+ struct ieee80211_vif *vif;
+
+ if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
+ if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
+ carl9170_cmd_callback(ar, len, buf);
+
+ return;
+ }
+
+ if (unlikely(cmd->hdr.len != (len - 4))) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "FW: received over-/under"
+ "sized event %x (%d, but should be %d).\n",
+ cmd->hdr.cmd, cmd->hdr.len, len - 4);
+
+ print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE,
+ buf, len);
+ }
+
+ return;
+ }
+
+ /* hardware event handlers */
+ switch (cmd->hdr.cmd) {
+ case CARL9170_RSP_PRETBTT:
+ /* pre-TBTT event */
+ rcu_read_lock();
+ vif = carl9170_get_main_vif(ar);
+
+ if (!vif) {
+ rcu_read_unlock();
+ break;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ carl9170_handle_ps(ar, cmd);
+ break;
+
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ carl9170_update_beacon(ar, true);
+ break;
+
+ default:
+ break;
+ }
+ rcu_read_unlock();
+
+ break;
+
+
+ case CARL9170_RSP_TXCOMP:
+ /* TX status notification */
+ carl9170_tx_process_status(ar, cmd);
+ break;
+
+ case CARL9170_RSP_BEACON_CONFIG:
+ /*
+ * (IBSS) beacon send notification
+ * bytes: 04 c2 XX YY B4 B3 B2 B1
+ *
+ * XX always 80
+ * YY always 00
+ * B1-B4 "should" be the number of send out beacons.
+ */
+ break;
+
+ case CARL9170_RSP_ATIM:
+ /* End of Atim Window */
+ break;
+
+ case CARL9170_RSP_WATCHDOG:
+ /* Watchdog Interrupt */
+ carl9170_restart(ar, CARL9170_RR_WATCHDOG);
+ break;
+
+ case CARL9170_RSP_TEXT:
+ /* firmware debug */
+ carl9170_dbg_message(ar, (char *)buf + 4, len - 4);
+ break;
+
+ case CARL9170_RSP_HEXDUMP:
+ wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4);
+ print_hex_dump_bytes("FW:", DUMP_PREFIX_NONE,
+ (char *)buf + 4, len - 4);
+ break;
+
+ case CARL9170_RSP_RADAR:
+ if (!net_ratelimit())
+ break;
+
+ wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this "
+ "incident to linux-wireless@vger.kernel.org !\n");
+ break;
+
+ case CARL9170_RSP_GPIO:
+#ifdef CONFIG_CARL9170_WPC
+ if (ar->wps.pbc) {
+ bool state = !!(cmd->gpio.gpio & cpu_to_le32(
+ AR9170_GPIO_PORT_WPS_BUTTON_PRESSED));
+
+ if (state != ar->wps.pbc_state) {
+ ar->wps.pbc_state = state;
+ input_report_key(ar->wps.pbc, KEY_WPS_BUTTON,
+ state);
+ input_sync(ar->wps.pbc);
+ }
+ }
+#endif /* CONFIG_CARL9170_WPC */
+ break;
+
+ case CARL9170_RSP_BOOT:
+ complete(&ar->fw_boot_wait);
+ break;
+
+ default:
+ wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n",
+ cmd->hdr.cmd);
+ print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
+ break;
+ }
+}
+
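+/*
+ * Validate the MAC status of a received frame: honor the configured
+ * filter flags for PLCP/FCS/decryption errors, drop everything else
+ * that is broken and translate the PLCP rate field into mac80211's
+ * rate index.
+ */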
+static int carl9170_rx_mac_status(struct ar9170 *ar,
+ struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_channel *chan;
+ u8 error, decrypt;
+
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
+
+ error = mac->error;
+
+ if (error & AR9170_RX_ERROR_WRONG_RA) {
+ if (!ar->sniffer_enabled)
+ return -EINVAL;
+ }
+
+ if (error & AR9170_RX_ERROR_PLCP) {
+ if (!(ar->filter_state & FIF_PLCPFAIL))
+ return -EINVAL;
+
+ status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+ }
+
+ if (error & AR9170_RX_ERROR_FCS) {
+ ar->tx_fcs_errors++;
+
+ if (!(ar->filter_state & FIF_FCSFAIL))
+ return -EINVAL;
+
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ decrypt = ar9170_get_decrypt_type(mac);
+ if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
+ decrypt != AR9170_ENC_ALG_NONE) {
+ if ((decrypt == AR9170_ENC_ALG_TKIP) &&
+ (error & AR9170_RX_ERROR_MMIC))
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+
+ if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled)
+ return -ENODATA;
+
+ error &= ~(AR9170_RX_ERROR_MMIC |
+ AR9170_RX_ERROR_FCS |
+ AR9170_RX_ERROR_WRONG_RA |
+ AR9170_RX_ERROR_DECRYPT |
+ AR9170_RX_ERROR_PLCP);
+
+ /* drop any other error frames */
+ if (unlikely(error)) {
+ /* TODO: update netdevice's RX dropped/errors statistics */
+
+ if (net_ratelimit())
+ wiphy_dbg(ar->hw->wiphy, "received frame with "
+ "suspicious error code (%#x).\n", error);
+
+ return -EINVAL;
+ }
+
+ chan = ar->channel;
+ if (chan) {
+ status->band = chan->band;
+ status->freq = chan->center_freq;
+ }
+
+ switch (mac->status & AR9170_RX_STATUS_MODULATION) {
+ case AR9170_RX_STATUS_MODULATION_CCK:
+ if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
+ status->flag |= RX_FLAG_SHORTPRE;
+ switch (head->plcp[0]) {
+ case AR9170_RX_PHY_RATE_CCK_1M:
+ status->rate_idx = 0;
+ break;
+ case AR9170_RX_PHY_RATE_CCK_2M:
+ status->rate_idx = 1;
+ break;
+ case AR9170_RX_PHY_RATE_CCK_5M:
+ status->rate_idx = 2;
+ break;
+ case AR9170_RX_PHY_RATE_CCK_11M:
+ status->rate_idx = 3;
+ break;
+ default:
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "invalid plcp cck "
+ "rate (%x).\n", head->plcp[0]);
+ }
+
+ return -EINVAL;
+ }
+ break;
+
+ case AR9170_RX_STATUS_MODULATION_DUPOFDM:
+ case AR9170_RX_STATUS_MODULATION_OFDM:
+ switch (head->plcp[0] & 0xf) {
+ case AR9170_TXRX_PHY_RATE_OFDM_6M:
+ status->rate_idx = 0;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_9M:
+ status->rate_idx = 1;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_12M:
+ status->rate_idx = 2;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_18M:
+ status->rate_idx = 3;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_24M:
+ status->rate_idx = 4;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_36M:
+ status->rate_idx = 5;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_48M:
+ status->rate_idx = 6;
+ break;
+ case AR9170_TXRX_PHY_RATE_OFDM_54M:
+ status->rate_idx = 7;
+ break;
+ default:
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "invalid plcp ofdm "
+ "rate (%x).\n", head->plcp[0]);
+ }
+
+ return -EINVAL;
+ }
+ if (status->band == IEEE80211_BAND_2GHZ)
+ status->rate_idx += 4;
+ break;
+
+ case AR9170_RX_STATUS_MODULATION_HT:
+ if (head->plcp[3] & 0x80)
+ status->flag |= RX_FLAG_40MHZ;
+ if (head->plcp[6] & 0x80)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75);
+ status->flag |= RX_FLAG_HT;
+ break;
+
+ default:
+ BUG();
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static void carl9170_rx_phy_status(struct ar9170 *ar,
+ struct ar9170_rx_phystatus *phy, struct ieee80211_rx_status *status)
+{
+ int i;
+
+ BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
+
+ for (i = 0; i < 3; i++)
+ if (phy->rssi[i] != 0x80)
+ status->antenna |= BIT(i);
+
+ /* post-process RSSI */
+ for (i = 0; i < 7; i++)
+ if (phy->rssi[i] & 0x80)
+ phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
+
+ /* TODO: we could do something with phy_errors */
+ status->signal = ar->noise[0] + phy->rssi_combined;
+}
+
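+/*
+ * Copy the received frame into a freshly allocated skb; the extra
+ * headroom keeps the (QoS/A-MSDU/4-address) payload IP-aligned.
+ */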
+static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
+{
+ struct sk_buff *skb;
+ int reserved = 0;
+ struct ieee80211_hdr *hdr = (void *) buf;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ reserved += NET_IP_ALIGN;
+
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ reserved += NET_IP_ALIGN;
+ }
+
+ if (ieee80211_has_a4(hdr->frame_control))
+ reserved += NET_IP_ALIGN;
+
+ reserved = 32 + (reserved & NET_IP_ALIGN);
+
+ skb = dev_alloc_skb(len + reserved);
+ if (likely(skb)) {
+ skb_reserve(skb, reserved);
+ memcpy(skb_put(skb, len), buf, len);
+ }
+
+ return skb;
+}
+
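+/*
+ * Walk the beacon's information elements and return a pointer to
+ * the first element with the requested id, or NULL if none is found.
+ */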
+static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
+{
+ struct ieee80211_mgmt *mgmt = (void *)data;
+ u8 *pos, *end;
+
+ pos = (u8 *)mgmt->u.beacon.variable;
+ end = data + len;
+ while (pos < end) {
+ if (pos + 2 + pos[1] > end)
+ return NULL;
+
+ if (pos[0] == ie)
+ return pos;
+
+ pos += 2 + pos[1];
+ }
+ return NULL;
+}
+
+/*
+ * NOTE:
+ *
+ * The firmware is in charge of waking up the device just before
+ * the AP is expected to transmit the next beacon.
+ *
+ * This leaves the driver with the important task of deciding when
+ * to set the PHY back to bed again.
+ */
+static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
+{
+ struct ieee80211_hdr *hdr = data;
+ struct ieee80211_tim_ie *tim_ie;
+ struct ath_common *common = &ar->common;
+ u8 *tim;
+ u8 tim_len;
+ bool cam;
+
+ if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
+ return;
+
+ /* min. beacon length + FCS_LEN */
+ if (len <= 40 + FCS_LEN)
+ return;
+
+ /* check if this really is a beacon */
+ /* and only beacons from the associated BSSID, please */
+ if (!ath_is_mybeacon(common, hdr) || !common->curaid)
+ return;
+
+ ar->ps.last_beacon = jiffies;
+
+ tim = carl9170_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
+ if (!tim)
+ return;
+
+ if (tim[1] < sizeof(*tim_ie))
+ return;
+
+ tim_len = tim[1];
+ tim_ie = (struct ieee80211_tim_ie *) &tim[2];
+
+ if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period))
+ ar->ps.dtim_counter = (tim_ie->dtim_count - 1) %
+ ar->hw->conf.ps_dtim_period;
+
+ /* Check whether the PHY can be turned off again. */
+
+ /* 1. What about buffered unicast traffic for our AID? */
+ cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
+
+ /* 2. Maybe the AP wants to send multicast/broadcast data? */
+ cam |= !!(tim_ie->bitmap_ctrl & 0x01);
+
+ if (!cam) {
+ /* back to low-power land. */
+ ar->ps.off_override &= ~PS_OFF_BCN;
+ carl9170_ps_check(ar);
+ } else {
+ /* force CAM */
+ ar->ps.off_override |= PS_OFF_BCN;
+ }
+}
+
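+/*
+ * When a BlockAck that matches one of our pending BlockAckReqs is
+ * received, mark the corresponding entry on the BAR list as ACKed
+ * and remove it from the list.
+ */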
+static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
+{
+ struct ieee80211_bar *bar = data;
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue;
+
+ if (likely(!ieee80211_is_back(bar->frame_control)))
+ return;
+
+ if (len <= sizeof(*bar) + FCS_LEN)
+ return;
+
+ queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ struct sk_buff *entry_skb = entry->skb;
+ struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
+ struct ieee80211_bar *entry_bar = (void *)super->frame_data;
+
+#define TID_CHECK(a, b) ( \
+ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
+ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
+
+ if (bar->start_seq_num == entry_bar->start_seq_num &&
+ TID_CHECK(bar->control, entry_bar->control) &&
+ ether_addr_equal_64bits(bar->ra, entry_bar->ta) &&
+ ether_addr_equal_64bits(bar->ta, entry_bar->ra)) {
+ struct ieee80211_tx_info *tx_info;
+
+ tx_info = IEEE80211_SKB_CB(entry_skb);
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+#undef TID_CHECK
+}
+
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+ struct ieee80211_rx_status *rx_status)
+{
+ __le16 fc;
+
+ if ((ms & AR9170_RX_STATUS_MPDU) == AR9170_RX_STATUS_MPDU_SINGLE) {
+ /*
+ * This frame is not part of an aMPDU.
+ * Therefore it is not subjected to any
+ * of the following content restrictions.
+ */
+ return true;
+ }
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+ rx_status->ampdu_reference = ar->ampdu_ref;
+
+ /*
+ * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
+ * certain frame types can be part of an aMPDU.
+ *
+ * In order to keep the processing cost down, I opted for a
+ * stateless filter solely based on the frame control field.
+ */
+
+ fc = ((struct ieee80211_hdr *)buf)->frame_control;
+ if (ieee80211_is_data_qos(fc) && ieee80211_is_data_present(fc))
+ return true;
+
+ if (ieee80211_is_ack(fc) || ieee80211_is_back(fc) ||
+ ieee80211_is_back_req(fc))
+ return true;
+
+ if (ieee80211_is_action(fc))
+ return true;
+
+ return false;
+}
+
+static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *skb;
+
+ /* (driver) frame trap handler
+ *
+ * Because power-saving mode handling has to be implemented by
+ * the driver/firmware, we have to check each incoming beacon
+ * from the associated AP. If there's new data for us (either
+ * broadcast/multicast or unicast), we have to react quickly.
+ *
+ * So, if you want to add additional frame trap
+ * handlers, this would be the perfect place!
+ */
+
+ carl9170_ps_beacon(ar, buf, len);
+
+ carl9170_ba_check(ar, buf, len);
+
+ skb = carl9170_rx_copy_data(buf, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
+ ieee80211_rx(ar->hw, skb);
+ return 0;
+}
+
+/*
+ * If the frame alignment is right (or the kernel has
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
+ * is only a single MPDU in the USB frame, then we could
+ * submit to mac80211 the SKB directly. However, since
+ * there may be multiple packets in one SKB in stream
+ * mode, and we need to observe the proper ordering,
+ * this is non-trivial.
+ */
+static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
+{
+ struct ar9170_rx_head *head;
+ struct ar9170_rx_macstatus *mac;
+ struct ar9170_rx_phystatus *phy = NULL;
+ struct ieee80211_rx_status status;
+ int mpdu_len;
+ u8 mac_status;
+
+ if (!IS_STARTED(ar))
+ return;
+
+ if (unlikely(len < sizeof(*mac)))
+ goto drop;
+
+ memset(&status, 0, sizeof(status));
+
+ mpdu_len = len - sizeof(*mac);
+
+ mac = (void *)(buf + mpdu_len);
+ mac_status = mac->status;
+ switch (mac_status & AR9170_RX_STATUS_MPDU) {
+ case AR9170_RX_STATUS_MPDU_FIRST:
+ ar->ampdu_ref++;
+ /* Aggregated MPDUs start with a PLCP header */
+ if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
+ head = (void *) buf;
+
+ /*
+ * The PLCP header needs to be cached for the
+ * following MIDDLE + LAST A-MPDU packets.
+ *
+ * So, if you are wondering why all frames seem
+ * to share common RX status information,
+ * then you have the answer right here...
+ */
+ memcpy(&ar->rx_plcp, (void *) buf,
+ sizeof(struct ar9170_rx_head));
+
+ mpdu_len -= sizeof(struct ar9170_rx_head);
+ buf += sizeof(struct ar9170_rx_head);
+
+ ar->rx_has_plcp = true;
+ } else {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "plcp info "
+ "is clipped.\n");
+ }
+
+ goto drop;
+ }
+ break;
+
+ case AR9170_RX_STATUS_MPDU_LAST:
+ status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
+ /*
+ * The last frame of an A-MPDU has an extra tail
+ * which contains the phy status of the whole
+ * aggregate.
+ */
+ if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
+ mpdu_len -= sizeof(struct ar9170_rx_phystatus);
+ phy = (void *)(buf + mpdu_len);
+ } else {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "frame tail "
+ "is clipped.\n");
+ }
+
+ goto drop;
+ }
+
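+ /* fall through - the LAST MPDU also needs the cached PLCP header */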
+ case AR9170_RX_STATUS_MPDU_MIDDLE:
+ /* These are just data + mac status */
+ if (unlikely(!ar->rx_has_plcp)) {
+ if (!net_ratelimit())
+ return;
+
+ wiphy_err(ar->hw->wiphy, "rx stream does not start "
+ "with a first_mpdu frame tag.\n");
+
+ goto drop;
+ }
+
+ head = &ar->rx_plcp;
+ break;
+
+ case AR9170_RX_STATUS_MPDU_SINGLE:
+ /* single mpdu has both: plcp (head) and phy status (tail) */
+ head = (void *) buf;
+
+ mpdu_len -= sizeof(struct ar9170_rx_head);
+ mpdu_len -= sizeof(struct ar9170_rx_phystatus);
+
+ buf += sizeof(struct ar9170_rx_head);
+ phy = (void *)(buf + mpdu_len);
+ break;
+
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ /* FC + duration + RA + FCS */
+ if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
+ goto drop;
+
+ if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
+ goto drop;
+
+ if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
+ goto drop;
+
+ if (phy)
+ carl9170_rx_phy_status(ar, phy, &status);
+ else
+ status.flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
+ goto drop;
+
+ return;
+drop:
+ ar->rx_dropped++;
+}
+
+static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
+ const unsigned int resplen)
+{
+ struct carl9170_rsp *cmd;
+ int i = 0;
+
+ while (i < resplen) {
+ cmd = (void *) &respbuf[i];
+
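+ /* hdr.len does not account for the 4 byte response header itself */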
+ i += cmd->hdr.len + 4;
+ if (unlikely(i > resplen))
+ break;
+
+ if (carl9170_check_sequence(ar, cmd->hdr.seq))
+ break;
+
+ carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
+ }
+
+ if (unlikely(i != resplen)) {
+ if (!net_ratelimit())
+ return;
+
+ wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n");
+ print_hex_dump_bytes("rxcmd:", DUMP_PREFIX_OFFSET,
+ respbuf, resplen);
+ }
+}
+
+static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
+{
+ unsigned int i = 0;
+
+ /* weird thing, but this is the same in the original driver */
+ while (len > 2 && i < 12 && buf[0] == 0xff && buf[1] == 0xff) {
+ i += 2;
+ len -= 2;
+ buf += 2;
+ }
+
+ if (unlikely(len < 4))
+ return;
+
+ /* found the 6 * 0xffff marker? */
+ if (i == 12)
+ carl9170_rx_untie_cmds(ar, buf, len);
+ else
+ carl9170_rx_untie_data(ar, buf, len);
+}
+
+static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
+{
+ unsigned int tlen, wlen = 0, clen = 0;
+ struct ar9170_stream *rx_stream;
+ u8 *tbuf;
+
+ tbuf = buf;
+ tlen = len;
+
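+ /*
+ * Each stream element starts with a 4 byte header:
+ * a 16 bit payload length followed by a 16 bit tag.
+ * The payload itself is padded to a multiple of four.
+ */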
+ while (tlen >= 4) {
+ rx_stream = (void *) tbuf;
+ clen = le16_to_cpu(rx_stream->length);
+ wlen = ALIGN(clen, 4);
+
+ /* check if this stream element has a valid tag. */
+ if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) {
+ /*
+ * TODO: handle the highly unlikely event that the
+ * corrupted stream has the TAG at the right position.
+ */
+
+ /* check if the frame can be repaired. */
+ if (!ar->rx_failover_missing) {
+
+ /* this is not a "short read". */
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy,
+ "missing tag!\n");
+ }
+
+ __carl9170_rx(ar, tbuf, tlen);
+ return;
+ }
+
+ if (ar->rx_failover_missing > tlen) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy,
+ "possible multi "
+ "stream corruption!\n");
+ goto err_telluser;
+ } else {
+ goto err_silent;
+ }
+ }
+
+ memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ ar->rx_failover_missing -= tlen;
+
+ if (ar->rx_failover_missing <= 0) {
+ /*
+ * nested carl9170_rx_stream call!
+ *
+ * termination is guaranteed, even when the
+ * combined frame also has an element with
+ * a bad tag.
+ */
+
+ ar->rx_failover_missing = 0;
+ carl9170_rx_stream(ar, ar->rx_failover->data,
+ ar->rx_failover->len);
+
+ skb_reset_tail_pointer(ar->rx_failover);
+ skb_trim(ar->rx_failover, 0);
+ }
+
+ return;
+ }
+
+ /* check if stream is clipped */
+ if (wlen > tlen - 4) {
+ if (ar->rx_failover_missing) {
+ /* TODO: handle double stream corruption. */
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "double rx "
+ "stream corruption!\n");
+ goto err_telluser;
+ } else {
+ goto err_silent;
+ }
+ }
+
+ /*
+ * Save the incomplete data set.
+ * The firmware will resend the missing bits when
+ * the rx descriptor comes round again.
+ */
+
+ memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
+ ar->rx_failover_missing = clen - tlen;
+ return;
+ }
+ __carl9170_rx(ar, rx_stream->payload, clen);
+
+ tbuf += wlen + 4;
+ tlen -= wlen + 4;
+ }
+
+ if (tlen) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed "
+ "data left in rx stream!\n", tlen);
+ }
+
+ goto err_telluser;
+ }
+
+ return;
+
+err_telluser:
+ wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, "
+ "data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen,
+ ar->rx_failover_missing);
+
+ if (ar->rx_failover_missing)
+ print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
+ ar->rx_failover->data,
+ ar->rx_failover->len);
+
+ print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
+ buf, len);
+
+ wiphy_err(ar->hw->wiphy, "please check your hardware and cables if "
+ "you see this message frequently.\n");
+
+err_silent:
+ if (ar->rx_failover_missing) {
+ skb_reset_tail_pointer(ar->rx_failover);
+ skb_trim(ar->rx_failover, 0);
+ ar->rx_failover_missing = 0;
+ }
+}
+
+void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len)
+{
+ if (ar->fw.rx_stream)
+ carl9170_rx_stream(ar, buf, len);
+ else
+ __carl9170_rx(ar, buf, len);
+}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
new file mode 100644
index 000000000..ae86a600d
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -0,0 +1,1711 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * 802.11 xmit & status routines
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "carl9170.h"
+#include "hw.h"
+#include "cmd.h"
+
+static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
+ unsigned int queue)
+{
+ if (unlikely(modparam_noht)) {
+ return queue;
+ } else {
+ /*
+ * This is just another workaround, until
+ * someone figures out how to get QoS and
+ * AMPDU to play nicely together.
+ */
+
+ return 2; /* AC_BE */
+ }
+}
+
+static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
+ struct sk_buff *skb)
+{
+ return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
+}
+
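+/* true when the remaining device memory cannot hold even one maximum-sized frame */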
+static bool is_mem_full(struct ar9170 *ar)
+{
+ return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
+ atomic_read(&ar->mem_free_blocks));
+}
+
+static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
+{
+ int queue, i;
+ bool mem_full;
+
+ atomic_inc(&ar->tx_total_queued);
+
+ queue = skb_get_queue_mapping(skb);
+ spin_lock_bh(&ar->tx_stats_lock);
+
+ /*
+ * The driver has to accept the frame, regardless of whether the
+ * queue is full to the brim or not. We have to do the queuing internally,
+ * since mac80211 assumes that a driver which can operate with
+ * aggregated frames does not reject frames for this reason.
+ */
+ ar->tx_stats[queue].len++;
+ ar->tx_stats[queue].count++;
+
+ mem_full = is_mem_full(ar);
+ for (i = 0; i < ar->hw->queues; i++) {
+ if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
+ ieee80211_stop_queue(ar->hw, i);
+ ar->queue_stop_timeout[i] = jiffies;
+ }
+ }
+
+ spin_unlock_bh(&ar->tx_stats_lock);
+}
+
+/* needs rcu_read_lock */
+static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
+ struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_hdr *hdr = (void *) super->frame_data;
+ struct ieee80211_vif *vif;
+ unsigned int vif_id;
+
+ vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
+ CARL9170_TX_SUPER_MISC_VIF_ID_S;
+
+ if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
+ return NULL;
+
+ vif = rcu_dereference(ar->vif_priv[vif_id].vif);
+ if (unlikely(!vif))
+ return NULL;
+
+ /*
+ * Normally we should use wrappers like ieee80211_get_DA to get
+ * the correct peer ieee80211_sta.
+ *
+ * But there is a problem with indirect traffic (broadcasts, or
+ * data which is designated for other stations) in station mode.
+ * The frame will be directed to the AP for distribution and not
+ * to the actual destination.
+ */
+
+ return ieee80211_find_sta(vif, hdr->addr1);
+}
+
+static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct carl9170_sta_info *sta_info;
+
+ rcu_read_lock();
+ sta = __carl9170_get_tx_sta(ar, skb);
+ if (unlikely(!sta))
+ goto out_rcu;
+
+ sta_info = (struct carl9170_sta_info *) sta->drv_priv;
+ if (atomic_dec_return(&sta_info->pending_frames) == 0)
+ ieee80211_sta_block_awake(ar->hw, sta, false);
+
+out_rcu:
+ rcu_read_unlock();
+}
+
+static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
+{
+ int queue;
+
+ queue = skb_get_queue_mapping(skb);
+
+ spin_lock_bh(&ar->tx_stats_lock);
+
+ ar->tx_stats[queue].len--;
+
+ if (!is_mem_full(ar)) {
+ unsigned int i;
+ for (i = 0; i < ar->hw->queues; i++) {
+ if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
+ continue;
+
+ if (ieee80211_queue_stopped(ar->hw, i)) {
+ unsigned long tmp;
+
+ tmp = jiffies - ar->queue_stop_timeout[i];
+ if (tmp > ar->max_queue_stop_timeout[i])
+ ar->max_queue_stop_timeout[i] = tmp;
+ }
+
+ ieee80211_wake_queue(ar->hw, i);
+ }
+ }
+
+ spin_unlock_bh(&ar->tx_stats_lock);
+
+ if (atomic_dec_and_test(&ar->tx_total_queued))
+ complete(&ar->tx_flush);
+}
+
+static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ unsigned int chunks;
+ int cookie = -1;
+
+ atomic_inc(&ar->mem_allocs);
+
+ chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
+ if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
+ atomic_add(chunks, &ar->mem_free_blocks);
+ return -ENOSPC;
+ }
+
+ spin_lock_bh(&ar->mem_lock);
+ cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
+ spin_unlock_bh(&ar->mem_lock);
+
+ if (unlikely(cookie < 0)) {
+ atomic_add(chunks, &ar->mem_free_blocks);
+ return -ENOSPC;
+ }
+
+ super = (void *) skb->data;
+
+ /*
+ * Cookie #0 serves two special purposes:
+ * 1. The firmware might use it to generate BlockACK frames
+ * in response to incoming BlockAckReqs.
+ *
+ * 2. Prevent double-free bugs.
+ */
+ super->s.cookie = (u8) cookie + 1;
+ return 0;
+}
+
+static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ int cookie;
+
+ /* make a local copy of the cookie */
+ cookie = super->s.cookie;
+ /* invalidate cookie */
+ super->s.cookie = 0;
+
+ /*
+ * Do an out-of-bounds check on the cookie:
+ *
+ * * cookie "0" is reserved and won't be assigned to any
+ * out-going frame. Internally however, it is used to
+ * mark frames that are no longer accounted for and serves
+ * as a cheap way of preventing frames from being freed
+ * twice by _accident_. NB: There is a tiny race...
+ *
+ * * obviously, cookie number is limited by the amount
+ * of available memory blocks, so the number can
+ * never exceed the mem_blocks count.
+ */
+ if (unlikely(WARN_ON_ONCE(cookie == 0) ||
+ WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
+ return;
+
+ atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
+ &ar->mem_free_blocks);
+
+ spin_lock_bh(&ar->mem_lock);
+ bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
+ spin_unlock_bh(&ar->mem_lock);
+}
+
+/* Called from any context */
+static void carl9170_tx_release(struct kref *ref)
+{
+ struct ar9170 *ar;
+ struct carl9170_tx_info *arinfo;
+ struct ieee80211_tx_info *txinfo;
+ struct sk_buff *skb;
+
+ arinfo = container_of(ref, struct carl9170_tx_info, ref);
+ txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
+ rate_driver_data);
+ skb = container_of((void *) txinfo, struct sk_buff, cb);
+
+ ar = arinfo->ar;
+ if (WARN_ON_ONCE(!ar))
+ return;
+
+ BUILD_BUG_ON(
+ offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
+
+ memset(&txinfo->status.ack_signal, 0,
+ sizeof(struct ieee80211_tx_info) -
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
+
+ if (atomic_read(&ar->tx_total_queued))
+ ar->tx_schedule = true;
+
+ if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (!atomic_read(&ar->tx_ampdu_upload))
+ ar->tx_ampdu_schedule = true;
+
+ if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
+ struct _carl9170_tx_superframe *super;
+
+ super = (void *)skb->data;
+ txinfo->status.ampdu_len = super->s.rix;
+ txinfo->status.ampdu_ack_len = super->s.cnt;
+ } else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
+ !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /*
+ * drop redundant tx_status reports:
+ *
+ * 1. ampdu_ack_len of the final tx_status does
+ * include the feedback of this particular frame.
+ *
+ * 2. tx_status_irqsafe only queues up to 128
+ * tx feedback reports and discards the rest.
+ *
+ * 3. minstrel_ht is picky, it only accepts
+ * reports of frames with the TX_STATUS_AMPDU flag.
+ *
+ * 4. mac80211 is not particularly interested in
+ * feedback either [CTL_REQ_TX_STATUS not set]
+ */
+
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ } else {
+ /*
+ * Either the frame transmission has failed or
+ * mac80211 requested tx status.
+ */
+ }
+ }
+
+ skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+}
+
+void carl9170_tx_get_skb(struct sk_buff *skb)
+{
+ struct carl9170_tx_info *arinfo = (void *)
+ (IEEE80211_SKB_CB(skb))->rate_driver_data;
+ kref_get(&arinfo->ref);
+}
+
+int carl9170_tx_put_skb(struct sk_buff *skb)
+{
+ struct carl9170_tx_info *arinfo = (void *)
+ (IEEE80211_SKB_CB(skb))->rate_driver_data;
+
+ return kref_put(&arinfo->ref, carl9170_tx_release);
+}
+
+/* Caller must hold the tid_info->lock & rcu_read_lock */
+static void carl9170_tx_shift_bm(struct ar9170 *ar,
+ struct carl9170_sta_tid *tid_info, u16 seq)
+{
+ u16 off;
+
+ off = SEQ_DIFF(seq, tid_info->bsn);
+
+ if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
+ return;
+
+ /*
+ * Sanity check. For each MPDU we set the bit in bitmap and
+ * clear it once we receive the tx_status.
+ * But if the bit is already cleared then we've been bitten
+ * by a bug.
+ */
+ WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));
+
+ off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
+ if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
+ return;
+
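+ /* bsn may only advance up to the first frame that is
+ * still awaiting its tx_status.
+ */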
+ if (!bitmap_empty(tid_info->bitmap, off))
+ off = find_first_bit(tid_info->bitmap, off);
+
+ tid_info->bsn += off;
+ tid_info->bsn &= 0x0fff;
+
+ bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
+ off, CARL9170_BAW_BITS);
+}
+
+static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
+ struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_hdr *hdr = (void *) super->frame_data;
+ struct ieee80211_sta *sta;
+ struct carl9170_sta_info *sta_info;
+ struct carl9170_sta_tid *tid_info;
+ u8 tid;
+
+ if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
+ txinfo->flags & IEEE80211_TX_CTL_INJECTED)
+ return;
+
+ rcu_read_lock();
+ sta = __carl9170_get_tx_sta(ar, skb);
+ if (unlikely(!sta))
+ goto out_rcu;
+
+ tid = get_tid_h(hdr);
+
+ sta_info = (void *) sta->drv_priv;
+ tid_info = rcu_dereference(sta_info->agg[tid]);
+ if (!tid_info)
+ goto out_rcu;
+
+ spin_lock_bh(&tid_info->lock);
+ if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
+ carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
+
+ if (sta_info->stats[tid].clear) {
+ sta_info->stats[tid].clear = false;
+ sta_info->stats[tid].req = false;
+ sta_info->stats[tid].ampdu_len = 0;
+ sta_info->stats[tid].ampdu_ack_len = 0;
+ }
+
+ sta_info->stats[tid].ampdu_len++;
+ if (txinfo->status.rates[0].count == 1)
+ sta_info->stats[tid].ampdu_ack_len++;
+
+ if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
+ sta_info->stats[tid].req = true;
+
+ if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
+ super->s.rix = sta_info->stats[tid].ampdu_len;
+ super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
+ txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
+ if (sta_info->stats[tid].req)
+ txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ sta_info->stats[tid].clear = true;
+ }
+ spin_unlock_bh(&tid_info->lock);
+
+out_rcu:
+ rcu_read_unlock();
+}
+
+static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ /*
+ * Unlike all other frames, the status report for BARs does
+ * not directly come from the hardware as it is incapable of
+ * matching a BA to a previously sent BAR.
+ * Instead the RX-path will scan for incoming BAs and set the
+ * IEEE80211_TX_STAT_ACK if it sees one that was likely
+ * caused by a BAR from us.
+ */
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
+ struct carl9170_bar_list_entry *entry;
+ int queue = skb_get_queue_mapping(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ if (entry->skb == skb) {
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ goto out;
+ }
+ }
+
+ WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
+ queue, bar->ra, bar->ta, bar->control,
+ bar->start_seq_num);
+out:
+ rcu_read_unlock();
+ }
+}
+
+void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+ const bool success)
+{
+ struct ieee80211_tx_info *txinfo;
+
+ carl9170_tx_accounting_free(ar, skb);
+
+ txinfo = IEEE80211_SKB_CB(skb);
+
+ carl9170_tx_bar_status(ar, skb, txinfo);
+
+ if (success)
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ ar->tx_ack_failures++;
+
+ if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
+ carl9170_tx_status_process_ampdu(ar, skb, txinfo);
+
+ carl9170_tx_ps_unblock(ar, skb);
+ carl9170_tx_put_skb(skb);
+}
+
+/* This function may be called from any context */
+void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+
+ atomic_dec(&ar->tx_total_pending);
+
+ if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
+ atomic_dec(&ar->tx_ampdu_upload);
+
+ if (carl9170_tx_put_skb(skb))
+ tasklet_hi_schedule(&ar->usb_tasklet);
+}
+
+static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
+ struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(&queue->lock);
+ skb_queue_walk(queue, skb) {
+ struct _carl9170_tx_superframe *txc = (void *) skb->data;
+
+ if (txc->s.cookie != cookie)
+ continue;
+
+ __skb_unlink(skb, queue);
+ spin_unlock_bh(&queue->lock);
+
+ carl9170_release_dev_space(ar, skb);
+ return skb;
+ }
+ spin_unlock_bh(&queue->lock);
+
+ return NULL;
+}
+
+static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
+ unsigned int tries, struct ieee80211_tx_info *txinfo)
+{
+ unsigned int i;
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (txinfo->status.rates[i].idx < 0)
+ break;
+
+ if (i == rix) {
+ txinfo->status.rates[i].count = tries;
+ i++;
+ break;
+ }
+ }
+
+ for (; i < IEEE80211_TX_MAX_RATES; i++) {
+ txinfo->status.rates[i].idx = -1;
+ txinfo->status.rates[i].count = 0;
+ }
+}
+
+static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *txinfo;
+ struct carl9170_tx_info *arinfo;
+ bool restart = false;
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ spin_lock_bh(&ar->tx_status[i].lock);
+
+ skb = skb_peek(&ar->tx_status[i]);
+
+ if (!skb)
+ goto next;
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ arinfo = (void *) txinfo->rate_driver_data;
+
+ if (time_is_before_jiffies(arinfo->timeout +
+ msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)))
+ restart = true;
+
+next:
+ spin_unlock_bh(&ar->tx_status[i].lock);
+ }
+
+ if (restart) {
+ /*
+ * At least one queue has been stuck for long enough.
+ * Give the device a kick and hope it gets back to
+ * work.
+ *
+ * possible reasons may include:
+ * - frames got lost/corrupted (bad connection to the device)
+ * - stalled rx processing/usb controller hiccups
+ * - firmware errors/bugs
+ * - every bug you can think of.
+ * - all bugs you can't...
+ * - ...
+ */
+ carl9170_restart(ar, CARL9170_RR_STUCK_TX);
+ }
+}
+
+static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
+{
+ struct carl9170_sta_tid *iter;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *txinfo;
+ struct carl9170_tx_info *arinfo;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
+ if (iter->state < CARL9170_TID_STATE_IDLE)
+ continue;
+
+ spin_lock_bh(&iter->lock);
+ skb = skb_peek(&iter->queue);
+ if (!skb)
+ goto unlock;
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ arinfo = (void *)txinfo->rate_driver_data;
+ if (time_is_after_jiffies(arinfo->timeout +
+ msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
+ goto unlock;
+
+ sta = iter->sta;
+ if (WARN_ON(!sta))
+ goto unlock;
+
+ ieee80211_stop_tx_ba_session(sta, iter->tid);
+unlock:
+ spin_unlock_bh(&iter->lock);
+
+ }
+ rcu_read_unlock();
+}
+
+void carl9170_tx_janitor(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170,
+ tx_janitor.work);
+ if (!IS_STARTED(ar))
+ return;
+
+ ar->tx_janitor_last_run = jiffies;
+
+ carl9170_check_queue_stop_timeout(ar);
+ carl9170_tx_ampdu_timeout(ar);
+
+ if (!atomic_read(&ar->tx_total_queued))
+ return;
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
+ msecs_to_jiffies(CARL9170_TX_TIMEOUT));
+}
+
+static void __carl9170_tx_process_status(struct ar9170 *ar,
+ const uint8_t cookie, const uint8_t info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *txinfo;
+ unsigned int r, t, q;
+ bool success = true;
+
+ q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
+
+ skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
+ if (!skb) {
+ /*
+ * We have lost the race to another thread.
+ */
+
+ return;
+ }
+
+ txinfo = IEEE80211_SKB_CB(skb);
+
+ if (!(info & CARL9170_TX_STATUS_SUCCESS))
+ success = false;
+
+ r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
+ t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
+
+ carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
+ carl9170_tx_status(ar, skb, success);
+}
+
+void carl9170_tx_process_status(struct ar9170 *ar,
+ const struct carl9170_rsp *cmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < cmd->hdr.ext; i++) {
+ if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
+ print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
+ (void *) cmd, cmd->hdr.len + 4);
+ break;
+ }
+
+ __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
+ cmd->_tx_status[i].info);
+ }
+}
+
+static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
+ struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
+ unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
+{
+ struct ieee80211_rate *rate = NULL;
+ u8 *txpower;
+ unsigned int idx;
+
+ idx = txrate->idx;
+ *tpc = 0;
+ *phyrate = 0;
+
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ /* +1 dBm for HT40 */
+ *tpc += 2;
+
+ if (info->band == IEEE80211_BAND_2GHZ)
+ txpower = ar->power_2G_ht40;
+ else
+ txpower = ar->power_5G_ht40;
+ } else {
+ if (info->band == IEEE80211_BAND_2GHZ)
+ txpower = ar->power_2G_ht20;
+ else
+ txpower = ar->power_5G_ht20;
+ }
+
+ *phyrate = txrate->idx;
+ *tpc += txpower[idx & 7];
+ } else {
+ if (info->band == IEEE80211_BAND_2GHZ) {
+ if (idx < 4)
+ txpower = ar->power_2G_cck;
+ else
+ txpower = ar->power_2G_ofdm;
+ } else {
+ txpower = ar->power_5G_leg;
+ idx += 4;
+ }
+
+ rate = &__carl9170_ratetable[idx];
+ *tpc += txpower[(rate->hw_value & 0x30) >> 4];
+ *phyrate = rate->hw_value & 0xf;
+ }
+
+ if (ar->eeprom.tx_mask == 1) {
+ *chains = AR9170_TX_PHY_TXCHAIN_1;
+ } else {
+ if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
+ rate && rate->bitrate >= 360)
+ *chains = AR9170_TX_PHY_TXCHAIN_1;
+ else
+ *chains = AR9170_TX_PHY_TXCHAIN_2;
+ }
+
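+ /* ar->hw->conf.power_level is in dBm; tpc is kept in 0.5 dBm steps */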
+ *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
+}
+
+static __le32 carl9170_tx_physet(struct ar9170 *ar,
+ struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
+{
+ unsigned int power = 0, chains = 0, phyrate = 0;
+ __le32 tmp;
+
+ tmp = cpu_to_le32(0);
+
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
+ AR9170_TX_PHY_BW_S);
+ /* this works because 40 MHz is 2 and dup is 3 */
+ if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+ tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
+ AR9170_TX_PHY_BW_S);
+
+ if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+ tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
+
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);
+
+ /* heavy clip control */
+ tmp |= cpu_to_le32((txrate->idx & 0x7) <<
+ AR9170_TX_PHY_TX_HEAVY_CLIP_S);
+
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
+
+ /*
+ * green field preamble does not work.
+ *
+ * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
+ */
+ } else {
+ if (info->band == IEEE80211_BAND_2GHZ) {
+ if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
+ else
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
+ } else {
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
+ }
+
+ /*
+ * short preamble seems to be broken too.
+ *
+ * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ * tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
+ */
+ }
+ carl9170_tx_rate_tpc_chains(ar, info, txrate,
+ &phyrate, &power, &chains);
+
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
+ return tmp;
+}
+
+static bool carl9170_tx_rts_check(struct ar9170 *ar,
+ struct ieee80211_tx_rate *rate,
+ bool ampdu, bool multi)
+{
+ switch (ar->erp_mode) {
+ case CARL9170_ERP_AUTO:
+ if (ampdu)
+ break;
+
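+ /* fall through */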
+ case CARL9170_ERP_MAC80211:
+ if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ break;
+
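+ /* fall through */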
+ case CARL9170_ERP_RTS:
+ if (likely(!multi))
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool carl9170_tx_cts_check(struct ar9170 *ar,
+ struct ieee80211_tx_rate *rate)
+{
+ switch (ar->erp_mode) {
+ case CARL9170_ERP_AUTO:
+ case CARL9170_ERP_MAC80211:
+ if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ break;
+
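+ /* fall through */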
+ case CARL9170_ERP_CTS:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void carl9170_tx_get_rates(struct ar9170 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info;
+
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ ieee80211_get_tx_rates(vif, sta, skb,
+ info->control.rates,
+ IEEE80211_TX_MAX_RATES);
+}
+
+static void carl9170_tx_apply_rateset(struct ar9170 *ar,
+ struct ieee80211_tx_info *sinfo,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_rate *txrate;
+ struct ieee80211_tx_info *info;
+ struct _carl9170_tx_superframe *txc = (void *) skb->data;
+ int i;
+ bool ampdu;
+ bool no_ack;
+
+ info = IEEE80211_SKB_CB(skb);
+ ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
+ no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
+
+ /* Set the rate control probe flag for all (sub-) frames.
+ * This is because the TX_STATS_AMPDU flag is only set on
+ * the last frame, so it has to be inherited.
+ */
+ info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ /* NOTE: For the first rate, the ERP & AMPDU flags are directly
+ * taken from mac_control. For all fallback rates, the firmware
+ * updates the mac_control flags from the rate info field.
+ */
+ for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
+ __le32 phy_set;
+
+ txrate = &sinfo->control.rates[i];
+ if (txrate->idx < 0)
+ break;
+
+ phy_set = carl9170_tx_physet(ar, info, txrate);
+ if (i == 0) {
+ __le16 mac_tmp = cpu_to_le16(0);
+
+ /* first rate - part of the hw's frame header */
+ txc->f.phy_control = phy_set;
+
+ if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+
+ if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+ else if (carl9170_tx_cts_check(ar, txrate))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
+
+ txc->f.mac_control |= mac_tmp;
+ } else {
+ /* fallback rates are stored in the firmware's
+ * retry rate set array.
+ */
+ txc->s.rr[i - 1] = phy_set;
+ }
+
+ SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
+ txrate->count);
+
+ if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+ txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
+ CARL9170_TX_SUPER_RI_ERP_PROT_S);
+ else if (carl9170_tx_cts_check(ar, txrate))
+ txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
+ CARL9170_TX_SUPER_RI_ERP_PROT_S);
+
+ if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
+ txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
+ }
+}
+
+static int carl9170_tx_prepare(struct ar9170 *ar,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct _carl9170_tx_superframe *txc;
+ struct carl9170_vif_info *cvif;
+ struct ieee80211_tx_info *info;
+ struct carl9170_tx_info *arinfo;
+ unsigned int hw_queue;
+ __le16 mac_tmp;
+ u16 len;
+
+ BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
+ BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
+ CARL9170_TX_SUPERDESC_LEN);
+
+ BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
+ AR9170_TX_HWDESC_LEN);
+
+ BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
+ ((CARL9170_TX_SUPER_MISC_VIF_ID >>
+ CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
+
+ hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
+
+ hdr = (void *)skb->data;
+ info = IEEE80211_SKB_CB(skb);
+ len = skb->len;
+
+ /*
+ * Note: If the frame was sent through a monitor interface,
+ * the ieee80211_vif pointer can be NULL.
+ */
+ if (likely(info->control.vif))
+ cvif = (void *) info->control.vif->drv_priv;
+ else
+ cvif = NULL;
+
+ txc = (void *)skb_push(skb, sizeof(*txc));
+ memset(txc, 0, sizeof(*txc));
+
+ SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
+
+ if (likely(cvif))
+ SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
+
+ mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
+ AR9170_TX_MAC_BACKOFF);
+ mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
+ AR9170_TX_MAC_QOS);
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
+
+ if (info->control.hw_key) {
+ len += info->control.hw_key->icv_len;
+
+ switch (info->control.hw_key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
+ break;
+ default:
+ WARN_ON(1);
+ goto err_out;
+ }
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ unsigned int density, factor;
+
+ if (unlikely(!sta || !cvif))
+ goto err_out;
+
+ factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
+ density = sta->ht_cap.ampdu_density;
+
+ if (density) {
+ /*
+ * Watch out!
+ *
+ * Otus uses slightly different density values than
+ * those from the 802.11n spec.
+ */
+
+ density = max_t(unsigned int, density + 1, 7u);
+ }
+
+ SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
+ txc->s.ampdu_settings, density);
+
+ SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
+ txc->s.ampdu_settings, factor);
+ }
+
+ txc->s.len = cpu_to_le16(skb->len);
+ txc->f.length = cpu_to_le16(len + FCS_LEN);
+ txc->f.mac_control = mac_tmp;
+
+ arinfo = (void *)info->rate_driver_data;
+ arinfo->timeout = jiffies;
+ arinfo->ar = ar;
+ kref_init(&arinfo->ref);
+ return 0;
+
+err_out:
+ skb_pull(skb, sizeof(*txc));
+ return -EINVAL;
+}
+
+static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super;
+
+ super = (void *) skb->data;
+ super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
+}
+
+static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super;
+ int tmp;
+
+ super = (void *) skb->data;
+
+ tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
+ CARL9170_TX_SUPER_AMPDU_DENSITY_S;
+
+ /*
+ * In case you haven't noticed: carl9170_tx_prepare has already
+ * filled in all ampdu spacing & factor parameters.
+ * Now it's time to check whether the settings have to be
+ * updated by the firmware, or if everything is still the same.
+ *
+ * There's no sane way to handle different density values with
+ * this hardware, so we may as well just do the compare in the
+ * driver.
+ */
+
+ if (tmp != ar->current_density) {
+ ar->current_density = tmp;
+ super->s.ampdu_settings |=
+ CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
+ }
+
+ tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
+ CARL9170_TX_SUPER_AMPDU_FACTOR_S;
+
+ if (tmp != ar->current_factor) {
+ ar->current_factor = tmp;
+ super->s.ampdu_settings |=
+ CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
+ }
+}
+
+static void carl9170_tx_ampdu(struct ar9170 *ar)
+{
+ struct sk_buff_head agg;
+ struct carl9170_sta_tid *tid_info;
+ struct sk_buff *skb, *first;
+ struct ieee80211_tx_info *tx_info_first;
+ unsigned int i = 0, done_ampdus = 0;
+ u16 seq, queue, tmpssn;
+
+ atomic_inc(&ar->tx_ampdu_scheduler);
+ ar->tx_ampdu_schedule = false;
+
+ if (atomic_read(&ar->tx_ampdu_upload))
+ return;
+
+ if (!ar->tx_ampdu_list_len)
+ return;
+
+ __skb_queue_head_init(&agg);
+
+ rcu_read_lock();
+ tid_info = rcu_dereference(ar->tx_ampdu_iter);
+ if (WARN_ON_ONCE(!tid_info)) {
+ rcu_read_unlock();
+ return;
+ }
+
+retry:
+ list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
+ i++;
+
+ if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
+ continue;
+
+ queue = TID_TO_WME_AC(tid_info->tid);
+
+ spin_lock_bh(&tid_info->lock);
+ if (tid_info->state != CARL9170_TID_STATE_XMIT)
+ goto processed;
+
+ tid_info->counter++;
+ first = skb_peek(&tid_info->queue);
+ tmpssn = carl9170_get_seq(first);
+ seq = tid_info->snx;
+
+ if (unlikely(tmpssn != seq)) {
+ tid_info->state = CARL9170_TID_STATE_IDLE;
+
+ goto processed;
+ }
+
+ tx_info_first = NULL;
+ while ((skb = skb_peek(&tid_info->queue))) {
+ /* strict 0, 1, ..., n - 1, n frame sequence order */
+ if (unlikely(carl9170_get_seq(skb) != seq))
+ break;
+
+ /* don't upload more than AMPDU FACTOR allows. */
+ if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
+ (tid_info->max - 1)))
+ break;
+
+ if (!tx_info_first) {
+ carl9170_tx_get_rates(ar, tid_info->vif,
+ tid_info->sta, first);
+ tx_info_first = IEEE80211_SKB_CB(first);
+ }
+
+ carl9170_tx_apply_rateset(ar, tx_info_first, skb);
+
+ atomic_inc(&ar->tx_ampdu_upload);
+ tid_info->snx = seq = SEQ_NEXT(seq);
+ __skb_unlink(skb, &tid_info->queue);
+
+ __skb_queue_tail(&agg, skb);
+
+ if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
+ break;
+ }
+
+ if (skb_queue_empty(&tid_info->queue) ||
+ carl9170_get_seq(skb_peek(&tid_info->queue)) !=
+ tid_info->snx) {
+ /* stop the TID if A-MPDU frames are still missing,
+ * or when the queue is empty.
+ */
+
+ tid_info->state = CARL9170_TID_STATE_IDLE;
+ }
+ done_ampdus++;
+
+processed:
+ spin_unlock_bh(&tid_info->lock);
+
+ if (skb_queue_empty(&agg))
+ continue;
+
+ /* apply ampdu spacing & factor settings */
+ carl9170_set_ampdu_params(ar, skb_peek(&agg));
+
+ /* set aggregation push bit */
+ carl9170_set_immba(ar, skb_peek_tail(&agg));
+
+ spin_lock_bh(&ar->tx_pending[queue].lock);
+ skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
+ spin_unlock_bh(&ar->tx_pending[queue].lock);
+ ar->tx_schedule = true;
+ }
+ if ((done_ampdus++ == 0) && (i++ == 0))
+ goto retry;
+
+ rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
+ rcu_read_unlock();
+}
+
+static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
+ struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ struct carl9170_tx_info *arinfo;
+
+ BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
+
+ spin_lock_bh(&queue->lock);
+ skb = skb_peek(queue);
+ if (unlikely(!skb))
+ goto err_unlock;
+
+ if (carl9170_alloc_dev_space(ar, skb))
+ goto err_unlock;
+
+ __skb_unlink(skb, queue);
+ spin_unlock_bh(&queue->lock);
+
+ info = IEEE80211_SKB_CB(skb);
+ arinfo = (void *) info->rate_driver_data;
+
+ arinfo->timeout = jiffies;
+ return skb;
+
+err_unlock:
+ spin_unlock_bh(&queue->lock);
+ return NULL;
+}
+
+void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super;
+ uint8_t q = 0;
+
+ ar->tx_dropped++;
+
+ super = (void *)skb->data;
+ SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
+ ar9170_qmap[carl9170_get_queue(ar, skb)]);
+ __carl9170_tx_process_status(ar, super->s.cookie, q);
+}
+
+static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct carl9170_sta_info *sta_info;
+ struct ieee80211_tx_info *tx_info;
+
+ rcu_read_lock();
+ sta = __carl9170_get_tx_sta(ar, skb);
+ if (!sta)
+ goto out_rcu;
+
+ sta_info = (void *) sta->drv_priv;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (unlikely(sta_info->sleeping) &&
+ !(tx_info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
+ IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
+ rcu_read_unlock();
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ atomic_dec(&ar->tx_ampdu_upload);
+
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ carl9170_release_dev_space(ar, skb);
+ carl9170_tx_status(ar, skb, false);
+ return true;
+ }
+
+out_rcu:
+ rcu_read_unlock();
+ return false;
+}
+
+static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ skb->len >= sizeof(struct ieee80211_bar)) {
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue = skb_get_queue_mapping(skb);
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!WARN_ON_ONCE(!entry)) {
+ entry->skb = skb;
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ }
+ }
+}
+
+static void carl9170_tx(struct ar9170 *ar)
+{
+ struct sk_buff *skb;
+ unsigned int i, q;
+ bool schedule_garbagecollector = false;
+
+ ar->tx_schedule = false;
+
+ if (unlikely(!IS_STARTED(ar)))
+ return;
+
+ carl9170_usb_handle_tx_err(ar);
+
+ for (i = 0; i < ar->hw->queues; i++) {
+ while (!skb_queue_empty(&ar->tx_pending[i])) {
+ skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
+ if (unlikely(!skb))
+ break;
+
+ if (unlikely(carl9170_tx_ps_drop(ar, skb)))
+ continue;
+
+ carl9170_bar_check(ar, skb);
+
+ atomic_inc(&ar->tx_total_pending);
+
+ q = __carl9170_get_queue(ar, i);
+ /*
+ * NB: tx_status[i] vs. tx_status[q],
+ * TODO: Move into pick_skb or alloc_dev_space.
+ */
+ skb_queue_tail(&ar->tx_status[q], skb);
+
+ /*
+ * increase ref count to "2".
+ * Ref counting is the easiest way to solve the
+ * race between the urb's completion routine:
+ * carl9170_tx_callback
+ * and wlan tx status functions:
+ * carl9170_tx_status/janitor.
+ */
+ carl9170_tx_get_skb(skb);
+
+ carl9170_usb_tx(ar, skb);
+ schedule_garbagecollector = true;
+ }
+ }
+
+ if (!schedule_garbagecollector)
+ return;
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
+ msecs_to_jiffies(CARL9170_TX_TIMEOUT));
+}
+
+static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ struct ieee80211_tx_info *txinfo)
+{
+ struct carl9170_sta_info *sta_info;
+ struct carl9170_sta_tid *agg;
+ struct sk_buff *iter;
+ u16 tid, seq, qseq, off;
+ bool run = false;
+
+ tid = carl9170_get_tid(skb);
+ seq = carl9170_get_seq(skb);
+ sta_info = (void *) sta->drv_priv;
+
+ rcu_read_lock();
+ agg = rcu_dereference(sta_info->agg[tid]);
+
+ if (!agg)
+ goto err_unlock_rcu;
+
+ spin_lock_bh(&agg->lock);
+ if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
+ goto err_unlock;
+
+ /* check if sequence is within the BA window */
+ if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
+ goto err_unlock;
+
+ if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
+ goto err_unlock;
+
+ off = SEQ_DIFF(seq, agg->bsn);
+ if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
+ goto err_unlock;
+
+ if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
+ __skb_queue_tail(&agg->queue, skb);
+ agg->hsn = seq;
+ goto queued;
+ }
+
+ skb_queue_reverse_walk(&agg->queue, iter) {
+ qseq = carl9170_get_seq(iter);
+
+ if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
+ __skb_queue_after(&agg->queue, iter, skb);
+ goto queued;
+ }
+ }
+
+ __skb_queue_head(&agg->queue, skb);
+queued:
+
+ if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
+ if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
+ agg->state = CARL9170_TID_STATE_XMIT;
+ run = true;
+ }
+ }
+
+ spin_unlock_bh(&agg->lock);
+ rcu_read_unlock();
+
+ return run;
+
+err_unlock:
+ spin_unlock_bh(&agg->lock);
+
+err_unlock_rcu:
+ rcu_read_unlock();
+ txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ carl9170_tx_status(ar, skb, false);
+ ar->tx_dropped++;
+ return false;
+}
+
+void carl9170_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ar9170 *ar = hw->priv;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_vif *vif;
+ bool run;
+
+ if (unlikely(!IS_STARTED(ar)))
+ goto err_free;
+
+ info = IEEE80211_SKB_CB(skb);
+ vif = info->control.vif;
+
+ if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
+ goto err_free;
+
+ carl9170_tx_accounting(ar, skb);
+ /*
+ * From now on, one has to use carl9170_tx_status to free
+ * all resources which are associated with the frame.
+ */
+
+ if (sta) {
+ struct carl9170_sta_info *stai = (void *) sta->drv_priv;
+ atomic_inc(&stai->pending_frames);
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ /* to static code analyzers and reviewers:
+ * mac80211 guarantees that a valid "sta"
+ * reference is present, if a frame is to
+ * be part of an ampdu. Hence any extra
+ * sta == NULL checks are redundant in this
+ * special case.
+ */
+ run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
+ if (run)
+ carl9170_tx_ampdu(ar);
+
+ } else {
+ unsigned int queue = skb_get_queue_mapping(skb);
+
+ carl9170_tx_get_rates(ar, vif, sta, skb);
+ carl9170_tx_apply_rateset(ar, info, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
+ }
+
+ carl9170_tx(ar);
+ return;
+
+err_free:
+ ar->tx_dropped++;
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+void carl9170_tx_scheduler(struct ar9170 *ar)
+{
+
+ if (ar->tx_ampdu_schedule)
+ carl9170_tx_ampdu(ar);
+
+ if (ar->tx_schedule)
+ carl9170_tx(ar);
+}
+
+/* caller has to take rcu_read_lock */
+static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
+{
+ struct carl9170_vif_info *cvif;
+ int i = 1;
+
+ /* The AR9170 hardware has no fancy beacon queue or some
+ * other scheduling mechanism. So, the driver has to make
+ * do by setting the two beacon timers (pretbtt and tbtt)
+ * once and then swapping the beacon address in the HW's
+ * register file each time the pretbtt fires.
+ */
+
+ cvif = rcu_dereference(ar->beacon_iter);
+ if (ar->vifs > 0 && cvif) {
+ do {
+ list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
+ list) {
+ if (cvif->active && cvif->enable_beacon)
+ goto out;
+ }
+ } while (ar->beacon_enabled && i--);
+ }
+
+out:
+ RCU_INIT_POINTER(ar->beacon_iter, cvif);
+ return cvif;
+}
+
+static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
+ u32 *ht1, u32 *plcp)
+{
+ struct ieee80211_tx_info *txinfo;
+ struct ieee80211_tx_rate *rate;
+ unsigned int power, chains;
+ bool ht_rate;
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ rate = &txinfo->control.rates[0];
+ ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
+ carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);
+
+ *ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
+ if (chains == AR9170_TX_PHY_TXCHAIN_2)
+ *ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+ SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
+ SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
+ SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);
+
+ if (ht_rate) {
+ *ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ *plcp |= AR9170_MAC_BCN_HT2_SGI;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ } else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ }
+
+ SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
+ } else {
+ if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
+ *plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+ else
+ *plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
+ }
+
+ return ht_rate;
+}
+
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+{
+ struct sk_buff *skb = NULL;
+ struct carl9170_vif_info *cvif;
+ __le32 *data, *old = NULL;
+ u32 word, ht1, plcp, off, addr, len;
+ int i = 0, err = 0;
+ bool ht_rate;
+
+ rcu_read_lock();
+ cvif = carl9170_pick_beaconing_vif(ar);
+ if (!cvif)
+ goto out_unlock;
+
+ skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
+ NULL, NULL);
+
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ spin_lock_bh(&ar->beacon_lock);
+ data = (__le32 *)skb->data;
+ if (cvif->beacon)
+ old = (__le32 *)cvif->beacon->data;
+
+ off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
+ addr = ar->fw.beacon_addr + off;
+ len = roundup(skb->len + FCS_LEN, 4);
+
+ if ((off + len) > ar->fw.beacon_max_len) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "beacon does not "
+ "fit into device memory!\n");
+ }
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (len > AR9170_MAC_BCN_LENGTH_MAX) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "no support for beacons "
+ "bigger than %d (yours:%d).\n",
+ AR9170_MAC_BCN_LENGTH_MAX, len);
+ }
+
+ err = -EMSGSIZE;
+ goto err_unlock;
+ }
+
+ ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
+
+ carl9170_async_regwrite_begin(ar);
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
+ if (ht_rate)
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
+ else
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
+
+ for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
+ /*
+ * XXX: This accesses beyond skb data for up
+ * to the last 3 bytes!!
+ */
+
+ if (old && (data[i] == old[i]))
+ continue;
+
+ word = le32_to_cpu(data[i]);
+ carl9170_async_regwrite(addr + 4 * i, word);
+ }
+ carl9170_async_regwrite_finish();
+
+ dev_kfree_skb_any(cvif->beacon);
+ cvif->beacon = NULL;
+
+ err = carl9170_async_regwrite_result();
+ if (!err)
+ cvif->beacon = skb;
+ spin_unlock_bh(&ar->beacon_lock);
+ if (err)
+ goto err_free;
+
+ if (submit) {
+ err = carl9170_bcn_ctrl(ar, cvif->id,
+ CARL9170_BCN_CTRL_CAB_TRIGGER,
+ addr, skb->len + FCS_LEN);
+
+ if (err)
+ goto err_free;
+ }
+out_unlock:
+ rcu_read_unlock();
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&ar->beacon_lock);
+
+err_free:
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ return err;
+}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
new file mode 100644
index 000000000..c9f93310c
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -0,0 +1,1201 @@
+/*
+ * Atheros CARL9170 driver
+ *
+ * USB - frontend
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <net/mac80211.h>
+#include "carl9170.h"
+#include "cmd.h"
+#include "hw.h"
+#include "fwcmd.h"
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@googlemail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
+MODULE_FIRMWARE(CARL9170FW_NAME);
+MODULE_ALIAS("ar9170usb");
+MODULE_ALIAS("arusb_lnx");
+
+/*
+ * Note:
+ *
+ * Always update our wiki's device list (located at:
+ * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ),
+ * whenever you add a new device.
+ */
+static struct usb_device_id carl9170_usb_ids[] = {
+ /* Atheros 9170 */
+ { USB_DEVICE(0x0cf3, 0x9170) },
+ /* Atheros TG121N */
+ { USB_DEVICE(0x0cf3, 0x1001) },
+ /* TP-Link TL-WN821N v2 */
+ { USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON |
+ CARL9170_ONE_LED },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
+ /* Cace Airpcap NX */
+ { USB_DEVICE(0xcace, 0x0300) },
+ /* D-Link DWA 160 A1 */
+ { USB_DEVICE(0x07d1, 0x3c10) },
+ /* D-Link DWA 160 A2 */
+ { USB_DEVICE(0x07d1, 0x3a09) },
+ /* D-Link DWA 130 D */
+ { USB_DEVICE(0x07d1, 0x3a0f) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
+ /* Netgear WNDA3100 (v1) */
+ { USB_DEVICE(0x0846, 0x9010) },
+ /* Netgear WN111 v2 */
+ { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
+ /* Zydas ZD1221 */
+ { USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
+ /* ZyXEL NWD271N */
+ { USB_DEVICE(0x0586, 0x3417) },
+ /* Z-Com UB81 BG */
+ { USB_DEVICE(0x0cde, 0x0023) },
+ /* Z-Com UB82 ABG */
+ { USB_DEVICE(0x0cde, 0x0026) },
+ /* Sphairon Homelink 1202 */
+ { USB_DEVICE(0x0cde, 0x0027) },
+ /* Arcadyan WN7512 */
+ { USB_DEVICE(0x083a, 0xf522) },
+ /* Planex GWUS300 */
+ { USB_DEVICE(0x2019, 0x5304) },
+ /* IO-Data WNGDNUS2 */
+ { USB_DEVICE(0x04bb, 0x093f) },
+ /* NEC WL300NU-G */
+ { USB_DEVICE(0x0409, 0x0249) },
+ /* NEC WL300NU-AG */
+ { USB_DEVICE(0x0409, 0x02b4) },
+ /* AVM FRITZ!WLAN USB Stick N */
+ { USB_DEVICE(0x057c, 0x8401) },
+ /* AVM FRITZ!WLAN USB Stick N 2.4 */
+ { USB_DEVICE(0x057c, 0x8402) },
+ /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
+ { USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
+
+ /* terminate */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+
+static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
+{
+ struct urb *urb;
+ int err;
+
+ if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS)
+ goto err_acc;
+
+ urb = usb_get_from_anchor(&ar->tx_wait);
+ if (!urb)
+ goto err_acc;
+
+ usb_anchor_urb(urb, &ar->tx_anch);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ if (net_ratelimit()) {
+ dev_err(&ar->udev->dev, "tx submit failed (%d)\n",
+ urb->status);
+ }
+
+ usb_unanchor_urb(urb);
+ usb_anchor_urb(urb, &ar->tx_err);
+ }
+
+ usb_free_urb(urb);
+
+ if (likely(err == 0))
+ return;
+
+err_acc:
+ atomic_dec(&ar->tx_anch_urbs);
+}
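For reference, the throttle above is an optimistic-increment pattern: the in-flight counter is bumped with atomic_inc_return(), compared against AR9170_NUM_TX_URBS, and rolled back whenever the limit is exceeded or no urb is waiting; the completion handler decrements it again. A minimal user-space sketch of the same idea, with names and limits that are illustrative only (not taken from the driver):

/* optimistic increment, roll back on failure */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 4 /* stands in for AR9170_NUM_TX_URBS */

static atomic_int in_flight;

static bool try_submit(void)
{
	/* atomic_fetch_add() returns the old value, so +1 mirrors
	 * the kernel's atomic_inc_return() semantics */
	if (atomic_fetch_add(&in_flight, 1) + 1 > MAX_IN_FLIGHT) {
		atomic_fetch_sub(&in_flight, 1); /* too busy, roll back */
		return false;
	}
	/* ... actual submission would happen here ... */
	return true;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("submit %d: %s\n", i, try_submit() ? "ok" : "deferred");
	return 0;
}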
+
+static void carl9170_usb_tx_data_complete(struct urb *urb)
+{
+ struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+
+ if (WARN_ON_ONCE(!ar)) {
+ dev_kfree_skb_irq(urb->context);
+ return;
+ }
+
+ atomic_dec(&ar->tx_anch_urbs);
+
+ switch (urb->status) {
+ /* everything is fine */
+ case 0:
+ carl9170_tx_callback(ar, (void *)urb->context);
+ break;
+
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /*
+ * Defer the frame clean-up to the tasklet worker.
+ * This is necessary, because carl9170_tx_drop
+ * does not work in an irqsave context.
+ */
+ usb_anchor_urb(urb, &ar->tx_err);
+ return;
+
+ /* a random transmission error has occurred? */
+ default:
+ if (net_ratelimit()) {
+ dev_err(&ar->udev->dev, "tx failed (%d)\n",
+ urb->status);
+ }
+
+ usb_anchor_urb(urb, &ar->tx_err);
+ break;
+ }
+
+ if (likely(IS_STARTED(ar)))
+ carl9170_usb_submit_data_urb(ar);
+}
+
+static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar)
+{
+ struct urb *urb;
+ int err;
+
+ if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) {
+ atomic_dec(&ar->tx_cmd_urbs);
+ return 0;
+ }
+
+ urb = usb_get_from_anchor(&ar->tx_cmd);
+ if (!urb) {
+ atomic_dec(&ar->tx_cmd_urbs);
+ return 0;
+ }
+
+ usb_anchor_urb(urb, &ar->tx_anch);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ usb_unanchor_urb(urb);
+ atomic_dec(&ar->tx_cmd_urbs);
+ }
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static void carl9170_usb_cmd_complete(struct urb *urb)
+{
+ struct ar9170 *ar = urb->context;
+ int err = 0;
+
+ if (WARN_ON_ONCE(!ar))
+ return;
+
+ atomic_dec(&ar->tx_cmd_urbs);
+
+ switch (urb->status) {
+ /* everything is fine */
+ case 0:
+ break;
+
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ err = urb->status;
+ break;
+ }
+
+ if (!IS_INITIALIZED(ar))
+ return;
+
+ if (err)
+ dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err);
+
+ err = carl9170_usb_submit_cmd_urb(ar);
+ if (err)
+ dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err);
+}
+
+static void carl9170_usb_rx_irq_complete(struct urb *urb)
+{
+ struct ar9170 *ar = urb->context;
+
+ if (WARN_ON_ONCE(!ar))
+ return;
+
+ switch (urb->status) {
+ /* everything is fine */
+ case 0:
+ break;
+
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ goto resubmit;
+ }
+
+ /*
+ * While the carl9170 firmware does not use this EP, the
+ * firmware loader in the EEPROM unfortunately does.
+ * Therefore we need to be ready to handle out-of-band
+ * responses and traps in case the firmware crashed and
+ * the loader took over again.
+ */
+ carl9170_handle_command_response(ar, urb->transfer_buffer,
+ urb->actual_length);
+
+resubmit:
+ usb_anchor_urb(urb, &ar->rx_anch);
+ if (unlikely(usb_submit_urb(urb, GFP_ATOMIC)))
+ usb_unanchor_urb(urb);
+}
+
+static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
+{
+ struct urb *urb;
+ int err = 0, runs = 0;
+
+ while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) &&
+ (runs++ < AR9170_NUM_RX_URBS)) {
+ err = -ENOSPC;
+ urb = usb_get_from_anchor(&ar->rx_pool);
+ if (urb) {
+ usb_anchor_urb(urb, &ar->rx_anch);
+ err = usb_submit_urb(urb, gfp);
+ if (unlikely(err)) {
+ usb_unanchor_urb(urb);
+ usb_anchor_urb(urb, &ar->rx_pool);
+ } else {
+ atomic_dec(&ar->rx_pool_urbs);
+ atomic_inc(&ar->rx_anch_urbs);
+ }
+ usb_free_urb(urb);
+ }
+ }
+
+ return err;
+}
+
+static void carl9170_usb_rx_work(struct ar9170 *ar)
+{
+ struct urb *urb;
+ int i;
+
+ for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
+ urb = usb_get_from_anchor(&ar->rx_work);
+ if (!urb)
+ break;
+
+ atomic_dec(&ar->rx_work_urbs);
+ if (IS_INITIALIZED(ar)) {
+ carl9170_rx(ar, urb->transfer_buffer,
+ urb->actual_length);
+ }
+
+ usb_anchor_urb(urb, &ar->rx_pool);
+ atomic_inc(&ar->rx_pool_urbs);
+
+ usb_free_urb(urb);
+
+ carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
+ }
+}
+
+void carl9170_usb_handle_tx_err(struct ar9170 *ar)
+{
+ struct urb *urb;
+
+ while ((urb = usb_get_from_anchor(&ar->tx_err))) {
+ struct sk_buff *skb = (void *)urb->context;
+
+ carl9170_tx_drop(ar, skb);
+ carl9170_tx_callback(ar, skb);
+ usb_free_urb(urb);
+ }
+}
+
+static void carl9170_usb_tasklet(unsigned long data)
+{
+ struct ar9170 *ar = (struct ar9170 *) data;
+
+ if (!IS_INITIALIZED(ar))
+ return;
+
+ carl9170_usb_rx_work(ar);
+
+ /*
+ * Strictly speaking: The tx scheduler is not part of the USB system.
+ * But the rx worker returns frames back to the mac80211-stack and
+ * this is the _perfect_ place to generate the next transmissions.
+ */
+ if (IS_STARTED(ar))
+ carl9170_tx_scheduler(ar);
+}
+
+static void carl9170_usb_rx_complete(struct urb *urb)
+{
+ struct ar9170 *ar = (struct ar9170 *)urb->context;
+ int err;
+
+ if (WARN_ON_ONCE(!ar))
+ return;
+
+ atomic_dec(&ar->rx_anch_urbs);
+
+ switch (urb->status) {
+ case 0:
+ /* rx path */
+ usb_anchor_urb(urb, &ar->rx_work);
+ atomic_inc(&ar->rx_work_urbs);
+ break;
+
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /* handle disconnect events */
+ return;
+
+ default:
+ /* handle all other errors */
+ usb_anchor_urb(urb, &ar->rx_pool);
+ atomic_inc(&ar->rx_pool_urbs);
+ break;
+ }
+
+ err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
+ if (unlikely(err)) {
+ /*
+ * usb_submit_rx_urb reported a problem.
+ * In case this is due to a rx buffer shortage,
+ * elevate the tasklet worker priority to
+ * the highest available level.
+ */
+ tasklet_hi_schedule(&ar->usb_tasklet);
+
+ if (atomic_read(&ar->rx_anch_urbs) == 0) {
+ /*
+ * The system is too slow to cope with
+ * the enormous workload. We have simply
+ * run out of active rx urbs and this
+ * unfortunately leads to an unpredictable
+ * device.
+ */
+
+ ieee80211_queue_work(ar->hw, &ar->ping_work);
+ }
+ } else {
+ /*
+ * Using anything less than _high_ priority absolutely
+ * kills the rx performance on my UP system...
+ */
+ tasklet_hi_schedule(&ar->usb_tasklet);
+ }
+}
+
+static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp)
+{
+ struct urb *urb;
+ void *buf;
+
+ buf = kmalloc(ar->fw.rx_size, gfp);
+ if (!buf)
+ return NULL;
+
+ urb = usb_alloc_urb(0, gfp);
+ if (!urb) {
+ kfree(buf);
+ return NULL;
+ }
+
+ usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev,
+ AR9170_USB_EP_RX), buf, ar->fw.rx_size,
+ carl9170_usb_rx_complete, ar);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ return urb;
+}
+
+static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar)
+{
+ struct urb *urb = NULL;
+ void *ibuf;
+ int err = -ENOMEM;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ goto out;
+
+ ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL);
+ if (!ibuf)
+ goto out;
+
+ usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev,
+ AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX,
+ carl9170_usb_rx_irq_complete, ar, 1);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, &ar->rx_anch);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err)
+ usb_unanchor_urb(urb);
+
+out:
+ usb_free_urb(urb);
+ return err;
+}
+
+static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar)
+{
+ struct urb *urb;
+ int i, err = -EINVAL;
+
+ /*
+ * The driver actively maintains a second shadow
+ * pool for inactive, but fully-prepared rx urbs.
+ *
+ * The pool should help the driver to master huge
+ * workload spikes without running the risk of
+ * undersupplying the hardware or wasting time by
+ * processing rx data (streams) inside the urb
+ * completion (hardirq context).
+ */
+ for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
+ urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ usb_anchor_urb(urb, &ar->rx_pool);
+ atomic_inc(&ar->rx_pool_urbs);
+ usb_free_urb(urb);
+ }
+
+ err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL);
+ if (err)
+ goto err_out;
+
+ /* the device is now waiting for the firmware. */
+ carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
+ return 0;
+
+err_out:
+
+ usb_scuttle_anchored_urbs(&ar->rx_pool);
+ usb_scuttle_anchored_urbs(&ar->rx_work);
+ usb_kill_anchored_urbs(&ar->rx_anch);
+ return err;
+}
+
+static int carl9170_usb_flush(struct ar9170 *ar)
+{
+ struct urb *urb;
+ int ret, err = 0;
+
+ while ((urb = usb_get_from_anchor(&ar->tx_wait))) {
+ struct sk_buff *skb = (void *)urb->context;
+ carl9170_tx_drop(ar, skb);
+ carl9170_tx_callback(ar, skb);
+ usb_free_urb(urb);
+ }
+
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
+ if (ret == 0)
+ err = -ETIMEDOUT;
+
+ /* let's wait a while until the tx queues have dried out */
+ ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
+ if (ret == 0)
+ err = -ETIMEDOUT;
+
+ usb_kill_anchored_urbs(&ar->tx_anch);
+ carl9170_usb_handle_tx_err(ar);
+
+ return err;
+}
+
+static void carl9170_usb_cancel_urbs(struct ar9170 *ar)
+{
+ int err;
+
+ carl9170_set_state(ar, CARL9170_UNKNOWN_STATE);
+
+ err = carl9170_usb_flush(ar);
+ if (err)
+ dev_err(&ar->udev->dev, "stuck tx urbs!\n");
+
+ usb_poison_anchored_urbs(&ar->tx_anch);
+ carl9170_usb_handle_tx_err(ar);
+ usb_poison_anchored_urbs(&ar->rx_anch);
+
+ tasklet_kill(&ar->usb_tasklet);
+
+ usb_scuttle_anchored_urbs(&ar->rx_work);
+ usb_scuttle_anchored_urbs(&ar->rx_pool);
+ usb_scuttle_anchored_urbs(&ar->tx_cmd);
+}
+
+int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
+ const bool free_buf)
+{
+ struct urb *urb;
+ int err = 0;
+
+ if (!IS_INITIALIZED(ar)) {
+ err = -EPERM;
+ goto err_free;
+ }
+
+ if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (ar->usb_ep_cmd_is_bulk)
+ usb_fill_bulk_urb(urb, ar->udev,
+ usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
+ cmd, cmd->hdr.len + 4,
+ carl9170_usb_cmd_complete, ar);
+ else
+ usb_fill_int_urb(urb, ar->udev,
+ usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
+ cmd, cmd->hdr.len + 4,
+ carl9170_usb_cmd_complete, ar, 1);
+
+ if (free_buf)
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, &ar->tx_cmd);
+ usb_free_urb(urb);
+
+ return carl9170_usb_submit_cmd_urb(ar);
+
+err_free:
+ if (free_buf)
+ kfree(cmd);
+
+ return err;
+}
+
+int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
+ unsigned int plen, void *payload, unsigned int outlen, void *out)
+{
+ int err = -ENOMEM;
+
+ if (!IS_ACCEPTING_CMD(ar))
+ return -EIO;
+
+ if (!(cmd & CARL9170_CMD_ASYNC_FLAG))
+ might_sleep();
+
+ ar->cmd.hdr.len = plen;
+ ar->cmd.hdr.cmd = cmd;
+ /* writing multiple regs fills this buffer already */
+ if (plen && payload != (u8 *)(ar->cmd.data))
+ memcpy(ar->cmd.data, payload, plen);
+
+ spin_lock_bh(&ar->cmd_lock);
+ ar->readbuf = (u8 *)out;
+ ar->readlen = outlen;
+ spin_unlock_bh(&ar->cmd_lock);
+
+ err = __carl9170_exec_cmd(ar, &ar->cmd, false);
+
+ if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
+ err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
+ if (err == 0) {
+ err = -ETIMEDOUT;
+ goto err_unbuf;
+ }
+
+ if (ar->readlen != outlen) {
+ err = -EMSGSIZE;
+ goto err_unbuf;
+ }
+ }
+
+ return 0;
+
+err_unbuf:
+ /* Maybe the device was removed while we were waiting? */
+ if (IS_STARTED(ar)) {
+ dev_err(&ar->udev->dev, "no command feedback "
+ "received (%d).\n", err);
+
+ /* provide some maybe useful debug information */
+ print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE,
+ &ar->cmd, plen + 4);
+
+ carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT);
+ }
+
+ /* invalidate to avoid completing the next command prematurely */
+ spin_lock_bh(&ar->cmd_lock);
+ ar->readbuf = NULL;
+ ar->readlen = 0;
+ spin_unlock_bh(&ar->cmd_lock);
+
+ return err;
+}
+
+void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct ar9170_stream *tx_stream;
+ void *data;
+ unsigned int len;
+
+ if (!IS_STARTED(ar))
+ goto err_drop;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ goto err_drop;
+
+ if (ar->fw.tx_stream) {
+ tx_stream = (void *) (skb->data - sizeof(*tx_stream));
+
+ len = skb->len + sizeof(*tx_stream);
+ tx_stream->length = cpu_to_le16(len);
+ tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
+ data = tx_stream;
+ } else {
+ data = skb->data;
+ len = skb->len;
+ }
+
+ usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev,
+ AR9170_USB_EP_TX), data, len,
+ carl9170_usb_tx_data_complete, skb);
+
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
+ usb_anchor_urb(urb, &ar->tx_wait);
+
+ usb_free_urb(urb);
+
+ carl9170_usb_submit_data_urb(ar);
+ return;
+
+err_drop:
+ carl9170_tx_drop(ar, skb);
+ carl9170_tx_callback(ar, skb);
+}
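When the firmware runs in tx-stream mode, carl9170_usb_tx() above builds a 4-byte header (little-endian length and tag) directly in the skb headroom, in front of the 802.11 frame. A hedged user-space sketch of that framing follows; the struct name and the 0x1234 tag are placeholders, since the real layout and the AR9170_TX_STREAM_TAG value live in carl9170.h, which is not part of this hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tx_stream_hdr {
	uint16_t length;	/* __le16: header + frame length */
	uint16_t tag;		/* __le16: stream tag */
} __attribute__((packed));

int main(void)
{
	uint8_t frame[32] = { 0 };	/* stand-in for an 802.11 frame */
	uint8_t buf[sizeof(struct tx_stream_hdr) + sizeof(frame)];
	struct tx_stream_hdr hdr = {
		.length = sizeof(buf),	/* the driver uses cpu_to_le16(skb->len + hdr) */
		.tag = 0x1234,		/* placeholder tag value */
	};

	memcpy(buf, &hdr, sizeof(hdr));	/* header sits in the skb headroom */
	memcpy(buf + sizeof(hdr), frame, sizeof(frame));
	printf("%zu byte urb payload (4 header + %zu frame)\n",
	       sizeof(buf), sizeof(frame));
	return 0;
}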
+
+static void carl9170_release_firmware(struct ar9170 *ar)
+{
+ if (ar->fw.fw) {
+ release_firmware(ar->fw.fw);
+ memset(&ar->fw, 0, sizeof(ar->fw));
+ }
+}
+
+void carl9170_usb_stop(struct ar9170 *ar)
+{
+ int ret;
+
+ carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED);
+
+ ret = carl9170_usb_flush(ar);
+ if (ret)
+ dev_err(&ar->udev->dev, "kill pending tx urbs.\n");
+
+ usb_poison_anchored_urbs(&ar->tx_anch);
+ carl9170_usb_handle_tx_err(ar);
+
+ /* kill any pending command */
+ spin_lock_bh(&ar->cmd_lock);
+ ar->readlen = 0;
+ spin_unlock_bh(&ar->cmd_lock);
+ complete_all(&ar->cmd_wait);
+
+ /* This is required to prevent an early completion on _start */
+ reinit_completion(&ar->cmd_wait);
+
+ /*
+ * Note:
+ * So far we have freed all tx urbs, but we won't dare to touch any rx urbs.
+ * Otherwise we would end up with an unresponsive device...
+ */
+}
+
+int carl9170_usb_open(struct ar9170 *ar)
+{
+ usb_unpoison_anchored_urbs(&ar->tx_anch);
+
+ carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
+ return 0;
+}
+
+static int carl9170_usb_load_firmware(struct ar9170 *ar)
+{
+ const u8 *data;
+ u8 *buf;
+ unsigned int transfer;
+ size_t len;
+ u32 addr;
+ int err = 0;
+
+ buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ data = ar->fw.fw->data;
+ len = ar->fw.fw->size;
+ addr = ar->fw.address;
+
+ /* this removes the miniboot image */
+ data += ar->fw.offset;
+ len -= ar->fw.offset;
+
+ while (len) {
+ transfer = min_t(unsigned int, len, 4096u);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
+ 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, 100);
+
+ if (err < 0) {
+ kfree(buf);
+ goto err_out;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
+ 0x31 /* FW DL COMPLETE */,
+ 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200);
+
+ if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) {
+ err = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ err = carl9170_echo_test(ar, 0x4a110123);
+ if (err)
+ goto err_out;
+
+ /* now, start the command response counter */
+ ar->cmd_seq = -1;
+
+ return 0;
+
+err_out:
+ dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err);
+ return err;
+}
+
+int carl9170_usb_restart(struct ar9170 *ar)
+{
+ int err = 0;
+
+ if (ar->intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ /*
+ * Disable the command response sequence counter check.
+ * We already know that the device/firmware is in a bad state.
+ * So, no extra points are awarded to anyone who reminds the
+ * driver about that.
+ */
+ ar->cmd_seq = -2;
+
+ err = carl9170_reboot(ar);
+
+ carl9170_usb_stop(ar);
+
+ if (err)
+ goto err_out;
+
+ tasklet_schedule(&ar->usb_tasklet);
+
+ /* The reboot procedure can take quite a while to complete. */
+ msleep(1100);
+
+ err = carl9170_usb_open(ar);
+ if (err)
+ goto err_out;
+
+ err = carl9170_usb_load_firmware(ar);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ carl9170_usb_cancel_urbs(ar);
+ return err;
+}
+
+void carl9170_usb_reset(struct ar9170 *ar)
+{
+ /*
+ * This is the last resort to get the device going again
+ * without any *user replugging action*.
+ *
+ * But there is a catch: usb_reset really is like a physical
+ * *reconnect*. The mac80211 state will be lost in the process.
+ * Therefore a userspace application, which is monitoring
+ * the link must step in.
+ */
+ carl9170_usb_cancel_urbs(ar);
+
+ carl9170_usb_stop(ar);
+
+ usb_queue_reset_device(ar->intf);
+}
+
+static int carl9170_usb_init_device(struct ar9170 *ar)
+{
+ int err;
+
+ /*
+ * The carl9170 firmware lets the driver know when it's
+ * ready for action. But we have to be prepared to gracefully
+ * handle all spurious [flushed] messages after each (re-)boot.
+ * Thus the command response counter remains disabled until it
+ * can be safely synchronized.
+ */
+ ar->cmd_seq = -2;
+
+ err = carl9170_usb_send_rx_irq_urb(ar);
+ if (err)
+ goto err_out;
+
+ err = carl9170_usb_init_rx_bulk_urbs(ar);
+ if (err)
+ goto err_unrx;
+
+ err = carl9170_usb_open(ar);
+ if (err)
+ goto err_unrx;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_usb_load_firmware(ar);
+ mutex_unlock(&ar->mutex);
+ if (err)
+ goto err_stop;
+
+ return 0;
+
+err_stop:
+ carl9170_usb_stop(ar);
+
+err_unrx:
+ carl9170_usb_cancel_urbs(ar);
+
+err_out:
+ return err;
+}
+
+static void carl9170_usb_firmware_failed(struct ar9170 *ar)
+{
+ struct device *parent = ar->udev->dev.parent;
+ struct usb_device *udev;
+
+ /*
+ * Store a copy of the usb_device pointer locally.
+ * This is because device_release_driver initiates
+ * carl9170_usb_disconnect, which in turn frees our
+ * driver context (ar).
+ */
+ udev = ar->udev;
+
+ complete(&ar->fw_load_wait);
+
+ /* unbind the device, since the firmware setup failed */
+ if (parent)
+ device_lock(parent);
+
+ device_release_driver(&udev->dev);
+ if (parent)
+ device_unlock(parent);
+
+ usb_put_dev(udev);
+}
+
+static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+{
+ int err;
+
+ err = carl9170_parse_firmware(ar);
+ if (err)
+ goto err_freefw;
+
+ err = carl9170_usb_init_device(ar);
+ if (err)
+ goto err_freefw;
+
+ err = carl9170_register(ar);
+
+ carl9170_usb_stop(ar);
+ if (err)
+ goto err_unrx;
+
+ complete(&ar->fw_load_wait);
+ usb_put_dev(ar->udev);
+ return;
+
+err_unrx:
+ carl9170_usb_cancel_urbs(ar);
+
+err_freefw:
+ carl9170_release_firmware(ar);
+ carl9170_usb_firmware_failed(ar);
+}
+
+static void carl9170_usb_firmware_step2(const struct firmware *fw,
+ void *context)
+{
+ struct ar9170 *ar = context;
+
+ if (fw) {
+ ar->fw.fw = fw;
+ carl9170_usb_firmware_finish(ar);
+ return;
+ }
+
+ dev_err(&ar->udev->dev, "firmware not found.\n");
+ carl9170_usb_firmware_failed(ar);
+}
+
+static int carl9170_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep;
+ struct ar9170 *ar;
+ struct usb_device *udev;
+ int i, err;
+
+ err = usb_reset_device(interface_to_usbdev(intf));
+ if (err)
+ return err;
+
+ ar = carl9170_alloc(sizeof(*ar));
+ if (IS_ERR(ar))
+ return PTR_ERR(ar);
+
+ udev = interface_to_usbdev(intf);
+ usb_get_dev(udev);
+ ar->udev = udev;
+ ar->intf = intf;
+ ar->features = id->driver_info;
+
+ /* We need to remember the type of endpoint 4 because it differs
+ * between high- and full-speed configuration. The high-speed
+ * configuration specifies it as interrupt and the full-speed
+ * configuration as bulk endpoint. This information is required
+ * later when sending urbs to that endpoint.
+ */
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
+ ep = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
+ usb_endpoint_dir_out(ep) &&
+ usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
+ ar->usb_ep_cmd_is_bulk = true;
+ }
+
+ usb_set_intfdata(intf, ar);
+ SET_IEEE80211_DEV(ar->hw, &intf->dev);
+
+ init_usb_anchor(&ar->rx_anch);
+ init_usb_anchor(&ar->rx_pool);
+ init_usb_anchor(&ar->rx_work);
+ init_usb_anchor(&ar->tx_wait);
+ init_usb_anchor(&ar->tx_anch);
+ init_usb_anchor(&ar->tx_cmd);
+ init_usb_anchor(&ar->tx_err);
+ init_completion(&ar->cmd_wait);
+ init_completion(&ar->fw_boot_wait);
+ init_completion(&ar->fw_load_wait);
+ tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet,
+ (unsigned long)ar);
+
+ atomic_set(&ar->tx_cmd_urbs, 0);
+ atomic_set(&ar->tx_anch_urbs, 0);
+ atomic_set(&ar->rx_work_urbs, 0);
+ atomic_set(&ar->rx_anch_urbs, 0);
+ atomic_set(&ar->rx_pool_urbs, 0);
+
+ usb_get_dev(ar->udev);
+
+ carl9170_set_state(ar, CARL9170_STOPPED);
+
+ err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+ &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+ if (err) {
+ usb_put_dev(udev);
+ usb_put_dev(udev);
+ carl9170_free(ar);
+ }
+ return err;
+}
+
+static void carl9170_usb_disconnect(struct usb_interface *intf)
+{
+ struct ar9170 *ar = usb_get_intfdata(intf);
+ struct usb_device *udev;
+
+ if (WARN_ON(!ar))
+ return;
+
+ udev = ar->udev;
+ wait_for_completion(&ar->fw_load_wait);
+
+ if (IS_INITIALIZED(ar)) {
+ carl9170_reboot(ar);
+ carl9170_usb_stop(ar);
+ }
+
+ carl9170_usb_cancel_urbs(ar);
+ carl9170_unregister(ar);
+
+ usb_set_intfdata(intf, NULL);
+
+ carl9170_release_firmware(ar);
+ carl9170_free(ar);
+ usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int carl9170_usb_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ struct ar9170 *ar = usb_get_intfdata(intf);
+
+ if (!ar)
+ return -ENODEV;
+
+ carl9170_usb_cancel_urbs(ar);
+
+ return 0;
+}
+
+static int carl9170_usb_resume(struct usb_interface *intf)
+{
+ struct ar9170 *ar = usb_get_intfdata(intf);
+ int err;
+
+ if (!ar)
+ return -ENODEV;
+
+ usb_unpoison_anchored_urbs(&ar->rx_anch);
+ carl9170_set_state(ar, CARL9170_STOPPED);
+
+ /*
+ * The USB documentation demands that [for suspend] all traffic
+ * to and from the device must stop. This would be fine, but
+ * there's a catch: the device [usb phy] does not come back.
+ *
+ * Upon resume the firmware will "kill" itself and the
+ * boot-code sorts out the magic voodoo.
+ * Not very nice, but there's not much that could go wrong.
+ */
+ msleep(1100);
+
+ err = carl9170_usb_init_device(ar);
+ if (err)
+ goto err_unrx;
+
+ return 0;
+
+err_unrx:
+ carl9170_usb_cancel_urbs(ar);
+
+ return err;
+}
+#endif /* CONFIG_PM */
+
+static struct usb_driver carl9170_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = carl9170_usb_probe,
+ .disconnect = carl9170_usb_disconnect,
+ .id_table = carl9170_usb_ids,
+ .soft_unbind = 1,
+#ifdef CONFIG_PM
+ .suspend = carl9170_usb_suspend,
+ .resume = carl9170_usb_resume,
+ .reset_resume = carl9170_usb_resume,
+#endif /* CONFIG_PM */
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(carl9170_driver);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
new file mode 100644
index 000000000..2282847d4
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -0,0 +1,7 @@
+#ifndef __CARL9170_SHARED_VERSION_H
+#define __CARL9170_SHARED_VERSION_H
+#define CARL9170FW_VERSION_YEAR 12
+#define CARL9170FW_VERSION_MONTH 12
+#define CARL9170FW_VERSION_DAY 15
+#define CARL9170FW_VERSION_GIT "1.9.7"
+#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
new file mode 100644
index 000000000..ea17995b3
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -0,0 +1,435 @@
+/*
+ * Shared Atheros AR9170 Header
+ *
+ * RX/TX meta descriptor format
+ *
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, see
+ * http://www.gnu.org/licenses/.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * Copyright (c) 2007-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __CARL9170_SHARED_WLAN_H
+#define __CARL9170_SHARED_WLAN_H
+
+#include "fwcmd.h"
+
+#define AR9170_RX_PHY_RATE_CCK_1M 0x0a
+#define AR9170_RX_PHY_RATE_CCK_2M 0x14
+#define AR9170_RX_PHY_RATE_CCK_5M 0x37
+#define AR9170_RX_PHY_RATE_CCK_11M 0x6e
+
+#define AR9170_ENC_ALG_NONE 0x0
+#define AR9170_ENC_ALG_WEP64 0x1
+#define AR9170_ENC_ALG_TKIP 0x2
+#define AR9170_ENC_ALG_AESCCMP 0x4
+#define AR9170_ENC_ALG_WEP128 0x5
+#define AR9170_ENC_ALG_WEP256 0x6
+#define AR9170_ENC_ALG_CENC 0x7
+
+#define AR9170_RX_ENC_SOFTWARE 0x8
+
+#define AR9170_RX_STATUS_MODULATION 0x03
+#define AR9170_RX_STATUS_MODULATION_S 0
+#define AR9170_RX_STATUS_MODULATION_CCK 0x00
+#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
+#define AR9170_RX_STATUS_MODULATION_HT 0x02
+#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
+
+/* depends on modulation */
+#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
+#define AR9170_RX_STATUS_GREENFIELD 0x08
+
+#define AR9170_RX_STATUS_MPDU 0x30
+#define AR9170_RX_STATUS_MPDU_S 4
+#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
+#define AR9170_RX_STATUS_MPDU_FIRST 0x20
+#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
+#define AR9170_RX_STATUS_MPDU_LAST 0x10
+
+#define AR9170_RX_STATUS_CONT_AGGR 0x40
+#define AR9170_RX_STATUS_TOTAL_ERROR 0x80
+
+#define AR9170_RX_ERROR_RXTO 0x01
+#define AR9170_RX_ERROR_OVERRUN 0x02
+#define AR9170_RX_ERROR_DECRYPT 0x04
+#define AR9170_RX_ERROR_FCS 0x08
+#define AR9170_RX_ERROR_WRONG_RA 0x10
+#define AR9170_RX_ERROR_PLCP 0x20
+#define AR9170_RX_ERROR_MMIC 0x40
+
+/* these are either-or */
+#define AR9170_TX_MAC_PROT_RTS 0x0001
+#define AR9170_TX_MAC_PROT_CTS 0x0002
+#define AR9170_TX_MAC_PROT 0x0003
+
+#define AR9170_TX_MAC_NO_ACK 0x0004
+/* if unset, MAC will only do SIFS space before frame */
+#define AR9170_TX_MAC_BACKOFF 0x0008
+#define AR9170_TX_MAC_BURST 0x0010
+#define AR9170_TX_MAC_AGGR 0x0020
+
+/* encryption is a two-bit field */
+#define AR9170_TX_MAC_ENCR_NONE 0x0000
+#define AR9170_TX_MAC_ENCR_RC4 0x0040
+#define AR9170_TX_MAC_ENCR_CENC 0x0080
+#define AR9170_TX_MAC_ENCR_AES 0x00c0
+
+#define AR9170_TX_MAC_MMIC 0x0100
+#define AR9170_TX_MAC_HW_DURATION 0x0200
+#define AR9170_TX_MAC_QOS_S 10
+#define AR9170_TX_MAC_QOS 0x0c00
+#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
+#define AR9170_TX_MAC_TXOP_RIFS 0x2000
+#define AR9170_TX_MAC_IMM_BA 0x4000
+
+/* either-or */
+#define AR9170_TX_PHY_MOD_CCK 0x00000000
+#define AR9170_TX_PHY_MOD_OFDM 0x00000001
+#define AR9170_TX_PHY_MOD_HT 0x00000002
+
+/* depends on modulation */
+#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
+#define AR9170_TX_PHY_GREENFIELD 0x00000004
+
+#define AR9170_TX_PHY_BW_S 3
+#define AR9170_TX_PHY_BW (3 << AR9170_TX_PHY_BW_S)
+#define AR9170_TX_PHY_BW_20MHZ 0
+#define AR9170_TX_PHY_BW_40MHZ 2
+#define AR9170_TX_PHY_BW_40MHZ_DUP 3
+
+#define AR9170_TX_PHY_TX_HEAVY_CLIP_S 6
+#define AR9170_TX_PHY_TX_HEAVY_CLIP (7 << \
+ AR9170_TX_PHY_TX_HEAVY_CLIP_S)
+
+#define AR9170_TX_PHY_TX_PWR_S 9
+#define AR9170_TX_PHY_TX_PWR (0x3f << \
+ AR9170_TX_PHY_TX_PWR_S)
+
+#define AR9170_TX_PHY_TXCHAIN_S 15
+#define AR9170_TX_PHY_TXCHAIN (7 << \
+ AR9170_TX_PHY_TXCHAIN_S)
+#define AR9170_TX_PHY_TXCHAIN_1 1
+/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
+#define AR9170_TX_PHY_TXCHAIN_2 5
+
+#define AR9170_TX_PHY_MCS_S 18
+#define AR9170_TX_PHY_MCS (0x7f << \
+ AR9170_TX_PHY_MCS_S)
+
+#define AR9170_TX_PHY_RATE_CCK_1M 0x0
+#define AR9170_TX_PHY_RATE_CCK_2M 0x1
+#define AR9170_TX_PHY_RATE_CCK_5M 0x2
+#define AR9170_TX_PHY_RATE_CCK_11M 0x3
+
+/* same as AR9170_RX_PHY_RATE */
+#define AR9170_TXRX_PHY_RATE_OFDM_6M 0xb
+#define AR9170_TXRX_PHY_RATE_OFDM_9M 0xf
+#define AR9170_TXRX_PHY_RATE_OFDM_12M 0xa
+#define AR9170_TXRX_PHY_RATE_OFDM_18M 0xe
+#define AR9170_TXRX_PHY_RATE_OFDM_24M 0x9
+#define AR9170_TXRX_PHY_RATE_OFDM_36M 0xd
+#define AR9170_TXRX_PHY_RATE_OFDM_48M 0x8
+#define AR9170_TXRX_PHY_RATE_OFDM_54M 0xc
+
+#define AR9170_TXRX_PHY_RATE_HT_MCS0 0x0
+#define AR9170_TXRX_PHY_RATE_HT_MCS1 0x1
+#define AR9170_TXRX_PHY_RATE_HT_MCS2 0x2
+#define AR9170_TXRX_PHY_RATE_HT_MCS3 0x3
+#define AR9170_TXRX_PHY_RATE_HT_MCS4 0x4
+#define AR9170_TXRX_PHY_RATE_HT_MCS5 0x5
+#define AR9170_TXRX_PHY_RATE_HT_MCS6 0x6
+#define AR9170_TXRX_PHY_RATE_HT_MCS7 0x7
+#define AR9170_TXRX_PHY_RATE_HT_MCS8 0x8
+#define AR9170_TXRX_PHY_RATE_HT_MCS9 0x9
+#define AR9170_TXRX_PHY_RATE_HT_MCS10 0xa
+#define AR9170_TXRX_PHY_RATE_HT_MCS11 0xb
+#define AR9170_TXRX_PHY_RATE_HT_MCS12 0xc
+#define AR9170_TXRX_PHY_RATE_HT_MCS13 0xd
+#define AR9170_TXRX_PHY_RATE_HT_MCS14 0xe
+#define AR9170_TXRX_PHY_RATE_HT_MCS15 0xf
+
+#define AR9170_TX_PHY_SHORT_GI 0x80000000
+
+#ifdef __CARL9170FW__
+struct ar9170_tx_hw_mac_control {
+ union {
+ struct {
+ /*
+ * Beware of compiler bugs in all gcc pre 4.4!
+ */
+
+ u8 erp_prot:2;
+ u8 no_ack:1;
+ u8 backoff:1;
+ u8 burst:1;
+ u8 ampdu:1;
+
+ u8 enc_mode:2;
+
+ u8 hw_mmic:1;
+ u8 hw_duration:1;
+
+ u8 qos_queue:2;
+
+ u8 disable_txop:1;
+ u8 txop_rifs:1;
+
+ u8 ba_end:1;
+ u8 probe:1;
+ } __packed;
+
+ __le16 set;
+ } __packed;
+} __packed;
+
+struct ar9170_tx_hw_phy_control {
+ union {
+ struct {
+ /*
+ * Beware of compiler bugs in all gcc pre 4.4!
+ */
+
+ u8 modulation:2;
+ u8 preamble:1;
+ u8 bandwidth:2;
+ u8:1;
+ u8 heavy_clip:3;
+ u8 tx_power:6;
+ u8 chains:3;
+ u8 mcs:7;
+ u8:6;
+ u8 short_gi:1;
+ } __packed;
+
+ __le32 set;
+ } __packed;
+} __packed;
+
+struct ar9170_tx_rate_info {
+ u8 tries:3;
+ u8 erp_prot:2;
+ u8 ampdu:1;
+ u8 free:2; /* free for use (e.g.:RIFS/TXOP/AMPDU) */
+} __packed;
+
+struct carl9170_tx_superdesc {
+ __le16 len;
+ u8 rix;
+ u8 cnt;
+ u8 cookie;
+ u8 ampdu_density:3;
+ u8 ampdu_factor:2;
+ u8 ampdu_commit_density:1;
+ u8 ampdu_commit_factor:1;
+ u8 ampdu_unused_bit:1;
+ u8 queue:2;
+ u8 assign_seq:1;
+ u8 vif_id:3;
+ u8 fill_in_tsf:1;
+ u8 cab:1;
+ u8 padding2;
+ struct ar9170_tx_rate_info ri[CARL9170_TX_MAX_RATES];
+ struct ar9170_tx_hw_phy_control rr[CARL9170_TX_MAX_RETRY_RATES];
+} __packed;
+
+struct ar9170_tx_hwdesc {
+ __le16 length;
+ struct ar9170_tx_hw_mac_control mac;
+ struct ar9170_tx_hw_phy_control phy;
+} __packed;
+
+struct ar9170_tx_frame {
+ struct ar9170_tx_hwdesc hdr;
+
+ union {
+ struct ieee80211_hdr i3e;
+ u8 payload[0];
+ } data;
+} __packed;
+
+struct carl9170_tx_superframe {
+ struct carl9170_tx_superdesc s;
+ struct ar9170_tx_frame f;
+} __packed __aligned(4);
+
+#endif /* __CARL9170FW__ */
+
+struct _ar9170_tx_hwdesc {
+ __le16 length;
+ __le16 mac_control;
+ __le32 phy_control;
+} __packed;
+
+#define CARL9170_TX_SUPER_AMPDU_DENSITY_S 0
+#define CARL9170_TX_SUPER_AMPDU_DENSITY 0x7
+#define CARL9170_TX_SUPER_AMPDU_FACTOR 0x18
+#define CARL9170_TX_SUPER_AMPDU_FACTOR_S 3
+#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY 0x20
+#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY_S 5
+#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR 0x40
+#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR_S 6
+
+#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
+#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
+#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
+#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
+#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
+#define CARL9170_TX_SUPER_MISC_CAB 0x80
+
+#define CARL9170_TX_SUPER_RI_TRIES 0x7
+#define CARL9170_TX_SUPER_RI_TRIES_S 0
+#define CARL9170_TX_SUPER_RI_ERP_PROT 0x18
+#define CARL9170_TX_SUPER_RI_ERP_PROT_S 3
+#define CARL9170_TX_SUPER_RI_AMPDU 0x20
+#define CARL9170_TX_SUPER_RI_AMPDU_S 5
+
+struct _carl9170_tx_superdesc {
+ __le16 len;
+ u8 rix;
+ u8 cnt;
+ u8 cookie;
+ u8 ampdu_settings;
+ u8 misc;
+ u8 padding;
+ u8 ri[CARL9170_TX_MAX_RATES];
+ __le32 rr[CARL9170_TX_MAX_RETRY_RATES];
+} __packed;
+
+struct _carl9170_tx_superframe {
+ struct _carl9170_tx_superdesc s;
+ struct _ar9170_tx_hwdesc f;
+ u8 frame_data[0];
+} __packed __aligned(4);
+
+#define CARL9170_TX_SUPERDESC_LEN 24
+#define AR9170_TX_HWDESC_LEN 8
+#define CARL9170_TX_SUPERFRAME_LEN (CARL9170_TX_SUPERDESC_LEN + \
+ AR9170_TX_HWDESC_LEN)
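A quick compile-time cross-check that the packed wire-format structs above really add up to the advertised 24 + 8 bytes. It assumes CARL9170_TX_MAX_RETRY_RATES is 3 and CARL9170_TX_MAX_RATES is 4; both constants come from carl9170.h, which is not shown in this hunk:

#include <stdint.h>

#define CARL9170_TX_MAX_RETRY_RATES 3	/* assumption, see carl9170.h */
#define CARL9170_TX_MAX_RATES       4	/* assumption, see carl9170.h */

struct _carl9170_tx_superdesc {
	uint16_t len;				/* __le16 */
	uint8_t rix, cnt, cookie;
	uint8_t ampdu_settings, misc, padding;
	uint8_t ri[CARL9170_TX_MAX_RATES];
	uint32_t rr[CARL9170_TX_MAX_RETRY_RATES];	/* __le32 */
} __attribute__((packed));

struct _ar9170_tx_hwdesc {
	uint16_t length;			/* __le16 */
	uint16_t mac_control;			/* __le16 */
	uint32_t phy_control;			/* __le32 */
} __attribute__((packed));

_Static_assert(sizeof(struct _carl9170_tx_superdesc) == 24,
	       "must match CARL9170_TX_SUPERDESC_LEN");
_Static_assert(sizeof(struct _ar9170_tx_hwdesc) == 8,
	       "must match AR9170_TX_HWDESC_LEN");

int main(void) { return 0; }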
+
+struct ar9170_rx_head {
+ u8 plcp[12];
+} __packed;
+
+#define AR9170_RX_HEAD_LEN 12
+
+struct ar9170_rx_phystatus {
+ union {
+ struct {
+ u8 rssi_ant0, rssi_ant1, rssi_ant2,
+ rssi_ant0x, rssi_ant1x, rssi_ant2x,
+ rssi_combined;
+ } __packed;
+ u8 rssi[7];
+ } __packed;
+
+ u8 evm_stream0[6], evm_stream1[6];
+ u8 phy_err;
+} __packed;
+
+#define AR9170_RX_PHYSTATUS_LEN 20
+
+struct ar9170_rx_macstatus {
+ u8 SAidx, DAidx;
+ u8 error;
+ u8 status;
+} __packed;
+
+#define AR9170_RX_MACSTATUS_LEN 4
+
+struct ar9170_rx_frame_single {
+ struct ar9170_rx_head phy_head;
+ struct ieee80211_hdr i3e;
+ struct ar9170_rx_phystatus phy_tail;
+ struct ar9170_rx_macstatus macstatus;
+} __packed;
+
+struct ar9170_rx_frame_head {
+ struct ar9170_rx_head phy_head;
+ struct ieee80211_hdr i3e;
+ struct ar9170_rx_macstatus macstatus;
+} __packed;
+
+struct ar9170_rx_frame_middle {
+ struct ieee80211_hdr i3e;
+ struct ar9170_rx_macstatus macstatus;
+} __packed;
+
+struct ar9170_rx_frame_tail {
+ struct ieee80211_hdr i3e;
+ struct ar9170_rx_phystatus phy_tail;
+ struct ar9170_rx_macstatus macstatus;
+} __packed;
+
+struct ar9170_rx_frame {
+ union {
+ struct ar9170_rx_frame_single single;
+ struct ar9170_rx_frame_head head;
+ struct ar9170_rx_frame_middle middle;
+ struct ar9170_rx_frame_tail tail;
+ } __packed;
+} __packed;
+
+static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
+{
+ return (t->SAidx & 0xc0) >> 4 |
+ (t->DAidx & 0xc0) >> 6;
+}
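A stand-alone check of the bit math in ar9170_get_decrypt_type(): the two top bits of SAidx land in bits 3:2 and the two top bits of DAidx in bits 1:0 of the reported cipher type:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t get_decrypt_type(uint8_t sa_idx, uint8_t da_idx)
{
	/* same expression as ar9170_get_decrypt_type() above */
	return (sa_idx & 0xc0) >> 4 | (da_idx & 0xc0) >> 6;
}

int main(void)
{
	/* SAidx[7:6] = 0b10, DAidx[7:6] = 0b01  ->  0b1001 = 9 */
	assert(get_decrypt_type(0x80, 0x40) == 0x9);
	/* top bits clear -> AR9170_ENC_ALG_NONE (0) */
	assert(get_decrypt_type(0x3f, 0x3f) == 0x0);
	printf("decrypt type example: %u\n", get_decrypt_type(0x80, 0x40));
	return 0;
}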
+
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map, if you don't
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ * * The MAC does not aggregate frames on anything other
+ * than the first HW queue.
+ * * when an AMPDU is placed [in the first hw queue] and
+ * additional frames are already queued on a different
+ * hw queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: The hardware can either do QoS or
+ * Aggregation but not both at the same time. As a
+ * result, this makes the device pretty much useless
+ * for any serious 802.11n setup.
+ */
+enum ar9170_txq {
+ AR9170_TXQ_BK = 0, /* TXQ0 */
+ AR9170_TXQ_BE, /* TXQ1 */
+ AR9170_TXQ_VI, /* TXQ2 */
+ AR9170_TXQ_VO, /* TXQ3 */
+
+ __AR9170_NUM_TXQ,
+};
+
+#define AR9170_TXQ_DEPTH 32
+
+#endif /* __CARL9170_SHARED_WLAN_H */
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
new file mode 100644
index 000000000..508eccf5d
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath.h"
+
+const char *ath_opmode_to_string(enum nl80211_iftype opmode)
+{
+ switch (opmode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ return "UNSPEC";
+ case NL80211_IFTYPE_ADHOC:
+ return "ADHOC";
+ case NL80211_IFTYPE_STATION:
+ return "STATION";
+ case NL80211_IFTYPE_AP:
+ return "AP";
+ case NL80211_IFTYPE_AP_VLAN:
+ return "AP-VLAN";
+ case NL80211_IFTYPE_WDS:
+ return "WDS";
+ case NL80211_IFTYPE_MONITOR:
+ return "MONITOR";
+ case NL80211_IFTYPE_MESH_POINT:
+ return "MESH";
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return "P2P-CLIENT";
+ case NL80211_IFTYPE_P2P_GO:
+ return "P2P-GO";
+ default:
+ return "UNKNOWN";
+ }
+}
+EXPORT_SYMBOL(ath_opmode_to_string);
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
new file mode 100644
index 000000000..c657ca26a
--- /dev/null
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+#include "ath.h"
+
+/*
+ * tolerated deviation of radar time stamp in usecs on both sides
+ * TODO: this might need to be HW-dependent
+ */
+#define PRI_TOLERANCE 16
+
+/**
+ * struct radar_types - contains array of patterns defined for one DFS domain
+ * @region: DFS regulatory domain
+ * @num_radar_types: number of radar types to follow
+ * @radar_types: radar types array
+ */
+struct radar_types {
+ enum nl80211_dfs_regions region;
+ u32 num_radar_types;
+ const struct radar_detector_specs *radar_types;
+};
+
+/* percentage on ppb threshold to trigger detection */
+#define MIN_PPB_THRESH 50
+#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
+/* percentage of pulse width tolerance */
+#define WIDTH_TOLERANCE 5
+#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
+#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
+
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ (PRF2PRI(PMAX) - PRI_TOLERANCE), \
+ (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, \
+}
+
+/* radar types as defined by ETSI EN-301-893 v1.5.1 */
+static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
+ ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
+ ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
+ ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
+ ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
+ ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
+ ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
+ ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
+};
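To make the integer arithmetic behind these tables visible, the sketch below re-computes the derived fields of ETSI type 0 (PRF 700 Hz, 18 pulses per burst) with the macros copied from this file: PRF2PRI(700) rounds to 1429 us, so the accepted PRI window becomes 1413..1445 us and PPB_THRESH(18) requires 9 pulses to trigger:

#include <stdio.h>

#define PRI_TOLERANCE	16
#define MIN_PPB_THRESH	50
#define PPB_THRESH(PPB)	((PPB * MIN_PPB_THRESH + 50) / 100)
#define PRF2PRI(PRF)	((1000000 + PRF / 2) / PRF)
#define WIDTH_TOLERANCE	5
#define WIDTH_LOWER(X)	((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X)	((X*(100+WIDTH_TOLERANCE)+50)/100)

int main(void)
{
	/* ETSI type 0: WMIN=0, WMAX=1, PMIN=PMAX=700 Hz, PRF=1, PPB=18 */
	printf("width:      %d..%d us\n", WIDTH_LOWER(0), WIDTH_UPPER(1));
	printf("pri window: %d..%d us\n",
	       PRF2PRI(700) - PRI_TOLERANCE,		/* 1429 - 16 = 1413 */
	       PRF2PRI(700) * 1 + PRI_TOLERANCE);	/* 1429 + 16 = 1445 */
	printf("ppb_thresh: %d pulses\n", PPB_THRESH(18));	/* 9 */
	return 0;
}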
+
+static const struct radar_types etsi_radar_types_v15 = {
+ .region = NL80211_DFS_ETSI,
+ .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v15),
+ .radar_types = etsi_radar_ref_types_v15,
+};
+
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, \
+}
+
+static const struct radar_detector_specs fcc_radar_ref_types[] = {
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
+ FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
+ FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
+ FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types fcc_radar_types = {
+ .region = NL80211_DFS_FCC,
+ .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
+ .radar_types = fcc_radar_ref_types,
+};
+
+#define JP_PATTERN FCC_PATTERN
+static const struct radar_detector_specs jp_radar_ref_types[] = {
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types jp_radar_types = {
+ .region = NL80211_DFS_JP,
+ .num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
+ .radar_types = jp_radar_ref_types,
+};
+
+static const struct radar_types *dfs_domains[] = {
+ &etsi_radar_types_v15,
+ &fcc_radar_types,
+ &jp_radar_types,
+};
+
+/**
+ * get_dfs_domain_radar_types() - get radar types for a given DFS domain
+ * @param region DFS region
+ * @return radar_types ptr on success, NULL if DFS domain is not supported
+ */
+static const struct radar_types *
+get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
+{
+ u32 i;
+ for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
+ if (dfs_domains[i]->region == region)
+ return dfs_domains[i];
+ }
+ return NULL;
+}
+
+/**
+ * struct channel_detector - detector elements for a DFS channel
+ * @head: list_head
+ * @freq: frequency for this channel detector in MHz
+ * @detectors: array of dynamically created detector elements for this freq
+ *
+ * Channel detectors are required to provide multi-channel DFS detection, e.g.
+ * to support off-channel scanning. A pattern detector keeps a list of the
+ * channels for which radar pulses have been reported in the past.
+ */
+struct channel_detector {
+ struct list_head head;
+ u16 freq;
+ struct pri_detector **detectors;
+};
+
+/* channel_detector_reset() - reset detector lines for a given channel */
+static void channel_detector_reset(struct dfs_pattern_detector *dpd,
+ struct channel_detector *cd)
+{
+ u32 i;
+ if (cd == NULL)
+ return;
+ for (i = 0; i < dpd->num_radar_types; i++)
+ cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
+}
+
+/* channel_detector_exit() - destructor */
+static void channel_detector_exit(struct dfs_pattern_detector *dpd,
+ struct channel_detector *cd)
+{
+ u32 i;
+ if (cd == NULL)
+ return;
+ list_del(&cd->head);
+ for (i = 0; i < dpd->num_radar_types; i++) {
+ struct pri_detector *de = cd->detectors[i];
+ if (de != NULL)
+ de->exit(de);
+ }
+ kfree(cd->detectors);
+ kfree(cd);
+}
+
+static struct channel_detector *
+channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+{
+ u32 sz, i;
+ struct channel_detector *cd;
+
+ cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
+ if (cd == NULL)
+ goto fail;
+
+ INIT_LIST_HEAD(&cd->head);
+ cd->freq = freq;
+ sz = sizeof(cd->detectors) * dpd->num_radar_types;
+ cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ if (cd->detectors == NULL)
+ goto fail;
+
+ for (i = 0; i < dpd->num_radar_types; i++) {
+ const struct radar_detector_specs *rs = &dpd->radar_spec[i];
+ struct pri_detector *de = pri_detector_init(rs);
+ if (de == NULL)
+ goto fail;
+ cd->detectors[i] = de;
+ }
+ list_add(&cd->head, &dpd->channel_detectors);
+ return cd;
+
+fail:
+ ath_dbg(dpd->common, DFS,
+ "failed to allocate channel_detector for freq=%d\n", freq);
+ channel_detector_exit(dpd, cd);
+ return NULL;
+}
+
+/**
+ * channel_detector_get() - get channel detector for given frequency
+ * @param dpd instance pointer
+ * @param freq frequency in MHz
+ * @return pointer to channel detector on success, NULL otherwise
+ *
+ * Return existing channel detector for the given frequency or return a
+ * newly created one.
+ */
+static struct channel_detector *
+channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
+{
+ struct channel_detector *cd;
+ list_for_each_entry(cd, &dpd->channel_detectors, head) {
+ if (cd->freq == freq)
+ return cd;
+ }
+ return channel_detector_create(dpd, freq);
+}
+
+/*
+ * DFS Pattern Detector
+ */
+
+/* dpd_reset(): reset all channel detectors */
+static void dpd_reset(struct dfs_pattern_detector *dpd)
+{
+ struct channel_detector *cd;
+ if (!list_empty(&dpd->channel_detectors))
+ list_for_each_entry(cd, &dpd->channel_detectors, head)
+ channel_detector_reset(dpd, cd);
+
+}
+static void dpd_exit(struct dfs_pattern_detector *dpd)
+{
+ struct channel_detector *cd, *cd0;
+ if (!list_empty(&dpd->channel_detectors))
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
+ kfree(dpd);
+}
+
+static bool
+dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
+{
+ u32 i;
+ struct channel_detector *cd;
+
+ /*
+ * pulses received for an unsupported or uninitialized
+ * domain are treated as detected radars for fail-safety
+ */
+ if (dpd->region == NL80211_DFS_UNSET)
+ return true;
+
+ cd = channel_detector_get(dpd, event->freq);
+ if (cd == NULL)
+ return false;
+
+ /*
+ * Reset the detector on time stamp wraparound (caused by a TSF
+ * reset). The check has to happen before last_pulse_ts is updated,
+ * otherwise it can never trigger.
+ */
+ if (event->ts < dpd->last_pulse_ts)
+ dpd_reset(dpd);
+ dpd->last_pulse_ts = event->ts;
+
+ /* do type individual pattern matching */
+ for (i = 0; i < dpd->num_radar_types; i++) {
+ struct pri_detector *pd = cd->detectors[i];
+ struct pri_sequence *ps = pd->add_pulse(pd, event);
+ if (ps != NULL) {
+ ath_dbg(dpd->common, DFS,
+ "DFS: radar found on freq=%d: id=%d, pri=%d, "
+ "count=%d, count_false=%d\n",
+ event->freq, pd->rs->type_id,
+ ps->pri, ps->count, ps->count_falses);
+ pd->reset(pd, dpd->last_pulse_ts);
+ return true;
+ }
+ }
+ return false;
+}
+
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+ return global_dfs_pool_stats;
+}
+
+static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
+ enum nl80211_dfs_regions region)
+{
+ const struct radar_types *rt;
+ struct channel_detector *cd, *cd0;
+
+ if (dpd->region == region)
+ return true;
+
+ dpd->region = NL80211_DFS_UNSET;
+
+ rt = get_dfs_domain_radar_types(region);
+ if (rt == NULL)
+ return false;
+
+ /* delete all channel detectors for previous DFS domain */
+ if (!list_empty(&dpd->channel_detectors))
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
+ dpd->radar_spec = rt->radar_types;
+ dpd->num_radar_types = rt->num_radar_types;
+
+ dpd->region = region;
+ return true;
+}
+
+static struct dfs_pattern_detector default_dpd = {
+ .exit = dpd_exit,
+ .set_dfs_domain = dpd_set_domain,
+ .add_pulse = dpd_add_pulse,
+ .get_stats = dpd_get_stats,
+ .region = NL80211_DFS_UNSET,
+};
+
+struct dfs_pattern_detector *
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region)
+{
+ struct dfs_pattern_detector *dpd;
+
+ if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ return NULL;
+
+ dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
+ if (dpd == NULL)
+ return NULL;
+
+ *dpd = default_dpd;
+ INIT_LIST_HEAD(&dpd->channel_detectors);
+
+ dpd->common = common;
+ if (dpd->set_dfs_domain(dpd, region))
+ return dpd;
+
+ ath_dbg(common, DFS, "Could not set DFS domain to %d\n", region);
+ kfree(dpd);
+ return NULL;
+}
+EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
new file mode 100644
index 000000000..dde2652b7
--- /dev/null
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DFS_PATTERN_DETECTOR_H
+#define DFS_PATTERN_DETECTOR_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/nl80211.h>
+
+/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+ u32 pool_reference;
+ u32 pulse_allocated;
+ u32 pulse_alloc_error;
+ u32 pulse_used;
+ u32 pseq_allocated;
+ u32 pseq_alloc_error;
+ u32 pseq_used;
+};
+
+/**
+ * struct pulse_event - describing pulses reported by PHY
+ * @ts: pulse time stamp in us
+ * @freq: channel frequency in MHz
+ * @width: pulse duration in us
+ * @rssi: rssi of radar event
+ */
+struct pulse_event {
+ u64 ts;
+ u16 freq;
+ u8 width;
+ u8 rssi;
+};
+
+/**
+ * struct radar_detector_specs - detector specs for a radar pattern type
+ * @type_id: pattern type, as defined by regulatory
+ * @width_min: minimum radar pulse width in [us]
+ * @width_max: maximum radar pulse width in [us]
+ * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
+ * @pri_max: maximum pri in [us] (including tolerance)
+ * @num_pri: maximum number of different pri for this type
+ * @ppb: pulses per bursts for this type
+ * @ppb_thresh: number of pulses required to trigger detection
+ * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ */
+struct radar_detector_specs {
+ u8 type_id;
+ u8 width_min;
+ u8 width_max;
+ u16 pri_min;
+ u16 pri_max;
+ u8 num_pri;
+ u8 ppb;
+ u8 ppb_thresh;
+ u8 max_pri_tolerance;
+};
+
+/**
+ * struct dfs_pattern_detector - DFS pattern detector
+ * @exit(): destructor
+ * @set_dfs_domain(): set DFS domain, resets detector lines upon domain changes
+ * @add_pulse(): add radar pulse to detector, returns true on detection
+ * @region: active DFS region, NL80211_DFS_UNSET until set
+ * @num_radar_types: number of different radar types
+ * @last_pulse_ts: time stamp of last valid pulse in usecs
+ * @radar_spec: array of radar detection specs
+ * @channel_detectors: list connecting channel_detector elements
+ */
+struct dfs_pattern_detector {
+ void (*exit)(struct dfs_pattern_detector *dpd);
+ bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd,
+ enum nl80211_dfs_regions region);
+ bool (*add_pulse)(struct dfs_pattern_detector *dpd,
+ struct pulse_event *pe);
+
+ struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
+ enum nl80211_dfs_regions region;
+ u8 num_radar_types;
+ u64 last_pulse_ts;
+ /* needed for ath_dbg() */
+ struct ath_common *common;
+
+ const struct radar_detector_specs *radar_spec;
+ struct list_head channel_detectors;
+};
+
+/**
+ * dfs_pattern_detector_init() - constructor for pattern detector class
+ * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
+ * @return instance pointer on success, NULL otherwise
+ */
+extern struct dfs_pattern_detector *
+dfs_pattern_detector_init(struct ath_common *common,
+ enum nl80211_dfs_regions region);
+#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
new file mode 100644
index 000000000..43b608178
--- /dev/null
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ath.h"
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
+
+/**
+ * struct pulse_elem - elements in pulse queue
+ * @ts: time stamp in usecs
+ */
+struct pulse_elem {
+ struct list_head head;
+ u64 ts;
+};
+
+/**
+ * pde_get_multiple() - get number of multiples considering a given tolerance
+ * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
+ */
+static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
+{
+ u32 remainder;
+ u32 factor;
+ u32 delta;
+
+ if (fraction == 0)
+ return 0;
+
+ delta = (val < fraction) ? (fraction - val) : (val - fraction);
+
+ if (delta <= tolerance)
+ /* val and fraction are within tolerance */
+ return 1;
+
+ factor = val / fraction;
+ remainder = val % fraction;
+ if (remainder > tolerance) {
+ /* no exact match */
+ if ((fraction - remainder) <= tolerance)
+ /* remainder is within tolerance */
+ factor++;
+ else
+ factor = 0;
+ }
+ return factor;
+}
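The following stand-alone program exercises the multiple-PRI matching above (function body copied verbatim into user space) with the 16 us PRI_TOLERANCE used by this detector: a 2860 us spacing matches a 1430 us PRI as its second multiple, while 2900 us is 40 us off and gets rejected:

#include <stdint.h>
#include <stdio.h>

static uint32_t pde_get_multiple(uint32_t val, uint32_t fraction, uint32_t tolerance)
{
	uint32_t remainder, factor, delta;

	if (fraction == 0)
		return 0;
	delta = (val < fraction) ? (fraction - val) : (val - fraction);
	if (delta <= tolerance)
		return 1;	/* within tolerance of a single PRI */
	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		if ((fraction - remainder) <= tolerance)
			factor++;	/* just below the next multiple */
		else
			factor = 0;	/* no match */
	}
	return factor;
}

int main(void)
{
	printf("%u\n", pde_get_multiple(1440, 1430, 16));	/* 1: within tolerance */
	printf("%u\n", pde_get_multiple(2860, 1430, 16));	/* 2: exact 2nd multiple */
	printf("%u\n", pde_get_multiple(2900, 1430, 16));	/* 0: 40 us off, rejected */
	return 0;
}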
+
+/**
+ * DOC: Singleton Pulse and Sequence Pools
+ *
+ * Instances of pri_sequence and pulse_elem are kept in singleton pools to
+ * reduce the number of dynamic allocations. They are shared between all
+ * instances and grow up to the peak number of simultaneously used objects.
+ *
+ * Memory is freed after all references to the pools are released.
+ */
+static u32 singleton_pool_references;
+static LIST_HEAD(pulse_pool);
+static LIST_HEAD(pseq_pool);
+static DEFINE_SPINLOCK(pool_lock);
+
+static void pool_register_ref(void)
+{
+ spin_lock_bh(&pool_lock);
+ singleton_pool_references++;
+ DFS_POOL_STAT_INC(pool_reference);
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_deregister_ref(void)
+{
+ spin_lock_bh(&pool_lock);
+ singleton_pool_references--;
+ DFS_POOL_STAT_DEC(pool_reference);
+ if (singleton_pool_references == 0) {
+ /* free singleton pools with no references left */
+ struct pri_sequence *ps, *ps0;
+ struct pulse_elem *p, *p0;
+
+ list_for_each_entry_safe(p, p0, &pulse_pool, head) {
+ list_del(&p->head);
+ DFS_POOL_STAT_DEC(pulse_allocated);
+ kfree(p);
+ }
+ list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
+ list_del(&ps->head);
+ DFS_POOL_STAT_DEC(pseq_allocated);
+ kfree(ps);
+ }
+ }
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pulse_elem(struct pulse_elem *pe)
+{
+ spin_lock_bh(&pool_lock);
+ list_add(&pe->head, &pulse_pool);
+ DFS_POOL_STAT_DEC(pulse_used);
+ spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pseq_elem(struct pri_sequence *pse)
+{
+ spin_lock_bh(&pool_lock);
+ list_add(&pse->head, &pseq_pool);
+ DFS_POOL_STAT_DEC(pseq_used);
+ spin_unlock_bh(&pool_lock);
+}
+
+static struct pri_sequence *pool_get_pseq_elem(void)
+{
+ struct pri_sequence *pse = NULL;
+ spin_lock_bh(&pool_lock);
+ if (!list_empty(&pseq_pool)) {
+ pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
+ list_del(&pse->head);
+ DFS_POOL_STAT_INC(pseq_used);
+ }
+ spin_unlock_bh(&pool_lock);
+ return pse;
+}
+
+static struct pulse_elem *pool_get_pulse_elem(void)
+{
+ struct pulse_elem *pe = NULL;
+ spin_lock_bh(&pool_lock);
+ if (!list_empty(&pulse_pool)) {
+ pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
+ list_del(&pe->head);
+ DFS_POOL_STAT_INC(pulse_used);
+ }
+ spin_unlock_bh(&pool_lock);
+ return pe;
+}
+
+static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
+{
+ struct list_head *l = &pde->pulses;
+ if (list_empty(l))
+ return NULL;
+ return list_entry(l->prev, struct pulse_elem, head);
+}
+
+static bool pulse_queue_dequeue(struct pri_detector *pde)
+{
+ struct pulse_elem *p = pulse_queue_get_tail(pde);
+ if (p != NULL) {
+ list_del_init(&p->head);
+ pde->count--;
+ /* give it back to pool */
+ pool_put_pulse_elem(p);
+ }
+ return (pde->count > 0);
+}
+
+/* remove pulses older than window */
+static void pulse_queue_check_window(struct pri_detector *pde)
+{
+ u64 min_valid_ts;
+ struct pulse_elem *p;
+
+ /* there is no delta time with less than 2 pulses */
+ if (pde->count < 2)
+ return;
+
+ if (pde->last_ts <= pde->window_size)
+ return;
+
+ min_valid_ts = pde->last_ts - pde->window_size;
+ while ((p = pulse_queue_get_tail(pde)) != NULL) {
+ if (p->ts >= min_valid_ts)
+ return;
+ pulse_queue_dequeue(pde);
+ }
+}
+
+static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
+{
+ struct pulse_elem *p = pool_get_pulse_elem();
+ if (p == NULL) {
+ p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ if (p == NULL) {
+ DFS_POOL_STAT_INC(pulse_alloc_error);
+ return false;
+ }
+ DFS_POOL_STAT_INC(pulse_allocated);
+ DFS_POOL_STAT_INC(pulse_used);
+ }
+ INIT_LIST_HEAD(&p->head);
+ p->ts = ts;
+ list_add(&p->head, &pde->pulses);
+ pde->count++;
+ pde->last_ts = ts;
+ pulse_queue_check_window(pde);
+ if (pde->count >= pde->max_count)
+ pulse_queue_dequeue(pde);
+ return true;
+}
+
+static bool pseq_handler_create_sequences(struct pri_detector *pde,
+ u64 ts, u32 min_count)
+{
+ struct pulse_elem *p;
+ list_for_each_entry(p, &pde->pulses, head) {
+ struct pri_sequence ps, *new_ps;
+ struct pulse_elem *p2;
+ u32 tmp_false_count;
+ u64 min_valid_ts;
+ u32 delta_ts = ts - p->ts;
+
+ if (delta_ts < pde->rs->pri_min)
+ /* ignore too small pri */
+ continue;
+
+ if (delta_ts > pde->rs->pri_max)
+ /* stop on too large pri (sorted list) */
+ break;
+
+ /* build a new sequence with new potential pri */
+ ps.count = 2;
+ ps.count_falses = 0;
+ ps.first_ts = p->ts;
+ ps.last_ts = ts;
+ ps.pri = ts - p->ts;
+ ps.dur = ps.pri * (pde->rs->ppb - 1)
+ + 2 * pde->rs->max_pri_tolerance;
+
+ p2 = p;
+ tmp_false_count = 0;
+ min_valid_ts = ts - ps.dur;
+ /* check which past pulses are candidates for new sequence */
+ list_for_each_entry_continue(p2, &pde->pulses, head) {
+ u32 factor;
+ if (p2->ts < min_valid_ts)
+ /* stop on crossing window border */
+ break;
+			/* check if pulse matches (multi-)PRI */
+ factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
+ pde->rs->max_pri_tolerance);
+ if (factor > 0) {
+ ps.count++;
+ ps.first_ts = p2->ts;
+ /*
+ * on match, add the intermediate falses
+ * and reset counter
+ */
+ ps.count_falses += tmp_false_count;
+ tmp_false_count = 0;
+ } else {
+ /* this is a potential false one */
+ tmp_false_count++;
+ }
+ }
+ if (ps.count < min_count)
+ /* did not reach minimum count, drop sequence */
+ continue;
+
+ /* this is a valid one, add it */
+ ps.deadline_ts = ps.first_ts + ps.dur;
+ new_ps = pool_get_pseq_elem();
+ if (new_ps == NULL) {
+ new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
+ if (new_ps == NULL) {
+ DFS_POOL_STAT_INC(pseq_alloc_error);
+ return false;
+ }
+ DFS_POOL_STAT_INC(pseq_allocated);
+ DFS_POOL_STAT_INC(pseq_used);
+ }
+ memcpy(new_ps, &ps, sizeof(ps));
+ INIT_LIST_HEAD(&new_ps->head);
+ list_add(&new_ps->head, &pde->sequences);
+ }
+ return true;
+}
+
+/* check new ts and add to all matching existing sequences */
+static u32
+pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
+{
+ u32 max_count = 0;
+ struct pri_sequence *ps, *ps2;
+ list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
+ u32 delta_ts;
+ u32 factor;
+
+ /* first ensure that sequence is within window */
+ if (ts > ps->deadline_ts) {
+ list_del_init(&ps->head);
+ pool_put_pseq_elem(ps);
+ continue;
+ }
+
+ delta_ts = ts - ps->last_ts;
+ factor = pde_get_multiple(delta_ts, ps->pri,
+ pde->rs->max_pri_tolerance);
+ if (factor > 0) {
+ ps->last_ts = ts;
+ ps->count++;
+
+ if (max_count < ps->count)
+ max_count = ps->count;
+ } else {
+ ps->count_falses++;
+ }
+ }
+ return max_count;
+}
+
+static struct pri_sequence *
+pseq_handler_check_detection(struct pri_detector *pde)
+{
+ struct pri_sequence *ps;
+
+ if (list_empty(&pde->sequences))
+ return NULL;
+
+ list_for_each_entry(ps, &pde->sequences, head) {
+ /*
+ * we assume to have enough matching confidence if we
+ * 1) have enough pulses
+ * 2) have more matching than false pulses
+ */
+ if ((ps->count >= pde->rs->ppb_thresh) &&
+ (ps->count * pde->rs->num_pri >= ps->count_falses))
+ return ps;
+ }
+ return NULL;
+}
+
+
+/* free pulse queue and sequences list and give objects back to pools */
+static void pri_detector_reset(struct pri_detector *pde, u64 ts)
+{
+ struct pri_sequence *ps, *ps0;
+ struct pulse_elem *p, *p0;
+ list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
+ list_del_init(&ps->head);
+ pool_put_pseq_elem(ps);
+ }
+ list_for_each_entry_safe(p, p0, &pde->pulses, head) {
+ list_del_init(&p->head);
+ pool_put_pulse_elem(p);
+ }
+ pde->count = 0;
+ pde->last_ts = ts;
+}
+
+static void pri_detector_exit(struct pri_detector *de)
+{
+ pri_detector_reset(de, 0);
+ pool_deregister_ref();
+ kfree(de);
+}
+
+static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
+ struct pulse_event *event)
+{
+ u32 max_updated_seq;
+ struct pri_sequence *ps;
+ u64 ts = event->ts;
+ const struct radar_detector_specs *rs = de->rs;
+
+ /* ignore pulses not within width range */
+ if ((rs->width_min > event->width) || (rs->width_max < event->width))
+ return NULL;
+
+ if ((ts - de->last_ts) < rs->max_pri_tolerance)
+ /* if delta to last pulse is too short, don't use this pulse */
+ return NULL;
+ de->last_ts = ts;
+
+ max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
+
+ if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
+ pri_detector_reset(de, ts);
+ return NULL;
+ }
+
+ ps = pseq_handler_check_detection(de);
+
+ if (ps == NULL)
+ pulse_queue_enqueue(de, ts);
+
+ return ps;
+}
+
+struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
+{
+ struct pri_detector *de;
+
+ de = kzalloc(sizeof(*de), GFP_ATOMIC);
+ if (de == NULL)
+ return NULL;
+ de->exit = pri_detector_exit;
+ de->add_pulse = pri_detector_add_pulse;
+ de->reset = pri_detector_reset;
+
+ INIT_LIST_HEAD(&de->sequences);
+ INIT_LIST_HEAD(&de->pulses);
+ de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
+ de->max_count = rs->ppb * 2;
+ de->rs = rs;
+
+ pool_register_ref();
+ return de;
+}
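+
+/*
+ * Illustrative usage sketch (hypothetical caller; in this tree the detector
+ * is driven from dfs_pattern_detector.c). "rs" is a radar_detector_specs
+ * entry and "event" a struct pulse_event as declared in
+ * dfs_pattern_detector.h:
+ *
+ *	struct pri_detector *de = pri_detector_init(&rs);
+ *	struct pri_sequence *ps;
+ *
+ *	if (de) {
+ *		ps = de->add_pulse(de, &event);
+ *		if (ps)
+ *			... radar pattern detected, report it ...
+ *		de->exit(de);
+ *	}
+ */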
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h
new file mode 100644
index 000000000..79f0fff4d
--- /dev/null
+++ b/drivers/net/wireless/ath/dfs_pri_detector.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DFS_PRI_DETECTOR_H
+#define DFS_PRI_DETECTOR_H
+
+#include <linux/list.h>
+
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
+/**
+ * struct pri_sequence - sequence of pulses matching one PRI
+ * @head: list_head
+ * @pri: pulse repetition interval (PRI) in usecs
+ * @dur: duration of sequence in usecs
+ * @count: number of pulses in this sequence
+ * @count_falses: number of not matching pulses in this sequence
+ * @first_ts: time stamp of first pulse in usecs
+ * @last_ts: time stamp of last pulse in usecs
+ * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
+ */
+struct pri_sequence {
+ struct list_head head;
+ u32 pri;
+ u32 dur;
+ u32 count;
+ u32 count_falses;
+ u64 first_ts;
+ u64 last_ts;
+ u64 deadline_ts;
+};
+
+/**
+ * struct pri_detector - PRI detector element for a dedicated radar type
+ * @exit(): destructor
+ * @add_pulse(): add pulse event, returns pri_sequence if pattern was detected
+ * @reset(): clear states and reset to given time stamp
+ * @rs: detector specs for this detector element
+ * @last_ts: last pulse time stamp considered for this element in usecs
+ * @sequences: list_head holding potential pulse sequences
+ * @pulses: list connecting pulse_elem objects
+ * @count: number of pulses in queue
+ * @max_count: maximum number of pulses to be queued
+ * @window_size: window size back from newest pulse time stamp in usecs
+ */
+struct pri_detector {
+ void (*exit) (struct pri_detector *de);
+ struct pri_sequence *
+ (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
+ void (*reset) (struct pri_detector *de, u64 ts);
+
+/* private: internal use only */
+ const struct radar_detector_specs *rs;
+ u64 last_ts;
+ struct list_head sequences;
+ struct list_head pulses;
+ u32 count;
+ u32 max_count;
+ u32 window_size;
+};
+
+struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs);
+
+#endif /* DFS_PRI_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
new file mode 100644
index 000000000..eae9abf54
--- /dev/null
+++ b/drivers/net/wireless/ath/hw.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+#include "ath.h"
+#include "reg.h"
+
+#define REG_READ (common->ops->read)
+#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
+
+/**
+ * ath_hw_setbssidmask - filter out BSSIDs we listen to
+ *
+ * @common: the ath_common struct for the device.
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * NOTE: This is a simple filter and does *not* filter out all
+ * relevant frames. Some frames that are not for us might get ACKed from us
+ * by PCU because they just match the mask.
+ *
+ * When handling multiple BSSes you can get the BSSID mask by computing the
+ * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
+ *
+ * When you do this you are essentially computing the common bits of all your
+ * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
+ * the MAC address to obtain the relevant bits and compare the result with
+ * (frame's BSSID & mask) to see if they match.
+ *
+ * Simple example: on your card you have created two BSSes with
+ * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
+ * There is another BSSID-03 but you are not part of it. For simplicity's sake,
+ * assuming only 4 bits for a mac address and for BSSIDs you can then have:
+ *
+ * \
+ * MAC: 0001 |
+ * BSSID-01: 0100 | --> Belongs to us
+ * BSSID-02: 1001 |
+ * /
+ * -------------------
+ * BSSID-03: 0110 | --> External
+ * -------------------
+ *
+ * Our bssid_mask would then be:
+ *
+ * On loop iteration for BSSID-01:
+ * ~(0001 ^ 0100) -> ~(0101)
+ * -> 1010
+ * bssid_mask = 1010
+ *
+ * On loop iteration for BSSID-02:
+ * bssid_mask &= ~(0001 ^ 1001)
+ * bssid_mask = (1010) & ~(0001 ^ 1001)
+ * bssid_mask = (1010) & ~(1000)
+ * bssid_mask = (1010) & (0111)
+ * bssid_mask = 0010
+ *
+ * A bssid_mask of 0010 means "only pay attention to the second least
+ * significant bit". This is because it's the only bit common
+ * amongst the MAC and all BSSIDs we support. To find out what the real
+ * common bit is we can simply "&" the bssid_mask now with any BSSID we have
+ * or our MAC address (we assume the hardware uses the MAC address).
+ *
+ * Now, suppose there's an incoming frame for BSSID-03:
+ *
+ * IFRAME-01: 0110
+ *
+ * An easy eye-inspection of this should already tell you that this frame
+ * will not pass our check. This is because the bssid_mask tells the
+ * hardware to only look at the second least significant bit and the
+ * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
+ * as 1, which does not match 0.
+ *
+ * So with IFRAME-01 we *assume* the hardware will do:
+ *
+ * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
+ * --> allow = (0010) == 0000 ? 1 : 0;
+ * --> allow = 0
+ *
+ * Let's now test a frame that should work:
+ *
+ * IFRAME-02: 0001 (we should allow)
+ *
+ * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
+ * --> allow = (0000) == (0000)
+ * --> allow = 1
+ *
+ * Other examples:
+ *
+ * IFRAME-03: 0100 --> allowed
+ * IFRAME-04: 1001 --> allowed
+ * IFRAME-05: 1101 --> allowed but it's not for us!
+ *
+ */
+void ath_hw_setbssidmask(struct ath_common *common)
+{
+ void *ah = common->ah;
+ u32 id1;
+
+ REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
+ id1 = REG_READ(ah, AR_STA_ID1) & ~AR_STA_ID1_SADH_MASK;
+ id1 |= get_unaligned_le16(common->macaddr + 4);
+ REG_WRITE(ah, AR_STA_ID1, id1);
+
+ REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
+ REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
+}
+EXPORT_SYMBOL(ath_hw_setbssidmask);
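+
+/*
+ * Illustrative sketch (not part of this file): callers are expected to
+ * derive common->bssidmask by AND-ing ~(MAC XOR BSSID) over all active
+ * BSSIDs before calling ath_hw_setbssidmask(), roughly:
+ *
+ *	memset(mask, 0xff, ETH_ALEN);
+ *	for each active BSSID b:
+ *		for (i = 0; i < ETH_ALEN; i++)
+ *			mask[i] &= ~(common->macaddr[i] ^ b[i]);
+ *	memcpy(common->bssidmask, mask, ETH_ALEN);
+ */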
+
+
+/**
+ * ath_hw_cycle_counters_update - common function to update cycle counters
+ *
+ * @common: the ath_common struct for the device.
+ *
+ * This function is used to update all cycle counters in one place.
+ * It has to be called while holding common->cc_lock!
+ */
+void ath_hw_cycle_counters_update(struct ath_common *common)
+{
+ u32 cycles, busy, rx, tx;
+ void *ah = common->ah;
+
+ /* freeze */
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
+
+ /* read */
+ cycles = REG_READ(ah, AR_CCCNT);
+ busy = REG_READ(ah, AR_RCCNT);
+ rx = REG_READ(ah, AR_RFCNT);
+ tx = REG_READ(ah, AR_TFCNT);
+
+ /* clear */
+ REG_WRITE(ah, AR_CCCNT, 0);
+ REG_WRITE(ah, AR_RFCNT, 0);
+ REG_WRITE(ah, AR_RCCNT, 0);
+ REG_WRITE(ah, AR_TFCNT, 0);
+
+ /* unfreeze */
+ REG_WRITE(ah, AR_MIBC, 0);
+
+ /* update all cycle counters here */
+ common->cc_ani.cycles += cycles;
+ common->cc_ani.rx_busy += busy;
+ common->cc_ani.rx_frame += rx;
+ common->cc_ani.tx_frame += tx;
+
+ common->cc_survey.cycles += cycles;
+ common->cc_survey.rx_busy += busy;
+ common->cc_survey.rx_frame += rx;
+ common->cc_survey.tx_frame += tx;
+}
+EXPORT_SYMBOL(ath_hw_cycle_counters_update);
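+
+/*
+ * Illustrative caller pattern (the driver must provide the locking, as
+ * noted in the kernel-doc above):
+ *
+ *	spin_lock_bh(&common->cc_lock);
+ *	ath_hw_cycle_counters_update(common);
+ *	cycles = common->cc_survey.cycles;
+ *	busy = common->cc_survey.rx_busy;
+ *	spin_unlock_bh(&common->cc_lock);
+ */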
+
+int32_t ath_hw_get_listen_time(struct ath_common *common)
+{
+ struct ath_cycle_counters *cc = &common->cc_ani;
+ int32_t listen_time;
+
+ listen_time = (cc->cycles - cc->rx_frame - cc->tx_frame) /
+ (common->clockrate * 1000);
+
+ memset(cc, 0, sizeof(*cc));
+
+ return listen_time;
+}
+EXPORT_SYMBOL(ath_hw_get_listen_time);
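+
+/*
+ * Example (illustrative, assuming clockrate is in MHz as used elsewhere in
+ * this layer): with clockrate == 44 and cycles - rx_frame - tx_frame ==
+ * 4,400,000 since the last call, the listen time returned above is
+ * 4,400,000 / (44 * 1000) = 100 ms.
+ */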
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
new file mode 100644
index 000000000..1816b4e7d
--- /dev/null
+++ b/drivers/net/wireless/ath/key.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "ath.h"
+#include "reg.h"
+
+#define REG_READ (common->ops->read)
+#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ if (common->ops->enable_write_buffer) \
+ common->ops->enable_write_buffer((_ah));
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ if (common->ops->write_flush) \
+ common->ops->write_flush((_ah));
+
+
+#define IEEE80211_WEP_NKID 4 /* number of key ids */
+
+/************************/
+/* Key Cache Management */
+/************************/
+
+bool ath_hw_keyreset(struct ath_common *common, u16 entry)
+{
+ u32 keyType;
+ void *ah = common->ah;
+
+ if (entry >= common->keymax) {
+ ath_err(common, "keyreset: keycache entry %u out of range\n",
+ entry);
+ return false;
+ }
+
+ keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
+
+ if (keyType == AR_KEYTABLE_TYPE_TKIP) {
+ u16 micentry = entry + 64;
+
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+ }
+
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ return true;
+}
+EXPORT_SYMBOL(ath_hw_keyreset);
+
+static bool ath_hw_keysetmac(struct ath_common *common,
+ u16 entry, const u8 *mac)
+{
+ u32 macHi, macLo;
+ u32 unicast_flag = AR_KEYTABLE_VALID;
+ void *ah = common->ah;
+
+ if (entry >= common->keymax) {
+ ath_err(common, "keysetmac: keycache entry %u out of range\n",
+ entry);
+ return false;
+ }
+
+ if (mac != NULL) {
+ /*
+ * AR_KEYTABLE_VALID indicates that the address is a unicast
+ * address, which must match the transmitter address for
+ * decrypting frames.
+ * Not setting this bit allows the hardware to use the key
+ * for multicast frame decryption.
+ */
+ if (mac[0] & 0x01)
+ unicast_flag = 0;
+
+ macLo = get_unaligned_le32(mac);
+ macHi = get_unaligned_le16(mac + 4);
+ macLo >>= 1;
+ macLo |= (macHi & 1) << 31;
+ macHi >>= 1;
+ } else {
+ macLo = macHi = 0;
+ }
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ return true;
+}
+
+static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
+ const struct ath_keyval *k,
+ const u8 *mac)
+{
+ void *ah = common->ah;
+ u32 key0, key1, key2, key3, key4;
+ u32 keyType;
+
+ if (entry >= common->keymax) {
+ ath_err(common, "set-entry: keycache entry %u out of range\n",
+ entry);
+ return false;
+ }
+
+ switch (k->kv_type) {
+ case ATH_CIPHER_AES_OCB:
+ keyType = AR_KEYTABLE_TYPE_AES;
+ break;
+ case ATH_CIPHER_AES_CCM:
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
+ ath_dbg(common, ANY,
+ "AES-CCM not supported by this mac rev\n");
+ return false;
+ }
+ keyType = AR_KEYTABLE_TYPE_CCM;
+ break;
+ case ATH_CIPHER_TKIP:
+ keyType = AR_KEYTABLE_TYPE_TKIP;
+ if (entry + 64 >= common->keymax) {
+ ath_dbg(common, ANY,
+ "entry %u inappropriate for TKIP\n", entry);
+ return false;
+ }
+ break;
+ case ATH_CIPHER_WEP:
+ if (k->kv_len < WLAN_KEY_LEN_WEP40) {
+ ath_dbg(common, ANY, "WEP key length %u too small\n",
+ k->kv_len);
+ return false;
+ }
+ if (k->kv_len <= WLAN_KEY_LEN_WEP40)
+ keyType = AR_KEYTABLE_TYPE_40;
+ else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
+ keyType = AR_KEYTABLE_TYPE_104;
+ else
+ keyType = AR_KEYTABLE_TYPE_128;
+ break;
+ case ATH_CIPHER_CLR:
+ keyType = AR_KEYTABLE_TYPE_CLR;
+ break;
+ default:
+ ath_err(common, "cipher %u not supported\n", k->kv_type);
+ return false;
+ }
+
+ key0 = get_unaligned_le32(k->kv_val + 0);
+ key1 = get_unaligned_le16(k->kv_val + 4);
+ key2 = get_unaligned_le32(k->kv_val + 6);
+ key3 = get_unaligned_le16(k->kv_val + 10);
+ key4 = get_unaligned_le32(k->kv_val + 12);
+ if (k->kv_len <= WLAN_KEY_LEN_WEP104)
+ key4 &= 0xff;
+
+ /*
+ * Note: Key cache registers access special memory area that requires
+ * two 32-bit writes to actually update the values in the internal
+ * memory. Consequently, the exact order and pairs used here must be
+ * maintained.
+ */
+
+ if (keyType == AR_KEYTABLE_TYPE_TKIP) {
+ u16 micentry = entry + 64;
+
+ /*
+ * Write inverted key[47:0] first to avoid Michael MIC errors
+ * on frames that could be sent or received at the same time.
+ * The correct key will be written in the end once everything
+ * else is ready.
+ */
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
+
+ /* Write key[95:48] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
+
+ /* Write key[127:96] and key type */
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
+
+ /* Write MAC address for the entry */
+ (void) ath_hw_keysetmac(common, entry, mac);
+
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
+ /*
+ * TKIP uses two key cache entries:
+ * Michael MIC TX/RX keys in the same key cache entry
+ * (idx = main index + 64):
+ * key0 [31:0] = RX key [31:0]
+ * key1 [15:0] = TX key [31:16]
+ * key1 [31:16] = reserved
+ * key2 [31:0] = RX key [63:32]
+ * key3 [15:0] = TX key [15:0]
+ * key3 [31:16] = reserved
+ * key4 [31:0] = TX key [63:32]
+ */
+ u32 mic0, mic1, mic2, mic3, mic4;
+
+ mic0 = get_unaligned_le32(k->kv_mic + 0);
+ mic2 = get_unaligned_le32(k->kv_mic + 4);
+ mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
+ mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
+ mic4 = get_unaligned_le32(k->kv_txmic + 4);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write RX[31:0] and TX[31:16] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
+
+ /* Write RX[63:32] and TX[15:0] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
+
+ /* Write TX[63:32] and keyType(reserved) */
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ } else {
+ /*
+ * TKIP uses four key cache entries (two for group
+ * keys):
+ * Michael MIC TX/RX keys are in different key cache
+			 * entries (idx = main index + 64 for TX and
+			 * main index + 32 + 64 for RX):
+ * key0 [31:0] = TX/RX MIC key [31:0]
+ * key1 [31:0] = reserved
+ * key2 [31:0] = TX/RX MIC key [63:32]
+ * key3 [31:0] = reserved
+ * key4 [31:0] = reserved
+ *
+ * Upper layer code will call this function separately
+			 * for TX and RX keys when these register offsets are
+ * used.
+ */
+ u32 mic0, mic2;
+
+ mic0 = get_unaligned_le32(k->kv_mic + 0);
+ mic2 = get_unaligned_le32(k->kv_mic + 4);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write MIC key[31:0] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
+
+ /* Write MIC key[63:32] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
+
+ /* Write TX[63:32] and keyType(reserved) */
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* MAC address registers are reserved for the MIC entry */
+ REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
+
+ /*
+ * Write the correct (un-inverted) key[47:0] last to enable
+ * TKIP now that all other registers are set with correct
+ * values.
+ */
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write key[47:0] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
+ REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
+
+ /* Write key[95:48] */
+ REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
+ REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
+
+ /* Write key[127:96] and key type */
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ /* Write MAC address for the entry */
+ (void) ath_hw_keysetmac(common, entry, mac);
+ }
+
+ return true;
+}
+
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
+ struct ath_keyval *hk, const u8 *addr,
+ bool authenticator)
+{
+ const u8 *key_rxmic;
+ const u8 *key_txmic;
+
+ key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /*
+ * Group key installation - only two key cache entries are used
+ * regardless of splitmic capability since group key is only
+ * used either for TX or RX.
+ */
+ if (authenticator) {
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
+ } else {
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
+ }
+ return ath_hw_set_keycache_entry(common, keyix, hk, addr);
+ }
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
+ /* TX and RX keys share the same key cache entry. */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath_hw_set_keycache_entry(common, keyix, hk, addr);
+ }
+
+ /* Separate key cache entries for TX and RX */
+
+ /* TX key goes at first index, RX key at +32. */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
+ /* TX MIC entry failed. No need to proceed further */
+ ath_err(common, "Setting TX MIC Key Failed\n");
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath_hw_set_keycache_entry(common, keyix + 32, hk, addr);
+}
+
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
+{
+ int i;
+
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap))
+ continue; /* At least one part of TKIP key allocated */
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ continue; /* At least one part of TKIP key allocated */
+
+ /* Found a free slot for a TKIP key */
+ return i;
+ }
+ return -1;
+}
+
+static int ath_reserve_key_cache_slot(struct ath_common *common,
+ u32 cipher)
+{
+ int i;
+
+ if (cipher == WLAN_CIPHER_SUITE_TKIP)
+ return ath_reserve_key_cache_slot_tkip(common);
+
+ /* First, try to find slots that would not be available for TKIP. */
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+ if (!test_bit(i, common->keymap) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i;
+ if (!test_bit(i + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 32;
+ if (!test_bit(i + 64, common->keymap) &&
+			    (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 64;
+ if (!test_bit(i + 64 + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap)))
+ return i + 64 + 32;
+ }
+ } else {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (!test_bit(i, common->keymap) &&
+ test_bit(i + 64, common->keymap))
+ return i;
+ if (test_bit(i, common->keymap) &&
+ !test_bit(i + 64, common->keymap))
+ return i + 64;
+ }
+ }
+
+ /* No partially used TKIP slots, pick any available slot */
+ for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
+ /* Do not allow slots that could be needed for TKIP group keys
+ * to be used. This limitation could be removed if we know that
+ * TKIP will not be used. */
+ if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
+ continue;
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
+ if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
+ continue;
+ if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
+ continue;
+ }
+
+ if (!test_bit(i, common->keymap))
+ return i; /* Found a free slot for a key */
+ }
+
+ /* No free slot found */
+ return -1;
+}
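+
+/*
+ * Illustrative layout (split-MIC hardware, i.e. no
+ * ATH_CRYPT_CAP_MIC_COMBINED): a TKIP key reserved at index i ends up
+ * occupying four key cache entries,
+ *
+ *	i		TKIP key, TX path
+ *	i + 32		TKIP key, RX path
+ *	i + 64		Michael MIC for entry i
+ *	i + 64 + 32	Michael MIC for entry i + 32
+ *
+ * which is why the searches above skip slots whose partner entries are
+ * already in use.
+ */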
+
+/*
+ * Configure encryption in the HW.
+ */
+int ath_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_keyval hk;
+ const u8 *mac = NULL;
+ u8 gmac[ETH_ALEN];
+ int ret = 0;
+ int idx;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->cipher) {
+ case 0:
+ hk.kv_type = ATH_CIPHER_CLR;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ hk.kv_type = ATH_CIPHER_WEP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hk.kv_type = ATH_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hk.kv_type = ATH_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hk.kv_len = key->keylen;
+ if (key->keylen)
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ memcpy(gmac, vif->addr, ETH_ALEN);
+ gmac[0] |= 0x01;
+ mac = gmac;
+ idx = ath_reserve_key_cache_slot(common, key->cipher);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!sta) {
+ idx = key->keyidx;
+ break;
+ }
+ memcpy(gmac, sta->addr, ETH_ALEN);
+ gmac[0] |= 0x01;
+ mac = gmac;
+ idx = ath_reserve_key_cache_slot(common, key->cipher);
+ break;
+ default:
+ idx = key->keyidx;
+ break;
+ }
+ } else if (key->keyidx) {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (vif->type != NL80211_IFTYPE_AP) {
+ /* Only keyidx 0 should be used with unicast key, but
+ * allow this for client mode for now. */
+ idx = key->keyidx;
+ } else
+ return -EIO;
+ } else {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ idx = ath_reserve_key_cache_slot(common, key->cipher);
+ }
+
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
+ vif->type == NL80211_IFTYPE_AP);
+ else
+ ret = ath_hw_set_keycache_entry(common, idx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ set_bit(idx, common->keymap);
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ set_bit(idx, common->ccmp_keymap);
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ set_bit(idx, common->tkip_keymap);
+ set_bit(idx + 64, common->tkip_keymap);
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
+ set_bit(idx + 32, common->keymap);
+ set_bit(idx + 64 + 32, common->keymap);
+ set_bit(idx + 32, common->tkip_keymap);
+ set_bit(idx + 64 + 32, common->tkip_keymap);
+ }
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL(ath_key_config);
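+
+/*
+ * Illustrative usage sketch (hypothetical mac80211 set_key callback; the
+ * real drivers add their own bookkeeping around these calls):
+ *
+ *	case SET_KEY:
+ *		ret = ath_key_config(common, vif, sta, key);
+ *		if (ret >= 0) {
+ *			key->hw_key_idx = ret;
+ *			ret = 0;
+ *		}
+ *		break;
+ *	case DISABLE_KEY:
+ *		ath_key_delete(common, key);
+ *		break;
+ */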
+
+/*
+ * Delete Key.
+ */
+void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+{
+ ath_hw_keyreset(common, key->hw_key_idx);
+ if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
+ clear_bit(key->hw_key_idx, common->ccmp_keymap);
+ if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+ clear_bit(key->hw_key_idx + 64, common->keymap);
+
+ clear_bit(key->hw_key_idx, common->tkip_keymap);
+ clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
+
+ if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
+ ath_hw_keyreset(common, key->hw_key_idx + 32);
+ clear_bit(key->hw_key_idx + 32, common->keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+
+ clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
+ }
+}
+EXPORT_SYMBOL(ath_key_delete);
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
new file mode 100644
index 000000000..338d72337
--- /dev/null
+++ b/drivers/net/wireless/ath/main.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "ath.h"
+#include "trace.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
+ u32 len,
+ gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+ u32 off;
+
+ /*
+ * Cache-line-align. This is important (for the
+ * 5210 at least) as not doing so causes bogus data
+ * in rx'd frames.
+ */
+
+	/* Note: the kernel can allocate more than what we ask it to
+	 * give us. We really only need 4 KB as that is what this
+	 * hardware supports, and in fact we need at least 3849 bytes
+	 * as that is the max A-MSDU size this hardware supports.
+	 * Unfortunately this means we may get 8 KB here from the
+	 * kernel... and that is actually what is observed on some
+	 * systems :( */
+ skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
+ if (skb != NULL) {
+ off = ((unsigned long) skb->data) % common->cachelsz;
+ if (off != 0)
+ skb_reserve(skb, common->cachelsz - off);
+ } else {
+ pr_err("skbuff alloc of size %u failed\n", len);
+ return NULL;
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(ath_rxbuf_alloc);
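+
+/*
+ * Example (illustrative): with common->cachelsz == 32 and skb->data
+ * landing 20 bytes into a cache line, the code above reserves
+ * 32 - 20 = 12 bytes so the RX payload starts cache-line aligned.
+ */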
+
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr)
+{
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal_64bits(hdr->addr3, common->curbssid);
+}
+EXPORT_SYMBOL(ath_is_mybeacon);
+
+void ath_printk(const char *level, const struct ath_common* common,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (common && common->hw && common->hw->wiphy) {
+ printk("%sath: %s: %pV",
+ level, wiphy_name(common->hw->wiphy), &vaf);
+ trace_ath_log(common->hw->wiphy, &vaf);
+ } else {
+ printk("%sath: %pV", level, &vaf);
+ }
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath_printk);
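+
+/*
+ * Illustrative usage: drivers normally go through the ath_err()/ath_dbg()
+ * wrappers from ath.h rather than calling ath_printk() directly, e.g.:
+ *
+ *	ath_err(common, "rx buffer allocation failed\n");
+ */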
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
new file mode 100644
index 000000000..3ad4c774b
--- /dev/null
+++ b/drivers/net/wireless/ath/reg.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH_REGISTERS_H
+#define ATH_REGISTERS_H
+
+#define AR_MIBC 0x0040
+#define AR_MIBC_COW 0x00000001
+#define AR_MIBC_FMC 0x00000002
+#define AR_MIBC_CMC 0x00000004
+#define AR_MIBC_MCS 0x00000008
+
+#define AR_STA_ID0 0x8000
+#define AR_STA_ID1 0x8004
+#define AR_STA_ID1_SADH_MASK 0x0000ffff
+
+/*
+ * BSSID mask registers. See ath_hw_setbssidmask()
+ * for detailed documentation about these registers.
+ */
+#define AR_BSSMSKL 0x80e0
+#define AR_BSSMSKU 0x80e4
+
+#define AR_TFCNT 0x80ec
+#define AR_RFCNT 0x80f0
+#define AR_RCCNT 0x80f4
+#define AR_CCCNT 0x80f8
+
+#define AR_KEYTABLE_0 0x8800
+#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
+#define AR_KEY_CACHE_SIZE 128
+#define AR_RSVD_KEYTABLE_ENTRIES 4
+#define AR_KEY_TYPE 0x00000007
+#define AR_KEYTABLE_TYPE_40 0x00000000
+#define AR_KEYTABLE_TYPE_104 0x00000001
+#define AR_KEYTABLE_TYPE_128 0x00000003
+#define AR_KEYTABLE_TYPE_TKIP 0x00000004
+#define AR_KEYTABLE_TYPE_AES 0x00000005
+#define AR_KEYTABLE_TYPE_CCM 0x00000006
+#define AR_KEYTABLE_TYPE_CLR 0x00000007
+#define AR_KEYTABLE_ANT 0x00000008
+#define AR_KEYTABLE_VALID 0x00008000
+#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
+#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
+#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
+#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
+#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
+#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
+#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
+#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
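+
+/*
+ * Example (illustrative): key cache entry 5 starts at
+ * AR_KEYTABLE(5) = 0x8800 + 5 * 32 = 0x88a0, so AR_KEYTABLE_KEY0(5) is
+ * 0x88a0 and AR_KEYTABLE_TYPE(5) is 0x88b4.
+ */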
+
+#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
new file mode 100644
index 000000000..06ea6cc9e
--- /dev/null
+++ b/drivers/net/wireless/ath/regd.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include "regd.h"
+#include "regd_common.h"
+
+static int __ath_regd_init(struct ath_regulatory *reg);
+
+/*
+ * This is a set of common rules used by our world regulatory domains.
+ * We have 12 world regulatory domains. To save space we consolidate
+ * the regulatory domains in 5 structures by frequency and change
+ * the flags on our reg_notifier() on a case by case basis.
+ */
+
+/* Only these channels allow active scan on all world regulatory domains */
+#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+/* We enable active scan on these on a case by case basis by regulatory domain */
+#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+ NL80211_RRF_NO_IR)
+#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\
+ NL80211_RRF_NO_IR | \
+ NL80211_RRF_NO_OFDM)
+
+/* We allow IBSS on these on a case by case basis by regulatory domain */
+#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
+ ATH9K_2GHZ_CH12_13, \
+ ATH9K_2GHZ_CH14
+
+#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
+ ATH9K_5GHZ_5470_5850
+
+/* This one skips what we call "mid band" */
+#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
+ ATH9K_5GHZ_5725_5850
+
+/* Can be used for:
+ * 0x60, 0x61, 0x62 */
+static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
+ .n_reg_rules = 5,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_ALL,
+ ATH9K_5GHZ_ALL,
+ }
+};
+
+/* Can be used by 0x63 and 0x65 */
+static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_CH01_11,
+ ATH9K_2GHZ_CH12_13,
+ ATH9K_5GHZ_NO_MIDBAND,
+ }
+};
+
+/* Can be used by 0x64 only */
+static const struct ieee80211_regdomain ath_world_regdom_64 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_CH01_11,
+ ATH9K_5GHZ_NO_MIDBAND,
+ }
+};
+
+/* Can be used by 0x66 and 0x69 */
+static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_CH01_11,
+ ATH9K_5GHZ_ALL,
+ }
+};
+
+/* Can be used by 0x67, 0x68, 0x6A and 0x6C */
+static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_CH01_11,
+ ATH9K_2GHZ_CH12_13,
+ ATH9K_5GHZ_ALL,
+ }
+};
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+ if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ return true;
+
+ switch (reg->country_code) {
+ case CTRY_UNITED_STATES:
+ case CTRY_JAPAN1:
+ case CTRY_JAPAN2:
+ case CTRY_JAPAN3:
+ case CTRY_JAPAN4:
+ case CTRY_JAPAN5:
+ case CTRY_JAPAN6:
+ case CTRY_JAPAN7:
+ case CTRY_JAPAN8:
+ case CTRY_JAPAN9:
+ case CTRY_JAPAN10:
+ case CTRY_JAPAN11:
+ case CTRY_JAPAN12:
+ case CTRY_JAPAN13:
+ case CTRY_JAPAN14:
+ case CTRY_JAPAN15:
+ case CTRY_JAPAN16:
+ case CTRY_JAPAN17:
+ case CTRY_JAPAN18:
+ case CTRY_JAPAN19:
+ case CTRY_JAPAN20:
+ case CTRY_JAPAN21:
+ case CTRY_JAPAN22:
+ case CTRY_JAPAN23:
+ case CTRY_JAPAN24:
+ case CTRY_JAPAN25:
+ case CTRY_JAPAN26:
+ case CTRY_JAPAN27:
+ case CTRY_JAPAN28:
+ case CTRY_JAPAN29:
+ case CTRY_JAPAN30:
+ case CTRY_JAPAN31:
+ case CTRY_JAPAN32:
+ case CTRY_JAPAN33:
+ case CTRY_JAPAN34:
+ case CTRY_JAPAN35:
+ case CTRY_JAPAN36:
+ case CTRY_JAPAN37:
+ case CTRY_JAPAN38:
+ case CTRY_JAPAN39:
+ case CTRY_JAPAN40:
+ case CTRY_JAPAN41:
+ case CTRY_JAPAN42:
+ case CTRY_JAPAN43:
+ case CTRY_JAPAN44:
+ case CTRY_JAPAN45:
+ case CTRY_JAPAN46:
+ case CTRY_JAPAN47:
+ case CTRY_JAPAN48:
+ case CTRY_JAPAN49:
+ case CTRY_JAPAN50:
+ case CTRY_JAPAN51:
+ case CTRY_JAPAN52:
+ case CTRY_JAPAN53:
+ case CTRY_JAPAN54:
+ case CTRY_JAPAN55:
+ case CTRY_JAPAN56:
+ case CTRY_JAPAN57:
+ case CTRY_JAPAN58:
+ case CTRY_JAPAN59:
+ return false;
+ }
+
+ return true;
+}
+
+static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
+{
+ if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ return false;
+ if (!dynamic_country_user_possible(reg))
+ return false;
+ return true;
+}
+
+static inline bool is_wwr_sku(u16 regd)
+{
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
+}
+
+static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
+{
+ return reg->current_rd & ~WORLDWIDE_ROAMING_FLAG;
+}
+
+bool ath_is_world_regd(struct ath_regulatory *reg)
+{
+ return is_wwr_sku(ath_regd_get_eepromRD(reg));
+}
+EXPORT_SYMBOL(ath_is_world_regd);
+
+static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
+{
+ /* this is the most restrictive */
+ return &ath_world_regdom_64;
+}
+
+static const struct
+ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
+{
+ switch (reg->regpair->reg_domain) {
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ return &ath_world_regdom_60_61_62;
+ case 0x63:
+ case 0x65:
+ return &ath_world_regdom_63_65;
+ case 0x64:
+ return &ath_world_regdom_64;
+ case 0x66:
+ case 0x69:
+ return &ath_world_regdom_66_69;
+ case 0x67:
+ case 0x68:
+ case 0x6A:
+ case 0x6C:
+ return &ath_world_regdom_67_68_6A_6C;
+ default:
+ WARN_ON(1);
+ return ath_default_world_regdomain();
+ }
+}
+
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+ /* possibly more */
+ return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
+/* Frequency is one where radar detection is required */
+static bool ath_is_radar_freq(u16 center_freq)
+{
+ return (center_freq >= 5260 && center_freq <= 5700);
+}
+
+static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *ch)
+{
+ const struct ieee80211_reg_rule *reg_rule;
+
+ reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
+ if (IS_ERR(reg_rule))
+ return;
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_clear_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_clear_no_ir_chan(wiphy, ch);
+}
+
+static void ath_force_no_ir_chan(struct ieee80211_channel *ch)
+{
+ ch->flags |= IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+ struct ieee80211_channel *ch;
+
+ ch = ieee80211_get_channel(wiphy, center_freq);
+ if (!ch)
+ return;
+
+ ath_force_no_ir_chan(ch);
+}
+
+static void
+__ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *ch)
+{
+ if (ath_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_force_clear_no_ir_chan(wiphy, ch);
+ break;
+ default:
+ if (ch->beacon_found)
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+ }
+}
+
+/*
+ * These exception rules do not apply to radar frequencies.
+ *
+ * - We enable initiating radiation if the country IE says it's fine.
+ * - If no country IE has been processed and we determine we have
+ *   received a beacon on a channel, we can enable initiating radiation.
+ */
+static void
+ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ __ath_reg_apply_beaconing_flags(wiphy, reg,
+ initiator, ch);
+ }
+ }
+}
+
+/**
+ * ath_reg_apply_ir_flags()
+ * @wiphy: the wiphy to use
+ * @reg: the ath regulatory structure, used for country selection
+ * @initiator: the regulatory hint initiator
+ *
+ * If no country IE has been received always enable passive scan
+ * and no-ibss on these channels. This is only done for specific
+ * regulatory SKUs.
+ *
+ * If a country IE has been received check its rule for this
+ * channel first before enabling active scan. The passive scan
+ * would have been enforced by the initial processing of our
+ * custom regulatory domain.
+ */
+static void
+ath_reg_apply_ir_flags(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ enum nl80211_reg_initiator initiator)
+{
+ struct ieee80211_supported_band *sband;
+
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (!sband)
+ return;
+
+	switch (initiator) {
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (!ath_reg_dyn_country_user_allow(reg))
+ break;
+ ath_force_clear_no_ir_freq(wiphy, 2467);
+ ath_force_clear_no_ir_freq(wiphy, 2472);
+ break;
+ default:
+ ath_force_no_ir_freq(wiphy, 2467);
+ ath_force_no_ir_freq(wiphy, 2472);
+ }
+}
+
+/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ return;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!ath_is_radar_freq(ch->center_freq))
+ continue;
+ /* We always enable radar detection/DFS on this
+ * frequency range. Additionally we also apply on
+ * this frequency range:
+		 * - If STA mode does not yet have DFS support, disable
+		 *   active scanning.
+		 * - If adhoc mode does not support DFS yet, then
+		 *   disable adhoc on the frequency.
+ * - If AP mode does not yet support radar detection/DFS
+ * do not allow AP mode
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IR;
+ }
+}
+
+static void ath_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ath_regulatory *reg)
+{
+ switch (reg->regpair->reg_domain) {
+ case 0x60:
+ case 0x63:
+ case 0x66:
+ case 0x67:
+ case 0x6C:
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+ break;
+ case 0x68:
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+ ath_reg_apply_ir_flags(wiphy, reg, initiator);
+ break;
+ default:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+ }
+}
+
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (!memcmp(allCountries[i].isoName, alpha2, 2))
+ return allCountries[i].countryCode;
+ }
+
+ return -1;
+}
+
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ u16 country_code;
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ !ath_is_world_regd(reg))
+ return -EINVAL;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ return -EINVAL;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+ return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+ struct ath_regulatory *reg,
+ struct regulatory_request *request)
+{
+ if (__ath_reg_dyn_country(wiphy, reg, request))
+ return;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x "
+ "dynamically updated by %s\n",
+ reg->current_rd,
+ reg_initiator_name(request->initiator));
+}
+
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg)
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ /* We always apply this */
+ ath_reg_apply_radar_flags(wiphy);
+
+ /*
+ * This would happen when we have sent a custom regulatory request
+	 * with a world regulatory domain and the scheduler hasn't yet processed
+ * any pending requests in the queue.
+ */
+ if (!request)
+ return;
+
+ reg->region = request->dfs_region;
+ switch (request->initiator) {
+ case NL80211_REGDOM_SET_BY_CORE:
+ /*
+ * If common->reg_world_copy is world roaming it means we *were*
+ * world roaming... so we now have to restore that data.
+ */
+ if (!ath_is_world_regd(&common->reg_world_copy))
+ break;
+
+ memcpy(reg, &common->reg_world_copy,
+ sizeof(struct ath_regulatory));
+ break;
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ break;
+ case NL80211_REGDOM_SET_BY_USER:
+ if (ath_reg_dyn_country_user_allow(reg))
+ ath_reg_dyn_country(wiphy, reg, request);
+ break;
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ ath_reg_dyn_country(wiphy, reg, request);
+ break;
+ }
+}
+EXPORT_SYMBOL(ath_reg_notifier_apply);
+
+static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
+{
+ u16 rd = ath_regd_get_eepromRD(reg);
+ int i;
+
+ if (rd & COUNTRY_ERD_FLAG) {
+ /* EEPROM value is a country code */
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+ printk(KERN_DEBUG
+ "ath: EEPROM indicates we should expect "
+ "a country code\n");
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++)
+ if (allCountries[i].countryCode == cc)
+ return true;
+ } else {
+ /* EEPROM value is a regpair value */
+ if (rd != CTRY_DEFAULT)
+ printk(KERN_DEBUG "ath: EEPROM indicates we "
+ "should expect a direct regpair map\n");
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
+ if (regDomainPairs[i].reg_domain == rd)
+ return true;
+ }
+ printk(KERN_DEBUG
+ "ath: invalid regulatory domain/country code 0x%x\n", rd);
+ return false;
+}
+
+/* EEPROM country code to regpair mapping */
+static struct country_code_to_enum_rd*
+ath_regd_find_country(u16 countryCode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countryCode == countryCode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+/* EEPROM rd code to regpair mapping */
+static struct country_code_to_enum_rd*
+ath_regd_find_country_by_rd(int regdmn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].regDmnEnum == regdmn)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+/* Returns the map of the EEPROM set RD to a country code */
+static u16 ath_regd_get_default_country(u16 rd)
+{
+ if (rd & COUNTRY_ERD_FLAG) {
+ struct country_code_to_enum_rd *country = NULL;
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+
+ country = ath_regd_find_country(cc);
+ if (country != NULL)
+ return cc;
+ }
+
+ return CTRY_DEFAULT;
+}
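+
+/*
+ * Example (illustrative): an EEPROM rd of
+ * (COUNTRY_ERD_FLAG | CTRY_UNITED_STATES) maps back to CTRY_UNITED_STATES,
+ * provided that country code is present in allCountries[]; any rd without
+ * COUNTRY_ERD_FLAG set falls back to CTRY_DEFAULT.
+ */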
+
+static struct reg_dmn_pair_mapping*
+ath_get_regpair(int regdmn)
+{
+ int i;
+
+ if (regdmn == NO_ENUMRD)
+ return NULL;
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+ if (regDomainPairs[i].reg_domain == regdmn)
+ return &regDomainPairs[i];
+ }
+ return NULL;
+}
+
+static int
+ath_regd_init_wiphy(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
+ REGULATORY_CUSTOM_REG;
+
+ if (ath_is_world_regd(reg)) {
+ /*
+ * Anything applied here (prior to wiphy registration) gets
+ * saved on the wiphy orig_* parameters
+ */
+ regd = ath_world_regdomain(reg);
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_FOLLOW_POWER;
+ } else {
+ /*
+		 * This gets applied in the absence of CRDA; it's our own
+		 * custom world regulatory domain, similar to cfg80211's but
+		 * with passive scanning enabled.
+ */
+ regd = ath_default_world_regdomain();
+ }
+
+ wiphy_apply_custom_regulatory(wiphy, regd);
+ ath_reg_apply_radar_flags(wiphy);
+ ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ return 0;
+}
+
+/*
+ * Some users have reported their EEPROM programmed with
+ * 0x8000 set; this is not a supported regulatory domain,
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
+ */
+static void ath_regd_sanitize(struct ath_regulatory *reg)
+{
+ if (reg->current_rd != COUNTRY_ERD_FLAG)
+ return;
+ printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
+ reg->current_rd = 0x64;
+}
+
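+/*
+ * Resolve the EEPROM regulatory domain into a country code, regpair and
+ * alpha2, falling back to sensible defaults when the EEPROM only carries
+ * the default value.
+ */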
+static int __ath_regd_init(struct ath_regulatory *reg)
+{
+ struct country_code_to_enum_rd *country = NULL;
+ u16 regdmn;
+
+ if (!reg)
+ return -EINVAL;
+
+ ath_regd_sanitize(reg);
+
+ printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
+
+ if (!ath_regd_is_eeprom_valid(reg)) {
+ pr_err("Invalid EEPROM contents\n");
+ return -EINVAL;
+ }
+
+ regdmn = ath_regd_get_eepromRD(reg);
+ reg->country_code = ath_regd_get_default_country(regdmn);
+
+ if (reg->country_code == CTRY_DEFAULT &&
+ regdmn == CTRY_DEFAULT) {
+ printk(KERN_DEBUG "ath: EEPROM indicates default "
+ "country code should be used\n");
+ reg->country_code = CTRY_UNITED_STATES;
+ }
+
+ if (reg->country_code == CTRY_DEFAULT) {
+ country = NULL;
+ } else {
+ printk(KERN_DEBUG "ath: doing EEPROM country->regdmn "
+ "map search\n");
+ country = ath_regd_find_country(reg->country_code);
+ if (country == NULL) {
+ printk(KERN_DEBUG
+ "ath: no valid country maps found for "
+ "country code: 0x%0x\n",
+ reg->country_code);
+ return -EINVAL;
+ } else {
+ regdmn = country->regDmnEnum;
+ printk(KERN_DEBUG "ath: country maps to "
+ "regdmn code: 0x%0x\n",
+ regdmn);
+ }
+ }
+
+ reg->regpair = ath_get_regpair(regdmn);
+
+ if (!reg->regpair) {
+ printk(KERN_DEBUG "ath: "
+ "No regulatory domain pair found, cannot continue\n");
+ return -EINVAL;
+ }
+
+ if (!country)
+ country = ath_regd_find_country_by_rd(regdmn);
+
+ if (country) {
+ reg->alpha2[0] = country->isoName[0];
+ reg->alpha2[1] = country->isoName[1];
+ } else {
+ reg->alpha2[0] = '0';
+ reg->alpha2[1] = '0';
+ }
+
+ printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
+ reg->alpha2[0], reg->alpha2[1]);
+ printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
+ reg->regpair->reg_domain);
+
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ int r;
+
+ r = __ath_regd_init(reg);
+ if (r)
+ return r;
+
+ if (ath_is_world_regd(reg))
+ memcpy(&common->reg_world_copy, reg,
+ sizeof(struct ath_regulatory));
+
+ ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath_regd_init);
+
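+/*
+ * Return the conformance test limit (CTL) group to use for the given band,
+ * based on the regpair mapped from the EEPROM regulatory domain.
+ */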
+u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
+ enum ieee80211_band band)
+{
+ if (!reg->regpair ||
+ (reg->country_code == CTRY_DEFAULT &&
+ is_wwr_sku(ath_regd_get_eepromRD(reg)))) {
+ return SD_NO_CTL;
+ }
+
+ if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
+ switch (reg->region) {
+ case NL80211_DFS_FCC:
+ return CTL_FCC;
+ case NL80211_DFS_ETSI:
+ return CTL_ETSI;
+ case NL80211_DFS_JP:
+ return CTL_MKK;
+ default:
+ break;
+ }
+ }
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ return reg->regpair->reg_2ghz_ctl;
+ case IEEE80211_BAND_5GHZ:
+ return reg->regpair->reg_5ghz_ctl;
+ default:
+ return NO_CTL;
+ }
+}
+EXPORT_SYMBOL(ath_regd_get_band_ctl);
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
new file mode 100644
index 000000000..37f53bd8f
--- /dev/null
+++ b/drivers/net/wireless/ath/regd.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REGD_H
+#define REGD_H
+
+#include <linux/nl80211.h>
+#include <net/cfg80211.h>
+
+#include "ath.h"
+
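+/* Conformance test limit (CTL) groups */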
+enum ctl_group {
+ CTL_FCC = 0x10,
+ CTL_MKK = 0x40,
+ CTL_ETSI = 0x30,
+};
+
+#define NO_CTL 0xff
+#define SD_NO_CTL 0xE0
+#define CTL_11A 0
+#define CTL_11B 1
+#define CTL_11G 2
+#define CTL_2GHT20 5
+#define CTL_5GHT20 6
+#define CTL_2GHT40 7
+#define CTL_5GHT40 8
+
+#define CTRY_DEBUG 0x1ff
+#define CTRY_DEFAULT 0
+
+#define COUNTRY_ERD_FLAG 0x8000
+#define WORLDWIDE_ROAMING_FLAG 0x4000
+
+#define MULTI_DOMAIN_MASK 0xFF00
+
+#define WORLD_SKU_MASK 0x00F0
+#define WORLD_SKU_PREFIX 0x0060
+
+#define CHANNEL_HALF_BW 10
+#define CHANNEL_QUARTER_BW 5
+
+struct country_code_to_enum_rd {
+ u16 countryCode;
+ u16 regDmnEnum;
+ const char *isoName;
+};
+
+enum CountryCode {
+ CTRY_ALBANIA = 8,
+ CTRY_ALGERIA = 12,
+ CTRY_ARGENTINA = 32,
+ CTRY_ARMENIA = 51,
+ CTRY_ARUBA = 533,
+ CTRY_AUSTRALIA = 36,
+ CTRY_AUSTRIA = 40,
+ CTRY_AZERBAIJAN = 31,
+ CTRY_BAHRAIN = 48,
+ CTRY_BANGLADESH = 50,
+ CTRY_BARBADOS = 52,
+ CTRY_BELARUS = 112,
+ CTRY_BELGIUM = 56,
+ CTRY_BELIZE = 84,
+ CTRY_BOLIVIA = 68,
+ CTRY_BOSNIA_HERZ = 70,
+ CTRY_BRAZIL = 76,
+ CTRY_BRUNEI_DARUSSALAM = 96,
+ CTRY_BULGARIA = 100,
+ CTRY_CAMBODIA = 116,
+ CTRY_CANADA = 124,
+ CTRY_CHILE = 152,
+ CTRY_CHINA = 156,
+ CTRY_COLOMBIA = 170,
+ CTRY_COSTA_RICA = 188,
+ CTRY_CROATIA = 191,
+ CTRY_CYPRUS = 196,
+ CTRY_CZECH = 203,
+ CTRY_DENMARK = 208,
+ CTRY_DOMINICAN_REPUBLIC = 214,
+ CTRY_ECUADOR = 218,
+ CTRY_EGYPT = 818,
+ CTRY_EL_SALVADOR = 222,
+ CTRY_ESTONIA = 233,
+ CTRY_FAEROE_ISLANDS = 234,
+ CTRY_FINLAND = 246,
+ CTRY_FRANCE = 250,
+ CTRY_GEORGIA = 268,
+ CTRY_GERMANY = 276,
+ CTRY_GREECE = 300,
+ CTRY_GREENLAND = 304,
+ CTRY_GRENADA = 308,
+ CTRY_GUAM = 316,
+ CTRY_GUATEMALA = 320,
+ CTRY_HAITI = 332,
+ CTRY_HONDURAS = 340,
+ CTRY_HONG_KONG = 344,
+ CTRY_HUNGARY = 348,
+ CTRY_ICELAND = 352,
+ CTRY_INDIA = 356,
+ CTRY_INDONESIA = 360,
+ CTRY_IRAN = 364,
+ CTRY_IRAQ = 368,
+ CTRY_IRELAND = 372,
+ CTRY_ISRAEL = 376,
+ CTRY_ITALY = 380,
+ CTRY_JAMAICA = 388,
+ CTRY_JAPAN = 392,
+ CTRY_JORDAN = 400,
+ CTRY_KAZAKHSTAN = 398,
+ CTRY_KENYA = 404,
+ CTRY_KOREA_NORTH = 408,
+ CTRY_KOREA_ROC = 410,
+ CTRY_KOREA_ROC2 = 411,
+ CTRY_KOREA_ROC3 = 412,
+ CTRY_KUWAIT = 414,
+ CTRY_LATVIA = 428,
+ CTRY_LEBANON = 422,
+ CTRY_LIBYA = 434,
+ CTRY_LIECHTENSTEIN = 438,
+ CTRY_LITHUANIA = 440,
+ CTRY_LUXEMBOURG = 442,
+ CTRY_MACAU = 446,
+ CTRY_MACEDONIA = 807,
+ CTRY_MALAYSIA = 458,
+ CTRY_MALTA = 470,
+ CTRY_MEXICO = 484,
+ CTRY_MONACO = 492,
+ CTRY_MOROCCO = 504,
+ CTRY_NEPAL = 524,
+ CTRY_NETHERLANDS = 528,
+ CTRY_NETHERLANDS_ANTILLES = 530,
+ CTRY_NEW_ZEALAND = 554,
+ CTRY_NICARAGUA = 558,
+ CTRY_NORWAY = 578,
+ CTRY_OMAN = 512,
+ CTRY_PAKISTAN = 586,
+ CTRY_PANAMA = 591,
+ CTRY_PAPUA_NEW_GUINEA = 598,
+ CTRY_PARAGUAY = 600,
+ CTRY_PERU = 604,
+ CTRY_PHILIPPINES = 608,
+ CTRY_POLAND = 616,
+ CTRY_PORTUGAL = 620,
+ CTRY_PUERTO_RICO = 630,
+ CTRY_QATAR = 634,
+ CTRY_ROMANIA = 642,
+ CTRY_RUSSIA = 643,
+ CTRY_SAUDI_ARABIA = 682,
+ CTRY_SERBIA_MONTENEGRO = 891,
+ CTRY_SINGAPORE = 702,
+ CTRY_SLOVAKIA = 703,
+ CTRY_SLOVENIA = 705,
+ CTRY_SOUTH_AFRICA = 710,
+ CTRY_SPAIN = 724,
+ CTRY_SRI_LANKA = 144,
+ CTRY_SWEDEN = 752,
+ CTRY_SWITZERLAND = 756,
+ CTRY_SYRIA = 760,
+ CTRY_TAIWAN = 158,
+ CTRY_THAILAND = 764,
+ CTRY_TRINIDAD_Y_TOBAGO = 780,
+ CTRY_TUNISIA = 788,
+ CTRY_TURKEY = 792,
+ CTRY_UAE = 784,
+ CTRY_UKRAINE = 804,
+ CTRY_UNITED_KINGDOM = 826,
+ CTRY_UNITED_STATES = 840,
+ CTRY_UNITED_STATES_FCC49 = 842,
+ CTRY_URUGUAY = 858,
+ CTRY_UZBEKISTAN = 860,
+ CTRY_VENEZUELA = 862,
+ CTRY_VIET_NAM = 704,
+ CTRY_YEMEN = 887,
+ CTRY_ZIMBABWE = 716,
+ CTRY_JAPAN1 = 393,
+ CTRY_JAPAN2 = 394,
+ CTRY_JAPAN3 = 395,
+ CTRY_JAPAN4 = 396,
+ CTRY_JAPAN5 = 397,
+ CTRY_JAPAN6 = 4006,
+ CTRY_JAPAN7 = 4007,
+ CTRY_JAPAN8 = 4008,
+ CTRY_JAPAN9 = 4009,
+ CTRY_JAPAN10 = 4010,
+ CTRY_JAPAN11 = 4011,
+ CTRY_JAPAN12 = 4012,
+ CTRY_JAPAN13 = 4013,
+ CTRY_JAPAN14 = 4014,
+ CTRY_JAPAN15 = 4015,
+ CTRY_JAPAN16 = 4016,
+ CTRY_JAPAN17 = 4017,
+ CTRY_JAPAN18 = 4018,
+ CTRY_JAPAN19 = 4019,
+ CTRY_JAPAN20 = 4020,
+ CTRY_JAPAN21 = 4021,
+ CTRY_JAPAN22 = 4022,
+ CTRY_JAPAN23 = 4023,
+ CTRY_JAPAN24 = 4024,
+ CTRY_JAPAN25 = 4025,
+ CTRY_JAPAN26 = 4026,
+ CTRY_JAPAN27 = 4027,
+ CTRY_JAPAN28 = 4028,
+ CTRY_JAPAN29 = 4029,
+ CTRY_JAPAN30 = 4030,
+ CTRY_JAPAN31 = 4031,
+ CTRY_JAPAN32 = 4032,
+ CTRY_JAPAN33 = 4033,
+ CTRY_JAPAN34 = 4034,
+ CTRY_JAPAN35 = 4035,
+ CTRY_JAPAN36 = 4036,
+ CTRY_JAPAN37 = 4037,
+ CTRY_JAPAN38 = 4038,
+ CTRY_JAPAN39 = 4039,
+ CTRY_JAPAN40 = 4040,
+ CTRY_JAPAN41 = 4041,
+ CTRY_JAPAN42 = 4042,
+ CTRY_JAPAN43 = 4043,
+ CTRY_JAPAN44 = 4044,
+ CTRY_JAPAN45 = 4045,
+ CTRY_JAPAN46 = 4046,
+ CTRY_JAPAN47 = 4047,
+ CTRY_JAPAN48 = 4048,
+ CTRY_JAPAN49 = 4049,
+ CTRY_JAPAN50 = 4050,
+ CTRY_JAPAN51 = 4051,
+ CTRY_JAPAN52 = 4052,
+ CTRY_JAPAN53 = 4053,
+ CTRY_JAPAN54 = 4054,
+ CTRY_JAPAN55 = 4055,
+ CTRY_JAPAN56 = 4056,
+ CTRY_JAPAN57 = 4057,
+ CTRY_JAPAN58 = 4058,
+ CTRY_JAPAN59 = 4059,
+ CTRY_AUSTRALIA2 = 5000,
+ CTRY_CANADA2 = 5001,
+ CTRY_BELGIUM2 = 5002
+};
+
+bool ath_is_world_regd(struct ath_regulatory *reg);
+bool ath_is_49ghz_allowed(u16 regdomain);
+int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
+u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
+ enum ieee80211_band band);
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg);
+
+#endif
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
new file mode 100644
index 000000000..bdd2b4d61
--- /dev/null
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef REGD_COMMON_H
+#define REGD_COMMON_H
+
+enum EnumRd {
+ NO_ENUMRD = 0x00,
+ NULL1_WORLD = 0x03,
+ NULL1_ETSIB = 0x07,
+ NULL1_ETSIC = 0x08,
+ FCC1_FCCA = 0x10,
+ FCC1_WORLD = 0x11,
+ FCC4_FCCA = 0x12,
+ FCC5_FCCA = 0x13,
+ FCC6_FCCA = 0x14,
+
+ FCC2_FCCA = 0x20,
+ FCC2_WORLD = 0x21,
+ FCC2_ETSIC = 0x22,
+ FCC6_WORLD = 0x23,
+ FRANCE_RES = 0x31,
+ FCC3_FCCA = 0x3A,
+ FCC3_WORLD = 0x3B,
+
+ ETSI1_WORLD = 0x37,
+ ETSI3_ETSIA = 0x32,
+ ETSI2_WORLD = 0x35,
+ ETSI3_WORLD = 0x36,
+ ETSI4_WORLD = 0x30,
+ ETSI4_ETSIC = 0x38,
+ ETSI5_WORLD = 0x39,
+ ETSI6_WORLD = 0x34,
+ ETSI_RESERVED = 0x33,
+
+ MKK1_MKKA = 0x40,
+ MKK1_MKKB = 0x41,
+ APL4_WORLD = 0x42,
+ MKK2_MKKA = 0x43,
+ APL_RESERVED = 0x44,
+ APL2_WORLD = 0x45,
+ APL2_APLC = 0x46,
+ APL3_WORLD = 0x47,
+ MKK1_FCCA = 0x48,
+ APL2_APLD = 0x49,
+ MKK1_MKKA1 = 0x4A,
+ MKK1_MKKA2 = 0x4B,
+ MKK1_MKKC = 0x4C,
+
+ APL3_FCCA = 0x50,
+ APL1_WORLD = 0x52,
+ APL1_FCCA = 0x53,
+ APL1_APLA = 0x54,
+ APL1_ETSIC = 0x55,
+ APL2_ETSIC = 0x56,
+ APL5_WORLD = 0x58,
+ APL6_WORLD = 0x5B,
+ APL7_FCCA = 0x5C,
+ APL8_WORLD = 0x5D,
+ APL9_WORLD = 0x5E,
+
+ WOR0_WORLD = 0x60,
+ WOR1_WORLD = 0x61,
+ WOR2_WORLD = 0x62,
+ WOR3_WORLD = 0x63,
+ WOR4_WORLD = 0x64,
+ WOR5_ETSIC = 0x65,
+
+ WOR01_WORLD = 0x66,
+ WOR02_WORLD = 0x67,
+ EU1_WORLD = 0x68,
+
+ WOR9_WORLD = 0x69,
+ WORA_WORLD = 0x6A,
+ WORB_WORLD = 0x6B,
+ WORC_WORLD = 0x6C,
+
+ MKK3_MKKB = 0x80,
+ MKK3_MKKA2 = 0x81,
+ MKK3_MKKC = 0x82,
+
+ MKK4_MKKB = 0x83,
+ MKK4_MKKA2 = 0x84,
+ MKK4_MKKC = 0x85,
+
+ MKK5_MKKB = 0x86,
+ MKK5_MKKA2 = 0x87,
+ MKK5_MKKC = 0x88,
+
+ MKK6_MKKB = 0x89,
+ MKK6_MKKA2 = 0x8A,
+ MKK6_MKKC = 0x8B,
+
+ MKK7_MKKB = 0x8C,
+ MKK7_MKKA2 = 0x8D,
+ MKK7_MKKC = 0x8E,
+
+ MKK8_MKKB = 0x8F,
+ MKK8_MKKA2 = 0x90,
+ MKK8_MKKC = 0x91,
+
+ MKK14_MKKA1 = 0x92,
+ MKK15_MKKA1 = 0x93,
+
+ MKK10_FCCA = 0xD0,
+ MKK10_MKKA1 = 0xD1,
+ MKK10_MKKC = 0xD2,
+ MKK10_MKKA2 = 0xD3,
+
+ MKK11_MKKA = 0xD4,
+ MKK11_FCCA = 0xD5,
+ MKK11_MKKA1 = 0xD6,
+ MKK11_MKKC = 0xD7,
+ MKK11_MKKA2 = 0xD8,
+
+ MKK12_MKKA = 0xD9,
+ MKK12_FCCA = 0xDA,
+ MKK12_MKKA1 = 0xDB,
+ MKK12_MKKC = 0xDC,
+ MKK12_MKKA2 = 0xDD,
+
+ MKK13_MKKB = 0xDE,
+
+ MKK3_MKKA = 0xF0,
+ MKK3_MKKA1 = 0xF1,
+ MKK3_FCCA = 0xF2,
+ MKK4_MKKA = 0xF3,
+ MKK4_MKKA1 = 0xF4,
+ MKK4_FCCA = 0xF5,
+ MKK9_MKKA = 0xF6,
+ MKK10_MKKA = 0xF7,
+ MKK6_MKKA1 = 0xF8,
+ MKK6_FCCA = 0xF9,
+ MKK7_MKKA1 = 0xFA,
+ MKK7_FCCA = 0xFB,
+ MKK9_FCCA = 0xFC,
+ MKK9_MKKA1 = 0xFD,
+ MKK9_MKKC = 0xFE,
+ MKK9_MKKA2 = 0xFF,
+
+ WORLD = 0x0199,
+ DEBUG_REG_DMN = 0x01ff,
+};
+
+/* Regpair to CTL band mapping */
+static struct reg_dmn_pair_mapping regDomainPairs[] = {
+ /* regpair, 5 GHz CTL, 2 GHz CTL */
+ {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN},
+ {NULL1_WORLD, NO_CTL, CTL_ETSI},
+ {NULL1_ETSIB, NO_CTL, CTL_ETSI},
+ {NULL1_ETSIC, NO_CTL, CTL_ETSI},
+
+ {FCC2_FCCA, CTL_FCC, CTL_FCC},
+ {FCC2_WORLD, CTL_FCC, CTL_ETSI},
+ {FCC2_ETSIC, CTL_FCC, CTL_ETSI},
+ {FCC3_FCCA, CTL_FCC, CTL_FCC},
+ {FCC3_WORLD, CTL_FCC, CTL_ETSI},
+ {FCC4_FCCA, CTL_FCC, CTL_FCC},
+ {FCC5_FCCA, CTL_FCC, CTL_FCC},
+ {FCC6_FCCA, CTL_FCC, CTL_FCC},
+ {FCC6_WORLD, CTL_FCC, CTL_ETSI},
+
+ {ETSI1_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI2_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI3_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
+
+ /* XXX: For ETSI3_ETSIA, was NO_CTL meant for the 2 GHz band? */
+ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
+ {FRANCE_RES, CTL_ETSI, CTL_ETSI},
+
+ {FCC1_WORLD, CTL_FCC, CTL_ETSI},
+ {FCC1_FCCA, CTL_FCC, CTL_FCC},
+ {APL1_WORLD, CTL_FCC, CTL_ETSI},
+ {APL2_WORLD, CTL_FCC, CTL_ETSI},
+ {APL3_WORLD, CTL_FCC, CTL_ETSI},
+ {APL4_WORLD, CTL_FCC, CTL_ETSI},
+ {APL5_WORLD, CTL_FCC, CTL_ETSI},
+ {APL6_WORLD, CTL_ETSI, CTL_ETSI},
+ {APL8_WORLD, CTL_ETSI, CTL_ETSI},
+ {APL9_WORLD, CTL_ETSI, CTL_ETSI},
+
+ {APL3_FCCA, CTL_FCC, CTL_FCC},
+ {APL7_FCCA, CTL_FCC, CTL_FCC},
+ {APL1_ETSIC, CTL_FCC, CTL_ETSI},
+ {APL2_ETSIC, CTL_FCC, CTL_ETSI},
+ {APL2_APLD, CTL_FCC, NO_CTL},
+
+ {MKK1_MKKA, CTL_MKK, CTL_MKK},
+ {MKK1_MKKB, CTL_MKK, CTL_MKK},
+ {MKK1_FCCA, CTL_MKK, CTL_FCC},
+ {MKK1_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK1_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK1_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK2_MKKA, CTL_MKK, CTL_MKK},
+ {MKK3_MKKA, CTL_MKK, CTL_MKK},
+ {MKK3_MKKB, CTL_MKK, CTL_MKK},
+ {MKK3_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK3_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK3_MKKC, CTL_MKK, CTL_MKK},
+ {MKK3_FCCA, CTL_MKK, CTL_FCC},
+
+ {MKK4_MKKA, CTL_MKK, CTL_MKK},
+ {MKK4_MKKB, CTL_MKK, CTL_MKK},
+ {MKK4_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK4_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK4_MKKC, CTL_MKK, CTL_MKK},
+ {MKK4_FCCA, CTL_MKK, CTL_FCC},
+
+ {MKK5_MKKB, CTL_MKK, CTL_MKK},
+ {MKK5_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK5_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK6_MKKB, CTL_MKK, CTL_MKK},
+ {MKK6_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK6_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK6_MKKC, CTL_MKK, CTL_MKK},
+ {MKK6_FCCA, CTL_MKK, CTL_FCC},
+
+ {MKK7_MKKB, CTL_MKK, CTL_MKK},
+ {MKK7_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK7_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK7_MKKC, CTL_MKK, CTL_MKK},
+ {MKK7_FCCA, CTL_MKK, CTL_FCC},
+
+ {MKK8_MKKB, CTL_MKK, CTL_MKK},
+ {MKK8_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK8_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK9_MKKA, CTL_MKK, CTL_MKK},
+ {MKK9_FCCA, CTL_MKK, CTL_FCC},
+ {MKK9_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK9_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK9_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK10_MKKA, CTL_MKK, CTL_MKK},
+ {MKK10_FCCA, CTL_MKK, CTL_FCC},
+ {MKK10_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK10_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK10_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK11_MKKA, CTL_MKK, CTL_MKK},
+ {MKK11_FCCA, CTL_MKK, CTL_FCC},
+ {MKK11_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK11_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK11_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK12_MKKA, CTL_MKK, CTL_MKK},
+ {MKK12_FCCA, CTL_MKK, CTL_FCC},
+ {MKK12_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK12_MKKA2, CTL_MKK, CTL_MKK},
+ {MKK12_MKKC, CTL_MKK, CTL_MKK},
+
+ {MKK13_MKKB, CTL_MKK, CTL_MKK},
+ {MKK14_MKKA1, CTL_MKK, CTL_MKK},
+ {MKK15_MKKA1, CTL_MKK, CTL_MKK},
+
+ {WOR0_WORLD, NO_CTL, NO_CTL},
+ {WOR1_WORLD, NO_CTL, NO_CTL},
+ {WOR2_WORLD, NO_CTL, NO_CTL},
+ {WOR3_WORLD, NO_CTL, NO_CTL},
+ {WOR4_WORLD, NO_CTL, NO_CTL},
+ {WOR5_ETSIC, NO_CTL, NO_CTL},
+ {WOR01_WORLD, NO_CTL, NO_CTL},
+ {WOR02_WORLD, NO_CTL, NO_CTL},
+ {EU1_WORLD, NO_CTL, NO_CTL},
+ {WOR9_WORLD, NO_CTL, NO_CTL},
+ {WORA_WORLD, NO_CTL, NO_CTL},
+ {WORB_WORLD, NO_CTL, NO_CTL},
+ {WORC_WORLD, NO_CTL, NO_CTL},
+};
+
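+/* ISO 3166 country code to EEPROM regdomain enum and alpha2 mapping */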
+static struct country_code_to_enum_rd allCountries[] = {
+ {CTRY_DEBUG, NO_ENUMRD, "DB"},
+ {CTRY_DEFAULT, FCC1_FCCA, "CO"},
+ {CTRY_ALBANIA, NULL1_WORLD, "AL"},
+ {CTRY_ALGERIA, NULL1_WORLD, "DZ"},
+ {CTRY_ARGENTINA, FCC3_WORLD, "AR"},
+ {CTRY_ARMENIA, ETSI4_WORLD, "AM"},
+ {CTRY_ARUBA, ETSI1_WORLD, "AW"},
+ {CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
+ {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
+ {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
+ {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
+ {CTRY_BAHRAIN, APL6_WORLD, "BH"},
+ {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
+ {CTRY_BARBADOS, FCC2_WORLD, "BB"},
+ {CTRY_BELARUS, ETSI1_WORLD, "BY"},
+ {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
+ {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
+ {CTRY_BELIZE, APL1_ETSIC, "BZ"},
+ {CTRY_BOLIVIA, APL1_ETSIC, "BO"},
+ {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
+ {CTRY_BRAZIL, FCC3_WORLD, "BR"},
+ {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
+ {CTRY_BULGARIA, ETSI6_WORLD, "BG"},
+ {CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
+ {CTRY_CANADA, FCC3_FCCA, "CA"},
+ {CTRY_CANADA2, FCC6_FCCA, "CA"},
+ {CTRY_CHILE, APL6_WORLD, "CL"},
+ {CTRY_CHINA, APL1_WORLD, "CN"},
+ {CTRY_COLOMBIA, FCC1_FCCA, "CO"},
+ {CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
+ {CTRY_CROATIA, ETSI1_WORLD, "HR"},
+ {CTRY_CYPRUS, ETSI1_WORLD, "CY"},
+ {CTRY_CZECH, ETSI3_WORLD, "CZ"},
+ {CTRY_DENMARK, ETSI1_WORLD, "DK"},
+ {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO"},
+ {CTRY_ECUADOR, FCC1_WORLD, "EC"},
+ {CTRY_EGYPT, ETSI3_WORLD, "EG"},
+ {CTRY_EL_SALVADOR, FCC1_WORLD, "SV"},
+ {CTRY_ESTONIA, ETSI1_WORLD, "EE"},
+ {CTRY_FINLAND, ETSI1_WORLD, "FI"},
+ {CTRY_FRANCE, ETSI1_WORLD, "FR"},
+ {CTRY_GEORGIA, ETSI4_WORLD, "GE"},
+ {CTRY_GERMANY, ETSI1_WORLD, "DE"},
+ {CTRY_GREECE, ETSI1_WORLD, "GR"},
+ {CTRY_GREENLAND, ETSI1_WORLD, "GL"},
+ {CTRY_GRENADA, FCC3_FCCA, "GD"},
+ {CTRY_GUAM, FCC1_FCCA, "GU"},
+ {CTRY_GUATEMALA, FCC1_FCCA, "GT"},
+ {CTRY_HAITI, ETSI1_WORLD, "HT"},
+ {CTRY_HONDURAS, NULL1_WORLD, "HN"},
+ {CTRY_HONG_KONG, FCC3_WORLD, "HK"},
+ {CTRY_HUNGARY, ETSI1_WORLD, "HU"},
+ {CTRY_ICELAND, ETSI1_WORLD, "IS"},
+ {CTRY_INDIA, APL6_WORLD, "IN"},
+ {CTRY_INDONESIA, NULL1_WORLD, "ID"},
+ {CTRY_IRAN, APL1_WORLD, "IR"},
+ {CTRY_IRELAND, ETSI1_WORLD, "IE"},
+ {CTRY_ISRAEL, NULL1_WORLD, "IL"},
+ {CTRY_ITALY, ETSI1_WORLD, "IT"},
+ {CTRY_JAMAICA, FCC3_WORLD, "JM"},
+
+ {CTRY_JAPAN, MKK1_MKKA, "JP"},
+ {CTRY_JAPAN1, MKK1_MKKB, "JP"},
+ {CTRY_JAPAN2, MKK1_FCCA, "JP"},
+ {CTRY_JAPAN3, MKK2_MKKA, "JP"},
+ {CTRY_JAPAN4, MKK1_MKKA1, "JP"},
+ {CTRY_JAPAN5, MKK1_MKKA2, "JP"},
+ {CTRY_JAPAN6, MKK1_MKKC, "JP"},
+ {CTRY_JAPAN7, MKK3_MKKB, "JP"},
+ {CTRY_JAPAN8, MKK3_MKKA2, "JP"},
+ {CTRY_JAPAN9, MKK3_MKKC, "JP"},
+ {CTRY_JAPAN10, MKK4_MKKB, "JP"},
+ {CTRY_JAPAN11, MKK4_MKKA2, "JP"},
+ {CTRY_JAPAN12, MKK4_MKKC, "JP"},
+ {CTRY_JAPAN13, MKK5_MKKB, "JP"},
+ {CTRY_JAPAN14, MKK5_MKKA2, "JP"},
+ {CTRY_JAPAN15, MKK5_MKKC, "JP"},
+ {CTRY_JAPAN16, MKK6_MKKB, "JP"},
+ {CTRY_JAPAN17, MKK6_MKKA2, "JP"},
+ {CTRY_JAPAN18, MKK6_MKKC, "JP"},
+ {CTRY_JAPAN19, MKK7_MKKB, "JP"},
+ {CTRY_JAPAN20, MKK7_MKKA2, "JP"},
+ {CTRY_JAPAN21, MKK7_MKKC, "JP"},
+ {CTRY_JAPAN22, MKK8_MKKB, "JP"},
+ {CTRY_JAPAN23, MKK8_MKKA2, "JP"},
+ {CTRY_JAPAN24, MKK8_MKKC, "JP"},
+ {CTRY_JAPAN25, MKK3_MKKA, "JP"},
+ {CTRY_JAPAN26, MKK3_MKKA1, "JP"},
+ {CTRY_JAPAN27, MKK3_FCCA, "JP"},
+ {CTRY_JAPAN28, MKK4_MKKA1, "JP"},
+ {CTRY_JAPAN29, MKK4_FCCA, "JP"},
+ {CTRY_JAPAN30, MKK6_MKKA1, "JP"},
+ {CTRY_JAPAN31, MKK6_FCCA, "JP"},
+ {CTRY_JAPAN32, MKK7_MKKA1, "JP"},
+ {CTRY_JAPAN33, MKK7_FCCA, "JP"},
+ {CTRY_JAPAN34, MKK9_MKKA, "JP"},
+ {CTRY_JAPAN35, MKK10_MKKA, "JP"},
+ {CTRY_JAPAN36, MKK4_MKKA, "JP"},
+ {CTRY_JAPAN37, MKK9_FCCA, "JP"},
+ {CTRY_JAPAN38, MKK9_MKKA1, "JP"},
+ {CTRY_JAPAN39, MKK9_MKKC, "JP"},
+ {CTRY_JAPAN40, MKK9_MKKA2, "JP"},
+ {CTRY_JAPAN41, MKK10_FCCA, "JP"},
+ {CTRY_JAPAN42, MKK10_MKKA1, "JP"},
+ {CTRY_JAPAN43, MKK10_MKKC, "JP"},
+ {CTRY_JAPAN44, MKK10_MKKA2, "JP"},
+ {CTRY_JAPAN45, MKK11_MKKA, "JP"},
+ {CTRY_JAPAN46, MKK11_FCCA, "JP"},
+ {CTRY_JAPAN47, MKK11_MKKA1, "JP"},
+ {CTRY_JAPAN48, MKK11_MKKC, "JP"},
+ {CTRY_JAPAN49, MKK11_MKKA2, "JP"},
+ {CTRY_JAPAN50, MKK12_MKKA, "JP"},
+ {CTRY_JAPAN51, MKK12_FCCA, "JP"},
+ {CTRY_JAPAN52, MKK12_MKKA1, "JP"},
+ {CTRY_JAPAN53, MKK12_MKKC, "JP"},
+ {CTRY_JAPAN54, MKK12_MKKA2, "JP"},
+ {CTRY_JAPAN57, MKK13_MKKB, "JP"},
+ {CTRY_JAPAN58, MKK14_MKKA1, "JP"},
+ {CTRY_JAPAN59, MKK15_MKKA1, "JP"},
+
+ {CTRY_JORDAN, ETSI2_WORLD, "JO"},
+ {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ"},
+ {CTRY_KOREA_NORTH, APL9_WORLD, "KP"},
+ {CTRY_KOREA_ROC, APL9_WORLD, "KR"},
+ {CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
+ {CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
+ {CTRY_KUWAIT, ETSI3_WORLD, "KW"},
+ {CTRY_LATVIA, ETSI1_WORLD, "LV"},
+ {CTRY_LEBANON, NULL1_WORLD, "LB"},
+ {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
+ {CTRY_LITHUANIA, ETSI1_WORLD, "LT"},
+ {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"},
+ {CTRY_MACAU, FCC2_WORLD, "MO"},
+ {CTRY_MACEDONIA, NULL1_WORLD, "MK"},
+ {CTRY_MALAYSIA, APL8_WORLD, "MY"},
+ {CTRY_MALTA, ETSI1_WORLD, "MT"},
+ {CTRY_MEXICO, FCC1_FCCA, "MX"},
+ {CTRY_MONACO, ETSI4_WORLD, "MC"},
+ {CTRY_MOROCCO, APL4_WORLD, "MA"},
+ {CTRY_NEPAL, APL1_WORLD, "NP"},
+ {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
+ {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
+ {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
+ {CTRY_NORWAY, ETSI1_WORLD, "NO"},
+ {CTRY_OMAN, FCC3_WORLD, "OM"},
+ {CTRY_PAKISTAN, NULL1_WORLD, "PK"},
+ {CTRY_PANAMA, FCC1_FCCA, "PA"},
+ {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
+ {CTRY_PERU, APL1_WORLD, "PE"},
+ {CTRY_PHILIPPINES, APL1_WORLD, "PH"},
+ {CTRY_POLAND, ETSI1_WORLD, "PL"},
+ {CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
+ {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
+ {CTRY_QATAR, APL1_WORLD, "QA"},
+ {CTRY_ROMANIA, NULL1_WORLD, "RO"},
+ {CTRY_RUSSIA, NULL1_WORLD, "RU"},
+ {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
+ {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
+ {CTRY_SINGAPORE, APL6_WORLD, "SG"},
+ {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
+ {CTRY_SLOVENIA, ETSI1_WORLD, "SI"},
+ {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA"},
+ {CTRY_SPAIN, ETSI1_WORLD, "ES"},
+ {CTRY_SRI_LANKA, FCC3_WORLD, "LK"},
+ {CTRY_SWEDEN, ETSI1_WORLD, "SE"},
+ {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
+ {CTRY_SYRIA, NULL1_WORLD, "SY"},
+ {CTRY_TAIWAN, APL3_FCCA, "TW"},
+ {CTRY_THAILAND, FCC3_WORLD, "TH"},
+ {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
+ {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
+ {CTRY_TURKEY, ETSI3_WORLD, "TR"},
+ {CTRY_UKRAINE, NULL1_WORLD, "UA"},
+ {CTRY_UAE, NULL1_WORLD, "AE"},
+ {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
+ {CTRY_UNITED_STATES, FCC3_FCCA, "US"},
+ /* This "PS" is for US public safety actually... to support this we
+ * would need to assign new special alpha2 to CRDA db as with the world
+ * regdomain and use another alpha2 */
+ {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
+ {CTRY_URUGUAY, FCC3_WORLD, "UY"},
+ {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
+ {CTRY_VENEZUELA, APL2_ETSIC, "VE"},
+ {CTRY_VIET_NAM, NULL1_WORLD, "VN"},
+ {CTRY_YEMEN, NULL1_WORLD, "YE"},
+ {CTRY_ZIMBABWE, NULL1_WORLD, "ZW"},
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
new file mode 100644
index 000000000..0d742acb1
--- /dev/null
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_COMMON_H
+#define SPECTRAL_COMMON_H
+
+#define SPECTRAL_HT20_NUM_BINS 56
+#define SPECTRAL_HT20_40_NUM_BINS 128
+
+/* TODO: could possibly be 512, but no samples this large
+ * have been acquired so far.
+ */
+#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length at the front position and change
+ * other fields after adding another sample type
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
+ ATH_FFT_SAMPLE_ATH10K,
+};
+
+struct fft_sample_tlv {
+ u8 type; /* see enum ath_fft_sample_type */
+ __be16 length;
+ /* type dependent data follows */
+} __packed;
+
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+
+ u8 max_exp;
+
+ __be16 freq;
+ s8 rssi;
+ s8 noise;
+
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+
+ __be64 tsf;
+
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
+struct fft_sample_ht20_40 {
+ struct fft_sample_tlv tlv;
+
+ u8 channel_type;
+ __be16 freq;
+
+ s8 lower_rssi;
+ s8 upper_rssi;
+
+ __be64 tsf;
+
+ s8 lower_noise;
+ s8 upper_noise;
+
+ __be16 lower_max_magnitude;
+ __be16 upper_max_magnitude;
+
+ u8 lower_max_index;
+ u8 upper_max_index;
+
+ u8 lower_bitmap_weight;
+ u8 upper_bitmap_weight;
+
+ u8 max_exp;
+
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+struct fft_sample_ath10k {
+ struct fft_sample_tlv tlv;
+ u8 chan_width_mhz;
+ __be16 freq1;
+ __be16 freq2;
+ __be16 noise;
+ __be16 max_magnitude;
+ __be16 total_gain_db;
+ __be16 base_pwr_db;
+ __be64 tsf;
+ s8 max_index;
+ u8 rssi;
+ u8 relpwr_db;
+ u8 avgpwr_db;
+ u8 max_exp;
+
+ u8 data[0];
+} __packed;
+
+#endif /* SPECTRAL_COMMON_H */
diff --git a/drivers/net/wireless/ath/trace.c b/drivers/net/wireless/ath/trace.c
new file mode 100644
index 000000000..18fb3a071
--- /dev/null
+++ b/drivers/net/wireless/ath/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
new file mode 100644
index 000000000..ba711644d
--- /dev/null
+++ b/drivers/net/wireless/ath/trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_H
+
+#include <linux/tracepoint.h>
+#include "ath.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath
+
+#if !defined(CONFIG_ATH_TRACEPOINTS)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) static inline void trace_ ## name(proto) {}
+
+#endif /* CONFIG_ATH_TRACEPOINTS */
+
+TRACE_EVENT(ath_log,
+
+ TP_PROTO(struct wiphy *wiphy,
+ struct va_format *vaf),
+
+ TP_ARGS(wiphy, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, wiphy_name(wiphy))
+ __string(driver, KBUILD_MODNAME)
+ __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, wiphy_name(wiphy));
+ __assign_str(driver, KBUILD_MODNAME);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH_DBG_MAX_LEN,
+ vaf->fmt,
+ *vaf->va) >= ATH_DBG_MAX_LEN);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+#endif /* _TRACE_H || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644
index 000000000..591ebaea8
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -0,0 +1,16 @@
+config WCN36XX
+ tristate "Qualcomm Atheros WCN3660/3680 support"
+ depends on MAC80211 && HAS_DMA
+ ---help---
+ This module adds support for wireless adapters based on
+ Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+ If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+ bool "WCN36XX debugfs support"
+ depends on WCN36XX
+ ---help---
+ Enables debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644
index 000000000..50c43b438
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y += main.o \
+ dxe.o \
+ txrx.o \
+ smd.o \
+ pmc.o \
+ debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644
index 000000000..ef44a2da6
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ char buf[3];
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ if (vif_priv->pw_state == WCN36XX_BMPS)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ break;
+ }
+ }
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ char buf[32];
+ int buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type) {
+ wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ }
+ }
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+ .open = simple_open,
+ .read = read_file_bool_bmps,
+ .write = write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ char buf[255], *tmp;
+ int buf_size;
+ u32 arg[WCN36xx_MAX_DUMP_ARGS];
+ int i;
+
+ memset(buf, 0, sizeof(buf));
+ memset(arg, 0, sizeof(arg));
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ tmp = buf;
+
+ for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+ char *begin;
+ begin = strsep(&tmp, " ");
+ if (begin == NULL)
+ break;
+
+ if (kstrtou32(begin, 0, &arg[i]) != 0)
+ break;
+ }
+
+ wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+ arg[3], arg[4]);
+ wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+ return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+ .open = simple_open,
+ .write = write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data) \
+ do { \
+ struct dentry *d; \
+ d = debugfs_create_file(__stringify(name), \
+ mode, dfs->rootdir, \
+ priv_data, fop); \
+ dfs->file_##name.dentry = d; \
+ if (IS_ERR(d)) { \
+ wcn36xx_warn("Create the debugfs entry failed");\
+ dfs->file_##name.dentry = NULL; \
+ } \
+ } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+ dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wcn->hw->wiphy->debugfsdir);
+ if (IS_ERR(dfs->rootdir)) {
+ wcn36xx_warn("Create the debugfs failed\n");
+ dfs->rootdir = NULL;
+ }
+
+ ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+ &fops_wcn36xx_bmps, wcn);
+ ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+ struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+ debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644
index 000000000..46307aa56
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS 5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+ struct dentry *dentry;
+ u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+ struct dentry *rootdir;
+ struct wcn36xx_dfs_file file_bmps_switcher;
+ struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644
index 000000000..086549b73
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
+ * Data packets are transferred through the low priority channels and
+ * management packets through the high priority channels.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+ struct wcn36xx_dxe_ch *ch = is_low ?
+ &wcn->dxe_tx_l_ch :
+ &wcn->dxe_tx_h_ch;
+
+ return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->mmio + addr);
+}
+
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
+do { \
+ if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+ else \
+ wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0) \
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+ *data = readl(wcn->mmio + addr);
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+ addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+ int i;
+
+ for (i = 0; i < ch->desc_num && ctl; i++) {
+ next = ctl->next;
+ kfree(ctl);
+ ctl = next;
+ }
+}
+
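+/*
+ * Allocate the per-channel control blocks and link them into a circular
+ * list: the last block points back to the head so the ring can be walked
+ * continuously.
+ */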
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ int i;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+ if (!cur_ctl)
+ goto out_fail;
+
+ spin_lock_init(&cur_ctl->skb_lock);
+ cur_ctl->ctl_blk_order = i;
+ if (i == 0) {
+ ch->head_blk_ctl = cur_ctl;
+ ch->tail_blk_ctl = cur_ctl;
+ } else if (ch->desc_num - 1 == i) {
+ prev_ctl->next = cur_ctl;
+ cur_ctl->next = ch->head_blk_ctl;
+ } else {
+ prev_ctl->next = cur_ctl;
+ }
+ prev_ctl = cur_ctl;
+ }
+
+ return 0;
+
+out_fail:
+ wcn36xx_dxe_free_ctl_block(ch);
+ return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+ int ret;
+
+ wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+ wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+ wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+ wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+ wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+ wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+ wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
+ wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
+
+ wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+ wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+ wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+ wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+ wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+ wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+ wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+ wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+ /* DXE control block allocation */
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+ if (ret)
+ goto out_err;
+ ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+ if (ret)
+ goto out_err;
+
+ /* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
+ ret = wcn->ctrl_ops->smsm_change_state(
+ WCN36XX_SMSM_WLAN_TX_ENABLE,
+ WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+ return 0;
+
+out_err:
+ wcn36xx_err("Failed to allocate DXE control blocks\n");
+ wcn36xx_dxe_free_ctl_blks(wcn);
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_desc *cur_dxe = NULL;
+ struct wcn36xx_dxe_desc *prev_dxe = NULL;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+ size_t size;
+ int i;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ GFP_KERNEL);
+ if (!wcn_ch->cpu_addr)
+ return -ENOMEM;
+
+ memset(wcn_ch->cpu_addr, 0, size);
+
+ cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ cur_ctl->desc = cur_dxe;
+ cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+ i * sizeof(struct wcn36xx_dxe_desc);
+
+ switch (wcn_ch->ch_type) {
+ case WCN36XX_DXE_CH_TX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+ break;
+ case WCN36XX_DXE_CH_TX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+ cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+ break;
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ }
+ if (0 == i) {
+ cur_dxe->phy_next_l = 0;
+ } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ } else if (i == (wcn_ch->desc_num - 1)) {
+ prev_dxe->phy_next_l =
+ cur_ctl->desc_phy_addr;
+ cur_dxe->phy_next_l =
+ wcn_ch->head_blk_ctl->desc_phy_addr;
+ }
+ cur_ctl = cur_ctl->next;
+ prev_dxe = cur_dxe;
+ cur_dxe++;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+ struct wcn36xx_dxe_mem_pool *pool)
+{
+ int i, chunk_size = pool->chunk_size;
+ dma_addr_t bd_phy_addr = pool->phy_addr;
+ void *bd_cpu_addr = pool->virt_addr;
+ struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+ for (i = 0; i < ch->desc_num; i++) {
+ /* Only every second dxe needs a bd pointer,
+ * the others point to the skb data */
+ if (!(i & 1)) {
+ cur->bd_phy_addr = bd_phy_addr;
+ cur->bd_cpu_addr = bd_cpu_addr;
+ bd_phy_addr += chunk_size;
+ bd_cpu_addr += chunk_size;
+ } else {
+ cur->bd_phy_addr = 0;
+ cur->bd_cpu_addr = NULL;
+ }
+ cur = cur->next;
+ }
+}
+
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+ int reg_data = 0;
+
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ &reg_data);
+
+ reg_data |= wcn_ch;
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_INT_MASK_REG,
+ (int)reg_data);
+ return 0;
+}
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ dxe->dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ ctl->skb = skb;
+
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ int i;
+ struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+ cur_ctl = wcn_ch->head_blk_ctl;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_ctl);
+ cur_ctl = cur_ctl->next;
+ }
+
+ return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
+{
+ struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+ int i;
+
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ kfree_skb(cur->skb);
+ cur = cur->next;
+ }
+}
+
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb) {
+ wcn36xx_warn("Spurious TX complete indication\n");
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (status == 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
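+/*
+ * Walk the TX ring from the tail and release descriptors the hardware has
+ * finished with (VALID bit cleared), unmapping their skbs and waking the
+ * queues if they had been stopped.
+ */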
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /*
+ * Use a do-while so at least one iteration runs: when the ring is
+ * completely full, head and tail point to the same element and a
+ * plain while loop would not execute at all.
+ */
+ do {
+ if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
+ break;
+ if (ctl->skb) {
+ dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ ctl->skb->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(ctl->skb);
+ if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+ /* No TX status requested, so free the skb right away */
+ ieee80211_free_txskb(wcn->hw, ctl->skb);
+ }
+ spin_lock_irqsave(&ctl->skb_lock, flags);
+ if (wcn->queues_stopped) {
+ wcn->queues_stopped = false;
+ ieee80211_wake_queues(wcn->hw);
+ }
+ spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+ ctl->skb = NULL;
+ }
+ ctl = ctl->next;
+ } while (ctl != ch->head_blk_ctl &&
+ !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+ ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+ int int_src, int_reason;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+ &int_reason);
+
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_H);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ }
+
+ if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+ wcn36xx_dxe_read_register(wcn,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+ &int_reason);
+ /* TODO: Check int_reason */
+
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+ WCN36XX_INT_MASK_CHAN_TX_L);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+ struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+ disable_irq_nosync(wcn->rx_irq);
+ wcn36xx_dxe_rx_frame(wcn);
+ enable_irq(wcn->rx_irq);
+ return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+ int ret;
+
+ ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+ IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc tx irq\n");
+ goto out_err;
+ }
+
+ ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+ "wcn36xx_rx", wcn);
+ if (ret) {
+ wcn36xx_err("failed to alloc rx irq\n");
+ goto out_txirq;
+ }
+
+ enable_irq_wake(wcn->rx_irq);
+
+ return 0;
+
+out_txirq:
+ free_irq(wcn->tx_irq, wcn);
+out_err:
+ return ret;
+
+}
+
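+/*
+ * Drain all completed descriptors of an RX channel: hand each received skb
+ * to the RX path and refill the descriptor with a freshly allocated buffer.
+ */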
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = ctl->skb;
+ dma_addr = dxe->dst_addr_l;
+ wcn36xx_dxe_fill_skb(ctl);
+
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ break;
+ default:
+ wcn36xx_warn("Unknown channel\n");
+ }
+
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ ctl = ctl->next;
+ dxe = ctl->desc;
+ }
+
+ ch->head_blk_ctl = ctl;
+
+ return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+ int int_src;
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }
+
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt pending\n");
+}
+
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+ size_t s;
+ void *cpu_addr;
+
+ /* Allocate BD headers for MGMT frames */
+
+ /* Where this comes from, ask QC */
+ wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ /* Allocate BD headers for DATA frames */
+
+ /* Where this comes from, ask QC */
+ wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+ 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+ s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+ cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
+ if (!cpu_addr)
+ goto out_err;
+
+ wcn->data_mem_pool.virt_addr = cpu_addr;
+ memset(cpu_addr, 0, s);
+
+ return 0;
+
+out_err:
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_err("Failed to allocate BD mempool\n");
+ return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+ if (wcn->mgmt_mem_pool.virt_addr)
+ dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+ wcn->mgmt_mem_pool.virt_addr,
+ wcn->mgmt_mem_pool.phy_addr);
+
+ if (wcn->data_mem_pool.virt_addr) {
+ dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+ wcn->data_mem_pool.virt_addr,
+ wcn->data_mem_pool.phy_addr);
+ }
+}
+
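+/*
+ * Queue a frame for transmission. Each frame occupies two descriptors: the
+ * first carries the buffer descriptor (BD), the second the skb payload.
+ */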
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low)
+{
+ struct wcn36xx_dxe_ctl *ctl = NULL;
+ struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_ch *ch = NULL;
+ unsigned long flags;
+
+ ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+ ctl = ch->head_blk_ctl;
+
+ spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+ /*
+ * If the skb is not NULL we have reached the tail of the ring, so the
+ * ring is full. Stop the queues to let mac80211 back off until the
+ * ring has an empty slot again.
+ */
+ if (NULL != ctl->next->skb) {
+ ieee80211_stop_queues(wcn->hw);
+ wcn->queues_stopped = true;
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+ ctl->skb = NULL;
+ desc = ctl->desc;
+
+ /* Set source address of the BD we send */
+ desc->src_addr_l = ctl->bd_phy_addr;
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+ desc->ctrl = ch->ctrl_bd;
+
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+ "BD >>> ", (char *)ctl->bd_cpu_addr,
+ sizeof(struct wcn36xx_tx_bd));
+
+ /* Set source address of the SKB we send */
+ ctl = ctl->next;
+ ctl->skb = skb;
+ desc = ctl->desc;
+ if (ctl->bd_cpu_addr) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ return -EINVAL;
+ }
+
+ desc->src_addr_l = dma_map_single(NULL,
+ ctl->skb->data,
+ ctl->skb->len,
+ DMA_TO_DEVICE);
+
+ desc->dst_addr_l = ch->dxe_wq;
+ desc->fr_len = ctl->skb->len;
+
+ /* set dxe descriptor to VALID */
+ desc->ctrl = ch->ctrl_skb;
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+ (char *)desc, sizeof(*desc));
+ wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
+ (char *)ctl->skb->data, ctl->skb->len);
+
+ /* Move the head of the ring to the next empty descriptor */
+ ch->head_blk_ctl = ctl->next;
+
+ /*
+ * When connected, the chip can be in sleep mode while we try to send
+ * a data frame, and writing to the register will not wake it up.
+ * Instead, notify the chip about the new frame through the SMSM bus.
+ */
+ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+ wcn->ctrl_ops->smsm_change_state(
+ 0,
+ WCN36XX_SMSM_WLAN_TX_ENABLE);
+ } else {
+ /* indicate End Of Packet and generate interrupt on descriptor
+ * done.
+ */
+ wcn36xx_dxe_write_register(wcn,
+ ch->reg_ctrl, ch->def_ctrl);
+ }
+
+ return 0;
+}
+
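+/*
+ * Bring up the DXE engine: reset it, set up descriptor rings and DMA
+ * addresses for the TX/RX channels, enable their interrupts and register
+ * the TX-complete and RX-ready interrupt handlers.
+ */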
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+ int reg_data = 0, ret;
+
+ reg_data = WCN36XX_DXE_REG_RESET;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+ /* Setting interrupt path */
+ reg_data = WCN36XX_DXE_CCU_INT;
+ wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+ /***************************************/
+ /* Init descriptors for TX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+ wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX LOW */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+ WCN36XX_DXE_WQ_TX_L);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+ /***************************************/
+ /* Init descriptors for TX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+ wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Program DMA destination addr for TX HIGH */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+ WCN36XX_DXE_WQ_TX_H);
+
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+ /***************************************/
+ /* Init descriptors for RX LOW channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+	/* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+ /* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+ WCN36XX_DXE_WQ_RX_L);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+ wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_L,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+ /***************************************/
+ /* Init descriptors for RX HIGH channel */
+ /***************************************/
+ wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+	/* For RX we need to preallocate buffers */
+ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+	/* Write channel head to a NEXT register */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+ /* Write DMA source address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+ WCN36XX_DXE_WQ_RX_H);
+
+ /* Program preallocated destination address */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+ wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+ /* Enable default control registers */
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_REG_CTL_RX_H,
+ WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+ /* Enable channel interrupts */
+ wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+ ret = wcn36xx_dxe_request_irqs(wcn);
+ if (ret < 0)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+ free_irq(wcn->tx_irq, wcn);
+ free_irq(wcn->rx_irq, wcn);
+
+ if (wcn->tx_ack_skb) {
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ }
+
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644
index 000000000..35ee7e966
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+TX_LOW = DMA0
+TX_HIGH = DMA4
+RX_LOW = DMA1
+RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
+*/
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_REG 0x202000
+
+#define WCN36XX_DXE_CCU_INT 0xA0011
+#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
+
+/* TODO These values must be calculated properly instead of hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L 0x328a44
+#define WCN36XX_DXE_CTRL_TX_H 0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L 0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H 0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD 0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB 0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD 0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB 0x328a4d
+
+/* TODO These values must be calculated properly instead of hardcoded */
+#define WCN36XX_DXE_WQ_TX_L 0x17
+#define WCN36XX_DXE_WQ_TX_H 0x17
+#define WCN36XX_DXE_WQ_RX_L 0xB
+#define WCN36XX_DXE_WQ_RX_H 0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO These values must be calculated properly instead of hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H 0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H 0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ /* #define WCN36XX_DXE_INT_CH6_MASK 0x00000040 */
+ /* #define WCN36XX_DXE_INT_CH5_MASK 0x00000020 */
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ /* #define WCN36XX_DXE_INT_CH2_MASK 0x00000004 */
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET 0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW 0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH 0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET 0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET 0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET 0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET 0x4C0
+
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR 0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR 0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE descriptor destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR 0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR 0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET + \
+ WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L (WCN36XX_DXE_MEM_REG + \
+ WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L 0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L 0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H 0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H 0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE 128
+
+#define WCN36XX_PKT_SIZE 0xF20
+enum wcn36xx_dxe_ch_type {
+ WCN36XX_DXE_CH_TX_L,
+ WCN36XX_DXE_CH_TX_H,
+ WCN36XX_DXE_CH_RX_L,
+ WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+ WCN36XX_DXE_CH_DESC_NUMB_TX_L = 128,
+ WCN36XX_DXE_CH_DESC_NUMB_TX_H = 10,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_L = 512,
+ WCN36XX_DXE_CH_DESC_NUMB_RX_H = 40
+};
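+
+/*
+ * These counts size the per-channel descriptor rings; for the TX channels
+ * they also size the matching BD memory pools (chunk_size bytes per
+ * descriptor, see the pool free path in dxe.c).
+ */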
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: control word, logically a union of the following bit fields:
+ * union {
+ * u32 valid :1; //0 = DMA stop, 1 = DMA continue with this
+ * //descriptor
+ * u32 transfer_type :2; //0 = Host to Host space
+ * u32 eop :1; //End of Packet
+ * u32 bd_handling :1; //if transferType = Host to BMU, then 0
+ * // means first 128 bytes contain BD, and 1
+ * // means create new empty BD
+ * u32 siq :1; // SIQ
+ * u32 diq :1; // DIQ
+ * u32 pdu_rel :1; //0 = don't release BD and PDUs when done,
+ * // 1 = release them
+ * u32 bthld_sel :4; //BMU Threshold Select
+ * u32 prio :3; //Specifies the priority level to use for
+ * // the transfer
+ * u32 stop_channel :1; //1 = DMA stops processing further, channel
+ * //requires re-enabling after this
+ * u32 intr :1; //Interrupt on Descriptor Done
+ * u32 rsvd :1; //reserved
+ * u32 size :14;//14 bits used - ignored for BMU transfers,
+ * //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+ u32 ctrl;
+ u32 fr_len;
+
+ u32 src_addr_l;
+ u32 dst_addr_l;
+ u32 phy_next_l;
+ u32 src_addr_h;
+ u32 dst_addr_h;
+ u32 phy_next_h;
+} __packed;
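+
+/*
+ * Eight 32-bit words (32 bytes, packed). On the TX path in dxe.c only the
+ * low (*_l) address words are programmed; the high words appear unused by
+ * this driver.
+ */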
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+ struct wcn36xx_dxe_ctl *next;
+ struct wcn36xx_dxe_desc *desc;
+ unsigned int desc_phy_addr;
+ int ctl_blk_order;
+ struct sk_buff *skb;
+ spinlock_t skb_lock;
+ void *bd_cpu_addr;
+ dma_addr_t bd_phy_addr;
+};
+
+struct wcn36xx_dxe_ch {
+ enum wcn36xx_dxe_ch_type ch_type;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ enum wcn36xx_dxe_ch_desc_num desc_num;
+ /* DXE control block ring */
+ struct wcn36xx_dxe_ctl *head_blk_ctl;
+ struct wcn36xx_dxe_ctl *tail_blk_ctl;
+
+ /* DXE channel specific configs */
+ u32 dxe_wq;
+ u32 ctrl_bd;
+ u32 ctrl_skb;
+ u32 reg_ctrl;
+ u32 def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+ int chunk_size;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+ struct wcn36xx_vif *vif_priv,
+ struct sk_buff *skb,
+ bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644
index 000000000..a1f1127d7
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -0,0 +1,4659 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+ API VERSIONING INFORMATION
+
+ The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+ The MAJOR is incremented for major product/architecture changes
+ (and then MINOR/VERSION/REVISION are zeroed)
+ The MINOR is incremented for minor product/architecture changes
+ (and then VERSION/REVISION are zeroed)
+ The VERSION is incremented if a significant API change occurs
+ (and then REVISION is zeroed)
+ The REVISION is incremented if an insignificant API change occurs
+ or if a new API is added
+  All values are in the range 0..255 (i.e. they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
+
+/* This is to force the compiler to use the full size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE 0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE 0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC 8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC 4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN 4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED (1 << 2)
+#define PARAM_llACOEXIST_CHANGED (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED (1<<6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED (1<<7)
+#define PARAM_RIFS_MODE_CHANGED (1<<8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED (1<<9)
+#define PARAM_OBSS_MODE_CHANGED (1<<10)
+#define PARAM_BEACON_UPDATE_MASK \
+ (PARAM_BCN_INTERVAL_CHANGED | \
+ PARAM_SHORT_PREAMBLE_CHANGED | \
+ PARAM_SHORT_SLOT_TIME_CHANGED | \
+ PARAM_llACOEXIST_CHANGED | \
+ PARAM_llBCOEXIST_CHANGED | \
+ PARAM_llGCOEXIST_CHANGED | \
+ PARAM_HT20MHZCOEXIST_CHANGED | \
+ PARAM_NON_GF_DEVICES_PRESENT_CHANGED | \
+ PARAM_RIFS_MODE_CHANGED | \
+ PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED | \
+ PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH 64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+ /* Init/De-Init */
+ WCN36XX_HAL_START_REQ = 0,
+ WCN36XX_HAL_START_RSP = 1,
+ WCN36XX_HAL_STOP_REQ = 2,
+ WCN36XX_HAL_STOP_RSP = 3,
+
+ /* Scan */
+ WCN36XX_HAL_INIT_SCAN_REQ = 4,
+ WCN36XX_HAL_INIT_SCAN_RSP = 5,
+ WCN36XX_HAL_START_SCAN_REQ = 6,
+ WCN36XX_HAL_START_SCAN_RSP = 7,
+ WCN36XX_HAL_END_SCAN_REQ = 8,
+ WCN36XX_HAL_END_SCAN_RSP = 9,
+ WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+ WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+ /* HW STA configuration/deconfiguration */
+ WCN36XX_HAL_CONFIG_STA_REQ = 12,
+ WCN36XX_HAL_CONFIG_STA_RSP = 13,
+ WCN36XX_HAL_DELETE_STA_REQ = 14,
+ WCN36XX_HAL_DELETE_STA_RSP = 15,
+ WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+ WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+ WCN36XX_HAL_DELETE_BSS_REQ = 18,
+ WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+	/* Infra STA association */
+ WCN36XX_HAL_JOIN_REQ = 20,
+ WCN36XX_HAL_JOIN_RSP = 21,
+ WCN36XX_HAL_POST_ASSOC_REQ = 22,
+ WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+ /* Security */
+ WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+ WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+ WCN36XX_HAL_SET_STAKEY_REQ = 26,
+ WCN36XX_HAL_SET_STAKEY_RSP = 27,
+ WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+ WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+ WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+ WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+ /* Qos Related */
+ WCN36XX_HAL_ADD_TS_REQ = 32,
+ WCN36XX_HAL_ADD_TS_RSP = 33,
+ WCN36XX_HAL_DEL_TS_REQ = 34,
+ WCN36XX_HAL_DEL_TS_RSP = 35,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+ WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+ WCN36XX_HAL_ADD_BA_REQ = 38,
+ WCN36XX_HAL_ADD_BA_RSP = 39,
+ WCN36XX_HAL_DEL_BA_REQ = 40,
+ WCN36XX_HAL_DEL_BA_RSP = 41,
+
+ WCN36XX_HAL_CH_SWITCH_REQ = 42,
+ WCN36XX_HAL_CH_SWITCH_RSP = 43,
+ WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+ WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+ WCN36XX_HAL_GET_STATS_REQ = 46,
+ WCN36XX_HAL_GET_STATS_RSP = 47,
+ WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+ WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+ WCN36XX_HAL_MISSED_BEACON_IND = 50,
+ WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+ WCN36XX_HAL_MIC_FAILURE_IND = 52,
+ WCN36XX_HAL_FATAL_ERROR_IND = 53,
+ WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+ /* NV Interface */
+ WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+ WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+ WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+ WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+ WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+ WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+ WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+ WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+ WCN36XX_HAL_SEND_BEACON_REQ = 63,
+ WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+ WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+ WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+ WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+ WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+ /* PTT interface support */
+ WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+ WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+ /* BTAMP related events */
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+ WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+ WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+ WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+ WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+ WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+ WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+ WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+ WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+ WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+ WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+ WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+ WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+ WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+ WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+ WCN36XX_HAL_GET_RSSI_REQ = 92,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+ WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+ WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+ WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+ WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+ WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+ WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+ WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+ WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+ WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+ WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+ WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+ WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+ WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+ WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+ WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+ WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+ WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+ WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+ WCN36XX_HAL_GET_RSSI_RSP = 113,
+ WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+ /* 11k related events */
+ WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+ WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+ /* 11R related msgs */
+ WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+ WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+ /* P2P WLAN_FEATURE_P2P */
+ WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+ WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+ /* WLAN Dump commands */
+ WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+ WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+ /* OEM_DATA FEATURE SUPPORT */
+ WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+ WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+ /* ADD SELF STA REQ and RSP */
+ WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+ WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+ /* DEL SELF STA SUPPORT */
+ WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+ WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+ /* Coex Indication */
+ WCN36XX_HAL_COEX_IND = 129,
+
+ /* Tx Complete Indication */
+ WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+ /* Host Suspend/resume messages */
+ WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+ WCN36XX_HAL_HOST_RESUME_REQ = 132,
+ WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+ WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+ WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+ WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+ WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+ WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+ WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+ WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+ WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+ WCN36XX_HAL_RADAR_DETECT_IND = 143,
+ WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+ WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+ WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+ /* PNO messages */
+ WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+ WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+ WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+ WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+ WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+ WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+ WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+ WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+ WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+ WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+ WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+ WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+ WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+ WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+ WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+ /*
+	 * This is a temporary fix. It should be removed once the host and
+	 * Riva code are in sync.
+ */
+ WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+ WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+ WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+ WCN36XX_HAL_TSM_STATS_REQ = 168,
+ WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+ /* wake reason indication (WOW) */
+ WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+ /* GTK offload support */
+ WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+ WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+ WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+ WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+ WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+ WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+ WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+ WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+ WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+ WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+ WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+ WCN36XX_HAL_DEL_BA_IND = 188,
+ WCN36XX_HAL_DHCP_START_IND = 189,
+ WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+ WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
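+
+/*
+ * Most _REQ values above are paired with a matching _RSP; unsolicited
+ * firmware events carry the _IND suffix and have no response.
+ */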
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+ WCN36XX_HAL_MSG_VERSION0 = 0,
+ WCN36XX_HAL_MSG_VERSION1 = 1,
+ /* define as 2 bytes data */
+ WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+ WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+ DRIVER_TYPE_PRODUCTION = 0,
+ DRIVER_TYPE_MFG = 1,
+ DRIVER_TYPE_DVT = 2,
+ DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+ HAL_STOP_TYPE_SYS_RESET,
+ HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+ HAL_STOP_TYPE_RF_KILL,
+ HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+ HAL_SYS_MODE_NORMAL,
+ HAL_SYS_MODE_LEARN,
+ HAL_SYS_MODE_SCAN,
+ HAL_SYS_MODE_PROMISC,
+ HAL_SYS_MODE_SUSPEND_LINK,
+ HAL_SYS_MODE_ROAM_SCAN,
+ HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+ HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+ /* 20MHz IF bandwidth centered on IF carrier */
+ PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+ /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+ /* 40MHz IF bandwidth centered on IF carrier */
+ PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+ /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+ /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+ /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+ /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+ /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+ PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+ PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing(SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+ /* Static SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+ /* Dynamic SM Power Save mode */
+ WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+ /* reserved */
+ WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+ /* SM Power Save disabled */
+ WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+ WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+ STA_TAURUS = 0,
+ STA_TITAN,
+ STA_POLARIS,
+ STA_11b,
+ STA_11bg,
+ STA_11a,
+ STA_11n,
+ STA_11ac,
+ STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES 4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES 8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES 3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET 16
+
+enum wcn36xx_hal_bss_type {
+ WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+ /* Added for softAP support */
+ WCN36XX_HAL_INFRA_AP_MODE,
+
+ WCN36XX_HAL_IBSS_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_STA_MODE,
+
+ /* Added for BT-AMP support */
+ WCN36XX_HAL_BTAMP_AP_MODE,
+
+ WCN36XX_HAL_AUTO_MODE,
+
+ WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+ WCN36XX_HAL_11A_NW_TYPE,
+ WCN36XX_HAL_11B_NW_TYPE,
+ WCN36XX_HAL_11G_NW_TYPE,
+ WCN36XX_HAL_11N_NW_TYPE,
+ WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX 12
+
+enum wcn36xx_hal_ht_operating_mode {
+ /* No Protection */
+ WCN36XX_HAL_HT_OP_MODE_PURE,
+
+ /* Overlap Legacy device present, protection is optional */
+ WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+ /* No legacy device, but 20 MHz HT present */
+ WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+ /* Protection is required */
+ WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+ WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+ WCN36XX_HAL_ED_NONE,
+ WCN36XX_HAL_ED_WEP40,
+ WCN36XX_HAL_ED_WEP104,
+ WCN36XX_HAL_ED_TKIP,
+ WCN36XX_HAL_ED_CCMP,
+ WCN36XX_HAL_ED_WPI,
+ WCN36XX_HAL_ED_AES_128_CMAC,
+ WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN 16
+#define WLAN_WAPI_KEY_RSC_LEN 16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH 32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+ WCN36XX_HAL_TX_ONLY,
+ WCN36XX_HAL_RX_ONLY,
+ WCN36XX_HAL_TX_RX,
+ WCN36XX_HAL_TX_DEFAULT,
+ WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+ WCN36XX_HAL_WEP_STATIC,
+ WCN36XX_HAL_WEP_DYNAMIC,
+ WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+ WCN36XX_HAL_LINK_IDLE_STATE = 0,
+ WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+ WCN36XX_HAL_LINK_AP_STATE = 3,
+ WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+ /* BT-AMP Case */
+ WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+ WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+ WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+ WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+ /* Reserved for HAL Internal Use */
+ WCN36XX_HAL_LINK_LEARN_STATE = 9,
+ WCN36XX_HAL_LINK_SCAN_STATE = 10,
+ WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+ WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+ WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+ WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+ WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+ HAL_SUMMARY_STATS_INFO = 0x00000001,
+ HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+ BTAMP_EVENT_CONNECTION_START,
+ BTAMP_EVENT_CONNECTION_STOP,
+ BTAMP_EVENT_CONNECTION_TERMINATED,
+
+ /* This and beyond are invalid values */
+ BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+ PE_SUMMARY_STATS_INFO = 0x00000001,
+ PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+ PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+ PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+ PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+ PE_PER_STA_STATS_INFO = 0x00000020,
+
+ /* This and beyond are invalid values */
+ PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID 0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA 1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA 2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE 3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN 4
+#define WCN36XX_HAL_CFG_CAL_PERIOD 5
+#define WCN36XX_HAL_CFG_CAL_CONTROL 6
+#define WCN36XX_HAL_CFG_PROXIMITY 7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY 8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME 9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU 10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD 11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT 12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT 13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD 14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO 15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE 16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO 17
+#define WCN36XX_HAL_CFG_FIXED_RATE 18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY 19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY 20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY 21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION 22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ 23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ 24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ 25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ 26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS 27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT 28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER 29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR 30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE 31
+#define WCN36XX_HAL_CFG_STATS_PERIOD 32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION 33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED 34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD 35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK 36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE 37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO 38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI 39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH 40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD 42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG 43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG 44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG 45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG 46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS 47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL 48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD 49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER 50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD 52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD 53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM 56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE 57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT 58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN 59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI 60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS 61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI 62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS 63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE 64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST 65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING 70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4 72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD 74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP 75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE 76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK 77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT 79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION 80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT 81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT 82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST 83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT 84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT 85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT 86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT 87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN 88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN 89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN 90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN 91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT 92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN 93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC 94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP 95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO 96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER 97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT 98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION 99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER 100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT 101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
+#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+
+/* Message definitions - all the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+ u8 revision;
+ u8 version;
+ u8 minor;
+ u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+ u8 id;
+
+ /* 0 for multicast */
+ u8 unicast;
+
+ enum ani_key_direction direction;
+
+ /* Usage is unknown */
+ u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+	/* 1 = authenticator, 0 = supplicant */
+ u8 pae_role;
+
+ u16 length;
+ u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params Moving here since it is shared by
+ * configbss/setstakey msgs
+ */
+struct wcn36xx_hal_set_sta_key_params {
+ /* STA Index */
+ u16 sta_index;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* STATIC/DYNAMIC - valid only for WEP */
+ enum ani_wep_type wep_type;
+
+	/* Default WEP key, valid only for static WEP, must be between 0 and 3. */
+ u8 def_wep_idx;
+
+	/* valid only for non-static WEP encryptions */
+ struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /*
+	 * Control for replay count: 1 = single TID-based replay count on TX,
+	 * 0 = per-TID replay count on TX
+ */
+ u8 single_tid_rc;
+
+} __packed;
+
+/* 4-byte control message header used by HAL */
+struct wcn36xx_hal_msg_header {
+ enum wcn36xx_hal_host_msg_type msg_type:16;
+ enum wcn36xx_hal_host_msg_version msg_version:16;
+ u32 len;
+} __packed;
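+
+/*
+ * Every message below starts with this header. A minimal sketch of how a
+ * request would be framed (the exact framing lives outside this header):
+ *
+ *	msg.header.msg_type    = WCN36XX_HAL_START_REQ;
+ *	msg.header.msg_version = WCN36XX_HAL_MSG_VERSION0;
+ *	msg.header.len         = sizeof(msg);
+ *
+ * where len covers the whole message including this header plus any
+ * trailing TLV buffer (cf. the update_cfg note further down).
+ */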
+
+/* Config format required by HAL for each CFG item*/
+struct wcn36xx_hal_cfg {
+	/* Cfg ID. The ID required by HAL is exported by HAL
+	 * in a shared header file between UMAC and HAL. */
+ u16 id;
+
+	/* Length of the Cfg. This parameter is used to advance to the next
+	 * cfg in the TLV format. */
+ u16 len;
+
+	/* Padding bytes for unaligned addresses */
+ u16 pad_bytes;
+
+ /* Reserve bytes for making cfgVal to align address */
+ u16 reserve;
+
+	/* Following the 'len' field there should be 'len' bytes containing
+	 * the cfg value: u8 value[len] */
+} __packed;
+
+struct wcn36xx_hal_mac_start_parameters {
+	/* Driver type - Production or FTM etc. */
+ enum driver_type type;
+
+ /* Length of the config buffer */
+ u32 len;
+
+ /* Following this there is a TLV formatted buffer of length
+ * "len" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+ /* config buffer must start in TLV format just here */
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+ /* success or failure */
+ u16 status;
+
+ /* Max number of STA supported by the device */
+ u8 stations;
+
+ /* Max number of BSS supported by the device */
+ u8 bssids;
+
+ /* API Version */
+ struct wcnss_wlan_version version;
+
+ /* CRM build information */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+ /* hardware/chipset/misc version information */
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+ /* The reason for which the device is being stopped */
+ enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+ /*
+ * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+ * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+ */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+ u32 len;
+
+ /*
+ * Following this there is a TLV formatted buffer of length
+ * "uConfigBufferLen" bytes containing all config values.
+ * The TLV is expected to be formatted like this:
+ * 0 15 31 31+CFG_LEN-1 length-1
+ * | CFG_ID | CFG_LEN | CFG_BODY | CFG_ID |......|
+ */
+
+} __packed;
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+ u8 subType:4;
+ u8 type:2;
+ u8 protVer:2;
+
+ u8 order:1;
+ u8 wep:1;
+ u8 moreData:1;
+ u8 powerMgmt:1;
+ u8 retry:1;
+ u8 moreFrag:1;
+ u8 fromDS:1;
+ u8 toDS:1;
+
+#else
+
+ u8 protVer:2;
+ u8 type:2;
+ u8 subType:4;
+
+ u8 toDS:1;
+ u8 fromDS:1;
+ u8 moreFrag:1;
+ u8 retry:1;
+ u8 powerMgmt:1;
+ u8 moreData:1;
+ u8 wep:1;
+ u8 order:1;
+
+#endif
+
+};
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+ u8 fragNum:4;
+ u8 seqNumLo:4;
+ u8 seqNumHi:8;
+};
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+ struct wcn36xx_hal_mac_frame_ctl fc;
+ u8 durationLo;
+ u8 durationHi;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssId[6];
+ struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID 2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+ u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+ u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_len;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* LEARN - AP Role
+ SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+ /* Single NoA usage in Scanning */
+ u8 use_noa;
+
+ /* Indicates the scan duration (in ms) */
+ u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the channel to scan */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u32 start_tsf[2];
+ u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+	/* Indicates the channel to stop scanning. Not really used, but
+	 * retained for symmetry with the "start scan" message. It can also
+	 * help in error checking if needed. */
+ u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Identifies the operational state of the AP/STA
+ * LEARN - AP Role SCAN - STA Role */
+ enum wcn36xx_hal_sys_mode mode;
+
+ /* Operating channel to tune to. */
+ u8 oper_channel;
+
+ /* Channel Bonding state If 20/40 MHz is operational, this will
+ * indicate the 40 MHz extension channel in combination with the
+ * control channel */
+ enum phy_chan_bond_state cb_state;
+
+ /* BSSID of the BSS */
+ u8 bssid[ETH_ALEN];
+
+ /* Whether BSS needs to be notified */
+ u8 notify;
+
+ /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+ * Null, or CTS to Self). Must always be a valid frame type. */
+ u8 frame_type;
+
+ /* UMAC has the option of passing the MAC frame to be used for
+ * notifying the BSS. If non-zero, HAL will use the MAC frame
+ * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+ * appropriate MAC frame based on frameType. */
+ u8 frame_length;
+
+ /* Following the framelength there is a MAC frame buffer if
+ * frameLength is non-zero. */
+ struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+ /* Entry to hold number of active BSS idx's */
+ struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+ HW_RATE_INDEX_1MBPS = 0x82,
+ HW_RATE_INDEX_2MBPS = 0x84,
+ HW_RATE_INDEX_5_5MBPS = 0x8B,
+ HW_RATE_INDEX_6MBPS = 0x0C,
+ HW_RATE_INDEX_9MBPS = 0x12,
+ HW_RATE_INDEX_11MBPS = 0x96,
+ HW_RATE_INDEX_12MBPS = 0x18,
+ HW_RATE_INDEX_18MBPS = 0x24,
+ HW_RATE_INDEX_24MBPS = 0x30,
+ HW_RATE_INDEX_36MBPS = 0x48,
+ HW_RATE_INDEX_48MBPS = 0x60,
+ HW_RATE_INDEX_54MBPS = 0x6C
+};
+
+struct wcn36xx_hal_supported_rates {
+ /*
+ * For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * --this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * ON AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+	/* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+	 * units of 500 kbps */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan Rates(no ESF/concat Rates will be
+ * supported) First 26 bits are reserved for those Titan rates and
+ * the last 4 bits(bit28-31) for Taurus, 2(bit26-27) bits are
+ * reserved. */
+ /* Titan and Taurus Rates */
+ u32 enhanced_rate_bitmap;
+
+ /*
+ * 0-76 bits used, remaining reserved
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /*
+ * RX Highest Supported Data Rate defines the highest data
+	 * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from "Supported MCS Set field" inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_params {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* TODO move this parameter to the end for 3680 */
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* TODO add this parameter for 3680. */
+ /* Reserved to align next field on a dword boundary */
+ /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+ /* BSSID of STA */
+ u8 bssid[ETH_ALEN];
+
+ /* ASSOC ID, as assigned by UMAC */
+ u16 aid;
+
+ /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+ u8 type;
+
+ /* Short Preamble Supported. */
+ u8 short_preamble_supported;
+
+ /* MAC Address of STA */
+ u8 mac[ETH_ALEN];
+
+ /* Listen interval of the STA */
+ u16 listen_interval;
+
+ /* Support for 11e/WMM */
+ u8 wmm_enabled;
+
+ /* 11n HT capable STA */
+ u8 ht_capable;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* RIFS mode 0 - NA, 1 - Allowed */
+ u8 rifs_mode;
+
+ /* L-SIG TXOP Protection mechanism
+ 0 - No Support, 1 - Supported
+ SG - there is global field */
+ u8 lsig_txop_protection;
+
+ /* Max Ampdu Size supported by STA. TPE programming.
+ 0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+ u8 max_ampdu_size;
+
+ /* Max Ampdu density. Used by RA. 3 : 0~7 : 2^(11nAMPDUdensity -4) */
+ u8 max_ampdu_density;
+
+ /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+ u8 max_amsdu_size;
+
+ /* Short GI support for 40Mhz packets */
+ u8 sgi_40mhz;
+
+ /* Short GI support for 20Mhz packets */
+ u8 sgi_20Mhz;
+
+ /* Robust Management Frame (RMF) enabled/disabled */
+ u8 rmf;
+
+ /* The unicast encryption type in the association */
+ u32 encrypt_type;
+
+ /* HAL should update the existing STA entry, if this flag is set. UMAC
+ will set this flag in case of RE-ASSOC, where we want to reuse the
+ old STA ID. 0 = Add, 1 = Update */
+ u8 action;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* Max SP Length */
+ u8 max_sp_len;
+
+ /* 11n Green Field preamble support
+ 0 - Not supported, 1 - Supported */
+ u8 green_field_capable;
+
+ /* MIMO Power Save mode */
+ enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+ /* Delayed BA Support */
+ u8 delayed_ba_support;
+
+ /* Max AMPDU duration in 32us */
+ u8 max_ampdu_duration;
+
+ /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+ * set it to 0 if AP does not support it. This indication is sent
+	 * to HAL and HAL uses this flag to pick up the appropriate 40 MHz
+ * rates. */
+ u8 dsss_cck_mode_40mhz;
+
+ /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+	 * Retained for backward compatibility with existing HAL code */
+ u8 sta_index;
+
+ /* BSSID of BSS to which station is associated. Set to 0xFF when
+	 * invalid. Retained for backward compatibility with existing HAL
+ * code */
+ u8 bssid_index;
+
+ u8 p2p;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* These rates are the intersection of peer and self capabilities. */
+ struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+ /* success or failure */
+ u32 status;
+
+	/* Station index; valid only when the 'status' field is SUCCESS */
+ u8 sta_index;
+
+ /* BSSID Index of BSS to which the station is associated */
+ u8 bssid_index;
+
+ /* DPU Index for PTK */
+ u8 dpu_index;
+
+ /* DPU Index for GTK */
+ u8 bcast_dpu_index;
+
+ /* DPU Index for IGTK */
+ u8 bcast_mgmt_dpu_idx;
+
+ /* PTK DPU signature */
+ u8 uc_ucast_sig;
+
+	/* GTK DPU signature */
+ u8 uc_bcast_sig;
+
+ /* IGTK DPU signature */
+ u8 uc_mgmt_sig;
+
+ u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Index of STA to delete */
+ u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Index of STA deleted */
+ u8 sta_id;
+} __packed;
+
+/* 12 bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 */
+struct wcn36xx_hal_rate_set {
+ u8 num_rates;
+ u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:1;
+ u8 aci:2;
+ u8 acm:1;
+ u8 aifsn:4;
+#else
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 max:4;
+ u8 min:4;
+#else
+ u8 min:4;
+ u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+ struct wcn36xx_hal_aci_aifsn aci;
+ struct wcn36xx_hal_mac_cw cw;
+ u16 txop_limit;
+} __packed;
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ * in the software system. */
+enum wcn36xx_hal_con_mode {
+ WCN36XX_HAL_STA_MODE = 0,
+
+	/* To support softAP mode. This is misleading;
+	   it means AP mode only. */
+ WCN36XX_HAL_STA_SAP_MODE = 1,
+
+ WCN36XX_HAL_P2P_CLIENT_MODE,
+ WCN36XX_HAL_P2P_GO_MODE,
+ WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+ HAL_STA = 1,
+ HAL_SAP = 2,
+
+ /* To support concurrent STA and softAP mode, i.e. STA+AP */
+ HAL_STA_SAP = 3,
+
+ HAL_P2P_CLIENT = 4,
+ HAL_P2P_GO = 8,
+ HAL_MAX_CONCURRENCY_PERSONA = 4
+};
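+
+/* The enum above is a bitmask, so combined personas are expressed by
+ * OR-ing the values, e.g. (illustrative only):
+ *
+ *     u32 concurrency = HAL_STA | HAL_P2P_GO;    (0x1 | 0x8 = 0x9)
+ */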
+
+struct wcn36xx_hal_config_bss_params {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* TODO move sta to the end for 3680 */
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params sta;
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT Operating Mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable/Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* Self Mac Address */
+ u8 self_mac_addr[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Operational Mode: AP =0, STA = 1 */
+ u8 oper_mode;
+
+ /* Network Type */
+ enum wcn36xx_hal_nw_type nw_type;
+
+ /* Used to classify PURE_11G/11G_MIXED to program MTU */
+ u8 short_slot_time_supported;
+
+ /* Co-exist with 11a STA */
+ u8 lla_coexist;
+
+ /* Co-exist with 11b STA */
+ u8 llb_coexist;
+
+ /* Co-exist with 11g STA */
+ u8 llg_coexist;
+
+ /* Coexistence with 11n STA */
+ u8 ht20_coexist;
+
+ /* Non GF coexist flag */
+ u8 lln_non_gf_coexist;
+
+ /* TXOP protection support */
+ u8 lsig_tx_op_protection_full_support;
+
+ /* RIFS mode */
+ u8 rifs_mode;
+
+ /* Beacon Interval in TU */
+ u16 beacon_interval;
+
+ /* DTIM period */
+ u8 dtim_period;
+
+ /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+ u8 tx_channel_width_set;
+
+ /* Operating channel */
+ u8 oper_channel;
+
+ /* Extension channel for channel bonding */
+ u8 ext_channel;
+
+ /* Reserved to align next field on a dword boundary */
+ u8 reserved;
+
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* HAL should update the existing BSS entry if this flag is set.
+ * UMAC will set this flag in case of reassoc, where we want to
+ * reuse the old BSSID and still return success. 0 = Add, 1 =
+ * Update */
+ u8 action;
+
+ /* MAC Rate Set */
+ struct wcn36xx_hal_rate_set rateset;
+
+ /* Enable/Disable HT capabilities of the BSS */
+ u8 ht;
+
+ /* Enable/Disable OBSS protection */
+ u8 obss_prot_enabled;
+
+ /* RMF enabled/disabled */
+ u8 rmf;
+
+ /* HT Operating Mode of the 802.11n STA */
+ enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+ /* Dual CTS Protection: 0 - Unused, 1 - Used */
+ u8 dual_cts_protection;
+
+ /* Probe Response Max retries */
+ u8 max_probe_resp_retry_limit;
+
+ /* To Enable Hidden ssid */
+ u8 hidden_ssid;
+
+ /* To Enable/Disable FW Proxy Probe Resp */
+ u8 proxy_probe_resp;
+
+ /* Boolean to indicate if EDCA params are valid. UMAC might not
+ * have valid EDCA params or might not desire to apply EDCA params
+ * during config BSS. 0 implies Not Valid ; Non-Zero implies
+ * valid */
+ u8 edca_params_valid;
+
+ /* EDCA Parameters for Best Effort Access Category */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* EDCA Parameters for Background Access Category */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* EDCA Parameters for Video Access Category */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* EDCA Parameters for Voice Access Category */
+ struct wcn36xx_hal_edca_param_record acvo;
+
+ /* Ext Bss Config Msg if set */
+ u8 ext_set_sta_key_param_valid;
+
+ /* SetStaKeyParams for ext bss msg */
+ struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+ /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+ * wcn36xx_hal_con_mode */
+ u8 wcn36xx_hal_persona;
+
+ u8 spectrum_mgt_enable;
+
+ /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+ s8 tx_mgmt_power;
+
+ /* maxTxPower has max power to be used after applying the power
+ * constraint if any */
+ s8 max_tx_power;
+
+ /* Context of the station being added in HW
+ * Add a STA entry for "itself" -
+ *
+ * On AP - Add the AP itself in an "STA context"
+ *
+ * On STA - Add the AP to which this STA is joining in an
+ * "STA context"
+ */
+ struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index allocated by HAL */
+ u8 bss_index;
+
+ /* DPU descriptor index for PTK */
+ u8 dpu_desc_index;
+
+ /* PTK DPU signature */
+ u8 ucast_dpu_signature;
+
+ /* DPU descriptor index for GTK */
+ u8 bcast_dpu_desc_indx;
+
+ /* GTK DPU signature */
+ u8 bcast_dpu_signature;
+
+ /* DPU descriptor for IGTK */
+ u8 mgmt_dpu_desc_index;
+
+ /* IGTK DPU signature */
+ u8 mgmt_dpu_signature;
+
+ /* Station Index for BSS entry */
+ u8 bss_sta_index;
+
+ /* Self station index for this BSS */
+ u8 bss_self_sta_index;
+
+ /* Bcast station for buffering bcast frames in AP role */
+ u8 bss_bcast_sta_idx;
+
+ /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+ u8 mac[ETH_ALEN];
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index to be deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* BSS index that has been deleted */
+ u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Indicates the BSSID to which STA is going to associate */
+ u8 bssid[ETH_ALEN];
+
+ /* Indicates the channel to switch to. */
+ u8 channel;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* link State */
+ enum wcn36xx_hal_link_state link_state;
+
+ /* Max TX power */
+ s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ struct wcn36xx_hal_config_sta_params sta_params;
+ struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct config_sta_rsp_params sta_rsp_params;
+ struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Number of keys */
+ u8 num_keys;
+
+ /* Array of keys. */
+ struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+ /* Control for Replay Count, 1= Single TID based replay count on Tx
+ * 0 = Per TID based replay count on TX */
+ u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+ struct wcn36xx_hal_set_bss_key_req_msg Msg;
+ u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from a BSS the station is associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS Index of the BSS */
+ u8 bss_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* STATIC/DYNAMIC. Used in nullifying key descriptors for
+ * Static/Dynamic keys */
+ enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA Index */
+ u16 sta_idx;
+
+ /* Encryption Type used with peer */
+ enum ani_ed_type enc_type;
+
+ /* Key Id */
+ u8 key_id;
+
+ /* Whether to invalidate the Broadcast key or Unicast key. In case
+ * of WEP, the same key is used for both broadcast and unicast. */
+ u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ tSirMacAddr self_mac_addr;
+ u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Channel number */
+ u8 channel_number;
+
+ /* Local power constraint */
+ u8 local_power_constraint;
+
+ /* Secondary channel offset */
+ enum phy_chan_bond_state secondary_channel_offset;
+
+ /* HAL fills in the tx power used for mgmt frames in this field. */
+ u8 tx_mgmt_power;
+
+ /* Max TX power */
+ u8 max_tx_power;
+
+ /* Self STA MAC */
+ u8 self_sta_mac_addr[ETH_ALEN];
+
+ /* VO WIFI comment: BSSID needed to identify the session. As the
+ * request has power constraints, this should be applied only to
+ * that session. Since MTU timing and EDCA are sessionized, this
+ * struct needs to be sessionized and bssid needs to be out of the
+ * VOWifi feature flag. Very important: keep the bssid field at the
+ * end of this msg. It is used to maintain backward compatibility,
+ * by being ignored when a new host talks to old FW or an old host
+ * talks to new FW, since it is at the end of this struct.
+ */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Status */
+ u32 status;
+
+ /* Channel number - same as in request */
+ u8 channel_number;
+
+ /* HAL fills in the tx power used for mgmt frames in this field */
+ u8 tx_mgmt_power;
+
+ /* BSSID needed to identify session - same as in request */
+ u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /*BSS Index */
+ u16 bss_index;
+
+ /* Best Effort */
+ struct wcn36xx_hal_edca_param_record acbe;
+
+ /* Background */
+ struct wcn36xx_hal_edca_param_record acbk;
+
+ /* Video */
+ struct wcn36xx_hal_edca_param_record acvi;
+
+ /* Voice */
+ struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct dpu_stats_params {
+ /* Index of the STA to which the statistics belong */
+ u16 sta_index;
+
+ /* Encryption mode */
+ u8 enc_mode;
+
+ /* status */
+ u32 status;
+
+ /* Statistics */
+ u32 send_blocks;
+ u32 recv_blocks;
+ u32 replays;
+ u8 mic_error_cnt;
+ u32 prot_excl_cnt;
+ u16 format_err_cnt;
+ u16 un_decryptable_cnt;
+ u32 decrypt_err_cnt;
+ u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+
+ /* Categories of stats requested as specified in eHalStatsMask */
+ u32 stats_mask;
+};
+
+struct ani_summary_stats_info {
+ /* Total number of packets(per AC) that were successfully
+ * transmitted with retries */
+ u32 retry_cnt[4];
+
+ /* The number of MSDU packets and MMPDU frames per AC that the
+ * 802.11 station successfully transmitted after more than one
+ * retransmission attempt */
+ u32 multiple_retry_cnt[4];
+
+ /* Total number of packets(per AC) that were successfully
+ * transmitted (with and without retries, including multi-cast,
+ * broadcast) */
+ u32 tx_frm_cnt[4];
+
+ /* Total number of packets that were successfully received (after
+ * appropriate filter rules including multi-cast, broadcast) */
+ u32 rx_frm_cnt;
+
+ /* Total number of duplicate frames received successfully */
+ u32 frm_dup_cnt;
+
+ /* Total number packets(per AC) failed to transmit */
+ u32 fail_cnt[4];
+
+ /* Total number of RTS/CTS sequence failures for transmission of a
+ * packet */
+ u32 rts_fail_cnt;
+
+ /* Total number packets failed transmit because of no ACK from the
+ * remote entity */
+ u32 ack_fail_cnt;
+
+ /* Total number of RTS/CTS sequence success for transmission of a
+ * packet */
+ u32 rts_succ_cnt;
+
+ /* The sum of the receive error count and dropped-receive-buffer
+ * error count. HAL will provide this as a sum of (FCS error) +
+ * (Fail get BD/PDU in HW) */
+ u32 rx_discard_cnt;
+
+ /*
+ * The receive error count. HAL will provide the RxP FCS error
+ * global counter. */
+ u32 rx_error_cnt;
+
+ /* The sum of the transmit-directed byte count, transmit-multicast
+ * byte count and transmit-broadcast byte count. HAL will sum TPE
+ * UC/MC/BCAST global counters to provide this. */
+ u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+ /* Legacy rates */
+ HAL_TX_RATE_LEGACY = 0x1,
+
+ /* HT20 rates */
+ HAL_TX_RATE_HT20 = 0x2,
+
+ /* HT40 rates */
+ HAL_TX_RATE_HT40 = 0x4,
+
+ /* Rate with Short guard interval */
+ HAL_TX_RATE_SGI = 0x8,
+
+ /* Rate with Long guard interval */
+ HAL_TX_RATE_LGI = 0x10
+};
+
+struct ani_global_class_a_stats_info {
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames */
+ u32 rx_frag_cnt;
+
+ /* The number of MPDU frames received by the 802.11 station for
+ * MSDU packets or MMPDU frames when a promiscuous packet filter
+ * was enabled */
+ u32 promiscuous_rx_frag_cnt;
+
+ /* The receiver input sensitivity referenced to a FER of 8% at an
+ * MPDU length of 1024 bytes at the antenna connector. Each element
+ * of the array shall correspond to a supported rate and the order
+ * shall be the same as the supportedRates parameter. */
+ u32 rx_input_sensitivity;
+
+ /* The maximum transmit power in dBm, up to one decimal place. For
+ * example, 10.5 dBm is reported as 105. */
+ u32 max_pwr;
+
+ /* Number of times the receiver failed to synchronize with the
+ * incoming signal after detecting the sync in the preamble of the
+ * transmitted PLCP protocol data unit. */
+ u32 sync_fail_cnt;
+
+ /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+ * recently transmitted frame */
+ u32 tx_rate;
+
+ /* mcs index for HT20 and HT40 rates */
+ u32 mcs_index;
+
+ /* to differentiate between HT20 and HT40 rates; short and long
+ * guard interval */
+ u32 tx_rate_flags;
+};
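+
+/* Unit conversions implied by the comments above (illustrative only):
+ *
+ *     tx_rate_kbps = stats.tx_rate * 500;   (legacy rate, 500 kbit/s units)
+ *     max_pwr == 105 means 10.5 dBm         (dBm scaled by 10)
+ */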
+
+struct ani_global_security_stats {
+ /* The number of unencrypted received MPDU frames that the MAC
+ * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+ * management information base (MIB) object is enabled */
+ u32 rx_wep_unencrypted_frm_cnt;
+
+ /* The number of received MSDU packets that the 802.11 station
+ * discarded because of MIC failures */
+ u32 rx_mic_fail_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a TKIP ICV error */
+ u32 tkip_icv_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of an invalid AES-CCMP format */
+ u32 aes_ccmp_format_err;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of the AES-CCMP replay protection procedure */
+ u32 aes_ccmp_replay_cnt;
+
+ /* The number of received MPDU frames that the 802.11 station
+ * discarded because of errors detected by the AES-CCMP decryption
+ * algorithm */
+ u32 aes_ccmp_decrpt_err;
+
+ /* The number of encrypted MPDU frames received for which a WEP
+ * decryption key was not available on the 802.11 station */
+ u32 wep_undecryptable_cnt;
+
+ /* The number of encrypted MPDU frames that the 802.11 station
+ * failed to decrypt because of a WEP ICV error */
+ u32 wep_icv_err;
+
+ /* The number of received encrypted packets that the 802.11 station
+ * successfully decrypted */
+ u32 rx_decrypt_succ_cnt;
+
+ /* The number of encrypted packets that the 802.11 station failed
+ * to decrypt */
+ u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+ struct ani_global_security_stats uc_stats;
+ struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+ /* This counter shall be incremented for a received A-MSDU frame
+ * with the station's MAC address in the address 1 field or an
+ * A-MSDU frame with a group address in the address 1 field */
+ u32 rx_amsdu_cnt;
+
+ /* This counter shall be incremented when the MAC receives an AMPDU
+ * from the PHY */
+ u32 rx_ampdu_cnt;
+
+ /* This counter shall be incremented when a Frame is transmitted
+ * only on the primary channel */
+ u32 tx_20_frm_cnt;
+
+ /* This counter shall be incremented when a Frame is received only
+ * on the primary channel */
+ u32 rx_20_frm_cnt;
+
+ /* This counter shall be incremented by the number of MPDUs
+ * received in the A-MPDU when an A-MPDU is received */
+ u32 rx_mpdu_in_ampdu_cnt;
+
+ /* This counter shall be incremented when an MPDU delimiter has a
+ * CRC error when this is the first CRC error in the received AMPDU
+ * or when the previous delimiter has been decoded correctly */
+ u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+ /* The number of MPDU frames that the 802.11 station transmitted
+ * and acknowledged through a received 802.11 ACK frame */
+ u32 tx_frag_cnt[4];
+
+ /* This counter shall be incremented when an A-MPDU is transmitted */
+ u32 tx_ampdu_cnt;
+
+ /* This counter shall increment by the number of MPDUs in the AMPDU
+ * when an A-MPDU is transmitted */
+ u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ /* STA Idx */
+ u32 sta_index;
+
+ /* Categories of STATS being returned as per eHalStatsMask */
+ u32 stats_mask;
+
+ /* message type is same as the request type */
+ u16 msg_type;
+
+ /* length of the entire request, includes the pStatsBuf length too */
+ u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ enum wcn36xx_hal_link_state state;
+ u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u16 ackPolicy:2;
+ u16 userPrio:3;
+ u16 psb:1;
+ u16 aggregation:1;
+ u16 accessPolicy:2;
+ u16 direction:2;
+ u16 tsid:4;
+ u16 trafficType:1;
+#else
+ u16 trafficType:1;
+ u16 tsid:4;
+ u16 direction:2;
+ u16 accessPolicy:2;
+ u16 aggregation:1;
+ u16 psb:1;
+ u16 userPrio:3;
+ u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+ u8 rsvd:7;
+ u8 schedule:1;
+#else
+ u8 schedule:1;
+ u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+ struct wcn36xx_hal_ts_info_tfc traffic;
+ struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+ u8 type;
+ u8 length;
+ struct wcn36xx_hal_ts_info ts_info;
+ u16 nom_msdu_size;
+ u16 max_msdu_size;
+ u32 min_svc_interval;
+ u32 max_svc_interval;
+ u32 inact_interval;
+ u32 suspend_interval;
+ u32 svc_start_time;
+ u32 min_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 max_burst_sz;
+ u32 delay_bound;
+ u32 min_phy_rate;
+ u16 surplus_bw;
+ u16 medium_time;
+};
+
+struct add_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec;
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct add_rs_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct del_ts_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+ u16 tspec_index;
+
+ /* To lookup station id using the mac address */
+ u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* Peer MAC Address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* ADDBA Action Frame dialog token
+ HAL will not interpret this object */
+ u8 dialog_token;
+
+ /* TID for which the BA is being setup
+ This identifies the TC or TS of interest */
+ u8 tid;
+
+ /* 0 - Delayed BA (Not supported)
+ 1 - Immediate BA */
+ u8 policy;
+
+ /* Indicates the number of buffers for this TID (baTID)
+ NOTE - This is the requested buffer size. When this
+ is processed by HAL and subsequently by HDD, it is
+ possible that HDD may change this buffer size. Any
+ change in the buffer size should be noted by PE and
+ advertised appropriately in the ADDBA response */
+ u16 buffer_size;
+
+ /* BA timeout in TUs; 0 means no timeout will occur */
+ u16 timeout;
+
+ /* b0..b3 - Fragment Number - Always set to 0
+ b4..b15 - Starting Sequence Number of first MSDU
+ for which this BA is setup */
+ u16 ssn;
+
+ /* ADDBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
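+
+/* Sketch of packing the 'ssn' field described above (hypothetical macro,
+ * not part of the driver): the 12-bit starting sequence number occupies
+ * b4..b15 while the fragment number in b0..b3 stays zero:
+ *
+ *     #define WCN36XX_HAL_BA_SSN(seq) (((seq) & 0xfff) << 4)
+ *
+ *     req.ssn = WCN36XX_HAL_BA_SSN(start_seq_num);
+ */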
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+
+ /* TID for which the BA session has been setup */
+ u8 ba_tid;
+
+ /* BA Buffer Size allocated for the current BA session */
+ u8 ba_buffer_size;
+
+ u8 ba_session_id;
+
+ /* Reordering Window buffer */
+ u8 win_size;
+
+ /* Station Index to identify the STA */
+ u8 sta_index;
+
+ /* Starting Sequence Number */
+ u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* Reorder Window Size */
+ u8 win_size;
+/* Old FW 1.2.2.4 does not support this*/
+#ifdef FEATURE_ON_CHIP_REORDERING
+ u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Dialog token */
+ u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+ u16 ba_enable:1;
+ u16 starting_seq_num:12;
+ u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+ u8 sta_addr[ETH_ALEN];
+ struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_candidate {
+ u8 sta_index;
+ u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Session Id */
+ u8 session_id;
+
+ /* candidate_cnt is followed by the trigger BA
+ * candidate list (tTriggerBaCandidate)
+ */
+ u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+
+ /* candidate_cnt is followed by the trigger BA
+ * response candidate list (tTriggerRspBaCandidate)
+ */
+ u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_index;
+
+ /* TID for which the BA session is being deleted */
+ u8 tid;
+
+ /* DELBA direction
+ 1 - Originator
+ 0 - Recipient */
+ u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Traffic Id */
+ u8 tid;
+
+ u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Uplink Packet Queue delay */
+ u16 uplink_pkt_queue_delay;
+
+ /* Uplink Packet Queue delay histogram */
+ u16 uplink_pkt_queue_delay_hist[4];
+
+ /* Uplink Packet Transmit delay */
+ u32 uplink_pkt_tx_delay;
+
+ /* Uplink Packet loss */
+ u16 uplink_pkt_loss;
+
+ /* Uplink Packet count */
+ u16 uplink_pkt_count;
+
+ /* Roaming count */
+ u8 roaming_count;
+
+ /* Roaming Delay */
+ u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSS index of the keys */
+ u8 bssidx;
+ u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+ /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+ * messages should be
+ * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+ * nv_img_buffer_size */
+ struct wcn36xx_hal_msg_header header;
+
+ /* Fragment sequence number of the NV Image. Note that NV Image
+ * might not fit into one message due to size limitation of the SMD
+ * channel FIFO. UMAC can hence choose to chop the NV blob into
+ * multiple fragments starting with sequence number 0, 1, 2, etc.
+ * The last fragment MUST be indicated by marking the
+ * isLastFragment field to 1. Note that all the NV blobs would be
+ * concatenated together by HAL without any padding bytes in
+ * between.*/
+ u16 frag_number;
+
+ /* Is this the last fragment? When set to 1 it indicates that no
+ * more fragments will be sent by UMAC and HAL can concatenate all
+ * the NV blobs received and proceed with the parsing. HAL would generate
+ * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+ * after it receives each fragment */
+ u16 last_fragment;
+
+ /* NV Image size (number of bytes) */
+ u32 nv_img_buffer_size;
+
+ /* Following the 'nv_img_buffer_size', there should be
+ * nv_img_buffer_size bytes of NV Image i.e.
+ * u8[nv_img_buffer_size] */
+} __packed;
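+
+/* Sketch of the length rule described above for a fragment carrying
+ * frag_len bytes of the NV blob (hypothetical helper code, variable
+ * names are illustrative):
+ *
+ *     msg.header.len = sizeof(struct wcn36xx_hal_nv_img_download_req_msg)
+ *                      + frag_len;
+ *     msg.nv_img_buffer_size = frag_len;
+ *     msg.frag_number = frag_idx;   (0, 1, 2, ...)
+ *     msg.last_fragment = (offset + frag_len == total_blob_size);
+ */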
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure. HAL would generate a
+ * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+ /* Note: The length specified in tHalNvStoreInd messages should be
+ * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+ struct wcn36xx_hal_msg_header header;
+
+ /* NV Item */
+ u32 table_id;
+
+ /* Size of NV Blob */
+ u32 nv_blob_size;
+
+ /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+ * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet.
+ */
+struct mic_failure_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+
+ /* address used to compute MIC */
+ u8 src_addr[ETH_ALEN];
+
+ /* transmitter address */
+ u8 ta_addr[ETH_ALEN];
+
+ u8 dst_addr[ETH_ALEN];
+
+ u8 multicast;
+
+ /* first byte of IV */
+ u8 iv1;
+
+ /* second byte of IV */
+ u8 key_id;
+
+ /* sequence number */
+ u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+ /* receive address */
+ u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 op_mode;
+ u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+};
+
+struct update_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* shortPreamble mode. HAL should update all the STA rates when it
+ * receives this message */
+ u8 short_preamble;
+
+ /* short Slot time. */
+ u8 short_slot_time;
+
+ /* Beacon Interval */
+ u16 beacon_interval;
+
+ /* Protection related */
+ u8 lla_coexist;
+ u8 llb_coexist;
+ u8 llg_coexist;
+ u8 ht20_coexist;
+ u8 lln_non_gf_coexist;
+ u8 lsig_tx_op_protection_full_support;
+ u8 rifs_mode;
+
+ u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* length of the template. */
+ u32 beacon_length;
+
+ /* Beacon data. */
+ u8 beacon[BEACON_TEMPLATE_SIZE];
+
+ u8 bssid[ETH_ALEN];
+
+ /* TIM IE offset from the beginning of the template. */
+ u32 tim_ie_offset;
+
+ /* P2P IE offset from the beginning of the template */
+ u16 p2p_ie_offset;
+} __packed;
+
+struct send_beacon_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Link Parameters */
+ u8 bssid[ETH_ALEN];
+
+ /* success or failure */
+ u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* channel number in which the RADAR detected */
+ u8 channel_number;
+
+ /* RADAR pulse width in microseconds */
+ u16 radar_pulse_width;
+
+ /* Number of RADAR pulses */
+ u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta[ETH_ALEN];
+ u8 dialog_token;
+ u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+ u32 probe_resp_template_len;
+ u32 proxy_probe_req_valid_ie_bmap[8];
+ u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 aid;
+ u16 sta_id;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+
+ /* HAL copies bssid from the sta table. */
+ u8 addr2[ETH_ALEN];
+
+ /* To unify the keepalive / unknown A2 / TIM-based disassociation */
+ u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+ struct wcn36xx_hal_msg_header header;
+ u8 aid;
+ u8 sta_index;
+ u8 bss_index;
+ u8 reason_code;
+ u32 status;
+};
+
+struct bt_amp_event_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. Originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index. Originates from HAL */
+ u8 sta_id;
+
+ /* TID for which the transmit queue is being flushed */
+ u8 tid;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+
+ /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+ u64 tbtt;
+#endif
+ u8 dtim_count;
+
+ /* DTIM period given to HAL during association may not be valid, if
+ * association is based on ProbeRsp instead of beacon. */
+ u8 dtim_period;
+
+ /* For CCX and 11R Roaming */
+ u32 rssi_filter_period;
+
+ u32 num_beacon_per_rssi_average;
+ u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 send_data_null;
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The above structure would be followed by multiple instances of the
+ * structure mentioned below.
+ */
+struct beacon_filter_ie {
+ u8 element_id;
+ u8 check_ie_presence;
+ u8 offset;
+ u8 value;
+ u8 bitmask;
+ u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 capability_info;
+ u16 capability_mask;
+ u16 beacon_interval;
+ u16 ie_num;
+ u8 bss_index;
+ u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 ie_Count;
+ u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD 2
+#define WCN36XX_HAL_IPV6_ADDR_LEN 16
+#define WCN36XX_HAL_OFFLOAD_DISABLE 0
+#define WCN36XX_HAL_OFFLOAD_ENABLE 1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+ u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ /* Only 2 possible Network Advertisement IPv6 addresses are supported */
+ u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+ u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+ u8 self_addr[ETH_ALEN];
+ u8 src_ipv6_addr_valid:1;
+ u8 target_ipv6_addr1_valid:1;
+ u8 target_ipv6_addr2_valid:1;
+ u8 reserved1:5;
+
+ /* make it DWORD aligned */
+ u8 reserved2;
+
+ /* slot index for this offload */
+ u32 slot_index;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+ u8 offload_Type;
+
+ /* enable or disable */
+ u8 enable;
+
+ union {
+ u8 host_ipv4_addr[4];
+ u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+ } u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_host_offload_req host_offload_params;
+ struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP 2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE 0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE 1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD 30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 packet_type;
+ u32 time_period;
+ u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+ u8 dest_addr[ETH_ALEN];
+ u8 bss_index;
+} __packed;
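+
+/* Sketch of a NULL-frame keep alive request built from the defines above,
+ * using the 30 s default period (illustrative only; the address and
+ * bss_index fields are omitted here):
+ *
+ *     struct wcn36xx_hal_keep_alive_req_msg msg = {
+ *             .packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT,
+ *             .time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD,
+ *     };
+ */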
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ s8 threshold1:8;
+ s8 threshold2:8;
+ s8 threshold3:8;
+ u8 thres1_pos_notify:1;
+ u8 thres1_neg_notify:1;
+ u8 thres2_pos_notify:1;
+ u8 thres2_neg_notify:1;
+ u8 thres3_pos_notify:1;
+ u8 thres3_neg_notify:1;
+ u8 reserved10:2;
+};
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bk_delivery:1;
+ u8 be_delivery:1;
+ u8 vi_delivery:1;
+ u8 vo_delivery:1;
+ u8 bk_trigger:1;
+ u8 be_trigger:1;
+ u8 vi_trigger:1;
+ u8 vo_trigger:1;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID */
+ u8 id;
+
+ /* Pattern byte offset from beginning of the 802.11 packet to start
+ * of the wake-up pattern */
+ u8 byte_Offset;
+
+ /* Non-Zero Pattern size */
+ u8 size;
+
+ /* Pattern */
+ u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Non-zero pattern mask size */
+ u8 mask_size;
+
+ /* Pattern mask */
+ u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern */
+ u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ /* Extra pattern mask */
+ u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Pattern ID of the wakeup pattern to be deleted */
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enables/disables magic packet filtering */
+ u8 magic_packet_enable;
+
+ /* Magic pattern */
+ u8 magic_pattern[ETH_ALEN];
+
+ /* Enables/disables packet pattern filtering in firmware. Enabling
+ * this flag enables broadcast pattern matching in Firmware. If
+ * unicast pattern matching is also desired,
+ * ucUcastPatternFilteringEnable flag must be set to true as well
+ */
+ u8 pattern_filtering_enable;
+
+ /* Enables/disables unicast packet pattern filtering. This flag
+ * specifies whether we want to do pattern match on unicast packets
+ * as well and not just broadcast packets. This flag has no effect
+ * if the ucPatternFilteringEnable (main controlling flag) is set
+ * to false
+ */
+ u8 ucast_pattern_filtering_enable;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Channel Switch
+ * Action Frame.
+ */
+ u8 wow_channel_switch_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the
+ * Deauthentication Frame.
+ */
+ u8 wow_deauth_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it receives the Disassociation
+ * Frame.
+ */
+ u8 wow_disassoc_receive;
+
+ /* This configuration is valid only when magicPktEnable=1. It
+ * requests hardware to wake up when it has missed consecutive
+ * beacons. This is a hardware register configuration (NOT a
+ * firmware configuration).
+ */
+ u8 wow_max_missed_beacons;
+
+ /* This configuration is valid only when magicPktEnable=1. This is
+ * a timeout value in units of microsec. It requests hardware to
+ * unconditionally wake up after it has stayed in WoWLAN mode for
+ * some time. Set 0 to disable this feature.
+ */
+ u8 wow_max_sleep;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAP-ID requests embedded in EAPOL frames and use this as a wake
+ * source.
+ */
+ u8 wow_eap_id_request_enable;
+
+ /* This configuration directs the WoW packet filtering to look for
+ * EAPOL-4WAY requests and use this as a wake source.
+ */
+ u8 wow_eapol_4way_enable;
+
+ /* This configuration allows a host wakeup on a network scan
+ * offload match.
+ */
+ u8 wow_net_scan_offload_match;
+
+ /* This configuration allows a host wakeup on any GTK rekeying
+ * error.
+ */
+ u8 wow_gtk_rekey_error;
+
+ /* This configuration allows a host wakeup on BSS connection loss.
+ */
+ u8 wow_bss_connection_loss;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Valid STA Idx for per STA stats request */
+ u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* STA index */
+ u8 sta_idx;
+
+ /* Access Category */
+ u8 ac;
+
+ /* User Priority */
+ u8 up;
+
+ /* Service Interval */
+ u32 service_interval;
+
+ /* Suspend Interval */
+ u32 suspend_interval;
+
+ /* Delay Interval */
+ u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 set_mcst_bcst_filter_setting;
+ u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 rssi_thres1_pos_cross:1;
+ u32 rssi_thres1_neg_cross:1;
+ u32 rssi_thres2_pos_cross:1;
+ u32 rssi_thres2_neg_cross:1;
+ u32 rssi_thres3_pos_cross:1;
+ u32 rssi_thres3_neg_cross:1;
+ u32 avg_rssi:8;
+ u32 reserved:18;
+
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ s8 rssi;
+
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 sta_id;
+ s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_max_tx_pwr_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSID is needed to identify which session issued this request.
+ * As the request has power constraints, this should be applied
+ * only to that session */
+ u8 bssid[ETH_ALEN];
+
+ u8 self_addr[ETH_ALEN];
+
+ /* In request, power == MaxTx power to be used. */
+ u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* power == tx power used for management frames */
+ u8 power;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* TX Power in milli watts */
+ u32 tx_power;
+
+ u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* TX Power in milli watts */
+ u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 opp_ps;
+ u32 ct_window;
+ u8 count;
+ u32 duration;
+ u32 interval;
+ u32 single_noa_duration;
+ u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Self STA Index */
+ u8 self_sta_index;
+
+ /* DPU Index (IGTK, PTK, GTK all same) */
+ u8 dpu_index;
+
+ /* DPU Signature */
+ u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Station Index */
+ u16 sta_idx;
+
+ /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+ * This will carry the bitmap with the bit positions representing
+ * different ACs. */
+ u16 tspec_index;
+
+ /* Tspec info per AC To program TPE with required parameters */
+ struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+ /* U-APSD Flags: 1b per AC. Encoded as follows:
+ b7 b6 b5 b4 b3 b2 b1 b0 =
+ X X X X BE BK VI VO */
+ u8 uapsd;
+
+ /* These parameters are for all the access categories */
+
+ /* Service Interval */
+ u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Suspend Interval */
+ u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+ /* Delay Interval */
+ u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status0;
+
+ /* FIXME PRIMA for future use for 11R */
+ u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+ u32 arg4;
+ u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* Length of the response message */
+ u32 rsp_length;
+
+ /* FIXME: Currently assuming the response will be less than
+ * 100 bytes */
+ u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Coex Indication Type */
+ u32 type;
+
+ /* Coex Indication Data */
+ u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Tx Complete Indication Success or Failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 configured_mcst_bcst_filter_setting;
+ u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 dot11_exclude_unencrypted;
+ u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 index;
+ u8 opp_ps_flag;
+ u16 ctwin;
+
+ u16 noa1_interval_count;
+ u16 bss_index;
+ u32 noa1_duration;
+ u32 noa1_interval;
+ u32 noa1_starttime;
+
+ u16 noa2_interval_count;
+ u16 reserved2;
+ u32 noa2_duration;
+ u32 noa2_interval;
+ u32 noa2_start_time;
+
+ u32 status;
+};
+
+struct noa_start_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 status;
+ u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u16 sta_idx;
+
+ /* Peer MAC Address, whose BA session has timed out */
+ u8 peer_addr[ETH_ALEN];
+
+ /* TID for which a BA session timeout is being triggered */
+ u8 ba_tid;
+
+ /* DELBA direction
+ * 1 - Originator
+ * 0 - Recipient
+ */
+ u8 direction;
+
+ u32 reason_code;
+
+ /* TO SUPPORT BT-AMP */
+ u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS 26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX 60
+
+/* Maximum numbers of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS 16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS 10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE 450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+ PNO_MODE_IMMEDIATE,
+ PNO_MODE_ON_SUSPEND,
+ PNO_MODE_ON_RESUME,
+ PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+ AUTH_TYPE_ANY = 0,
+ AUTH_TYPE_OPEN_SYSTEM = 1,
+
+ /* Upper layer authentication types */
+ AUTH_TYPE_WPA = 2,
+ AUTH_TYPE_WPA_PSK = 3,
+
+ AUTH_TYPE_RSN = 4,
+ AUTH_TYPE_RSN_PSK = 5,
+ AUTH_TYPE_FT_RSN = 6,
+ AUTH_TYPE_FT_RSN_PSK = 7,
+ AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+ AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+ AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+ ED_ANY = 0,
+ ED_NONE = 1,
+ ED_WEP = 2,
+ ED_TKIP = 3,
+ ED_CCMP = 4,
+ ED_WPI = 5,
+
+ ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast type */
+enum ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+
+ BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description for which PNO will have to look for */
+struct network_type {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* Indicates the channels on which the network can be found;
+ * 0 - all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+struct scan_timer {
+ /* How long it should wait */
+ u32 value;
+
+ /* How many times it should repeat that wait; value 0 - keep using
+ * this timer until PNO is disabled */
+ u32 repeat;
+
+ /* e.g. 2 3 4 0 - it will wait 2 s between consecutive scans 3
+ * times; after that it will wait 4 s between consecutive scans
+ * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+ /* set to 0 if you wish for PNO to use its default telescopic timer */
+ u8 count;
+
+ /* Each value represents the amount of time that PNO will wait
+ * between two consecutive scan procedures. If a uniform timer that
+ * always fires at the exact same interval is desired, a single
+ * value is to be set. For a more complex, telescopic-like timer,
+ * multiple values can be set; once PNO reaches the end of the
+ * array it will continue scanning at intervals given by the last
+ * value. */
+ struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
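+
+/* The "2 3 4 0" example from the comments above, written out as an
+ * initializer (illustrative only):
+ *
+ *     struct scan_timers_type timers = {
+ *             .count = 2,
+ *             .values = {
+ *                     { .value = 2, .repeat = 3 },
+ *                     { .value = 4, .repeat = 0 },
+ *             },
+ *     };
+ */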
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
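A hedged sketch of how this request might be populated for a single WPA2-PSK network matched on channels 1 and 6; the helper is hypothetical, and the SSID copy is omitted since wcn36xx_hal_mac_ssid is defined elsewhere in this header:

static void pno_fill_example_req(struct set_pref_netw_list_req *req)
{
	struct network_type *netw = &req->networks[0];

	req->enable = 1;
	req->mode = PNO_MODE_IMMEDIATE;
	req->networks_count = 1;

	/* netw->ssid would be copied from the configured match set */
	netw->authentication = AUTH_TYPE_RSN_PSK;
	netw->encryption = ED_CCMP;
	netw->channel_count = 2;
	netw->channels[0] = 1;
	netw->channels[1] = 6;
	netw->rssi_threshold = 0;	/* no RSSI restriction assumed */

	req->scan_timers.count = 0;	/* use PNO's default telescopic timer */
}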
+
+/* The network description that PNO will have to look for */
+struct network_type_new {
+ /* SSID of the BSS */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Authentication type for the network */
+ enum auth_type authentication;
+
+ /* Encryption type for the network */
+ enum ed_type encryption;
+
+ /* SSID broadcast type, normal, hidden or unknown */
+ enum ssid_bcast_type bcast_network_type;
+
+ /* Channels on which the network can be found; a channel_count of
+ * 0 means all channels */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Indicates the RSSI threshold for the network to be considered */
+ u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Enable PNO */
+ u32 enable;
+
+ /* Immediate, On Suspend, On Resume */
+ enum pno_mode mode;
+
+ /* Number of networks sent for PNO */
+ u32 networks_count;
+
+ /* The networks that PNO needs to look for */
+ struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+ /* The scan timers required for PNO */
+ struct scan_timers_type scan_timers;
+
+ /* Probe template for 2.4GHz band */
+ u16 band_24g_probe_size;
+ u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+ /* Probe template for 5GHz band */
+ u16 band_5g_probe_size;
+ u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request - just to indicate that PNO has
+ * acknowledged the request and will start scanning */
+ u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Network that was found with the highest RSSI */
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Indicates the RSSI */
+ u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+ struct wcn36xx_hal_msg_header header;
+
+ /* RSSI Threshold */
+ u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* Host setting for 11d */
+ u8 dot11d_enabled;
+
+ /* Lets PNO know that host has determined the regulatory domain */
+ u8 dot11d_resolved;
+
+ /* Channels on which PNO is allowed to scan */
+ u8 channel_count;
+ u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+ /* Minimum channel time */
+ u16 active_min_ch_time;
+
+ /* Maximum channel time */
+ u16 active_max_ch_time;
+
+ /* Minimum channel time */
+ u16 passive_min_ch_time;
+
+ /* Maximum channel time */
+ u16 passive_max_ch_time;
+
+ /* Cb State */
+ enum phy_chan_bond_state state;
+};
+
+/* Update scan params response */
+struct wcn36xx_hal_update_scan_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* 0: disable, 1:enable */
+ u8 tx_per_tracking_enable;
+
+ /* Check period, unit is sec. */
+ u8 tx_per_tracking_period;
+
+ /* (Failed TX packets)/(Total TX packets) ratio, in units of 10%. */
+ u8 tx_per_tracking_ratio;
+
+ /* Watermark of TX packet count; once the number of TX packets
+ * exceeds this value the check is performed. Default is 5 */
+ u32 tx_per_tracking_watermark;
+};
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+};
+
+struct tx_per_hit_ind_msg {
+ struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define WCN36XX_HAL_PROTOCOL_DATA_LEN 8
+#define WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS 240
+#define WCN36XX_HAL_MAX_NUM_FILTERS 20
+#define WCN36XX_HAL_MAX_CMP_PER_FILTER 10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+ HAL_RCV_FILTER_TYPE_INVALID,
+ HAL_RCV_FILTER_TYPE_FILTER_PKT,
+ HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+ HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+ HAL_FILTER_PROTO_TYPE_INVALID,
+ HAL_FILTER_PROTO_TYPE_MAC,
+ HAL_FILTER_PROTO_TYPE_ARP,
+ HAL_FILTER_PROTO_TYPE_IPV4,
+ HAL_FILTER_PROTO_TYPE_IPV6,
+ HAL_FILTER_PROTO_TYPE_UDP,
+ HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+ HAL_FILTER_CMP_TYPE_INVALID,
+ HAL_FILTER_CMP_TYPE_EQUAL,
+ HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+ HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+ HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+ u8 protocol_layer;
+ u8 cmp_flag;
+
+ /* Length of the data to compare */
+ u16 data_length;
+
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ /* Reserved field */
+ u8 reserved;
+
+ /* Data to compare */
+ u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+ /* Mask to be applied on the received packet data before compare */
+ u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ u8 bss_index;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 id;
+ u8 type;
+ u8 params_count;
+ u32 coalesce_time;
+ struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
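As a hedged illustration of the compare parameters, a single-entry filter that keeps only frames whose EtherType is ARP; the helper is hypothetical and assumes data_offset is counted from the start of the MAC header, as the comment on wcn36xx_hal_rcv_pkt_filter_params suggests:

static void flt_fill_arp_only(struct wcn36xx_hal_set_rcv_pkt_filter_req_msg *msg)
{
	struct wcn36xx_hal_rcv_pkt_filter_params *p = &msg->params[0];

	msg->id = 0;
	msg->type = HAL_RCV_FILTER_TYPE_FILTER_PKT;
	msg->params_count = 1;
	msg->coalesce_time = 0;

	p->protocol_layer = HAL_FILTER_PROTO_TYPE_MAC;
	p->cmp_flag = HAL_FILTER_CMP_TYPE_EQUAL;
	p->data_offset = 12;		/* EtherType field in the 802.3 header */
	p->data_length = 2;
	p->compare_data[0] = 0x08;	/* ETH_P_ARP == 0x0806 */
	p->compare_data[1] = 0x06;
}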
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+ /* from start of the respective frame header */
+ u8 data_offset;
+
+ u32 mc_addr_count;
+ u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+ u8 id;
+ u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Success or Failure */
+ u32 status;
+
+ u32 match_count;
+ struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+ matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+ /* only valid for response message */
+ u32 status;
+ u8 id;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+ u32 status;
+ u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Ignore DTIM */
+ u32 ignore_dtim;
+
+ /* DTIM Period */
+ u32 dtim_period;
+
+ /* Listen Interval */
+ u32 listen_interval;
+
+ /* Broadcast Multicast Filter */
+ u32 bcast_mcast_filter;
+
+ /* Beacon Early Termination */
+ u32 enable_bet;
+
+ /* Beacon Early Termination Interval */
+ u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum place_holder_in_cap_bitmap {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+#define WCN36XX_HAL_CAPS_SIZE 4
+
+struct wcn36xx_hal_feat_caps_msg {
+
+ struct wcn36xx_hal_msg_header header;
+
+ u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
+} __packed;
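The four 32-bit words hold the 128 capability bits enumerated above; a minimal sketch of the index arithmetic (the driver's own get_feat_caps()/set_feat_caps() helpers, which main.c below relies on, are assumed to behave equivalently):

/* Capability N lives in bit (N % 32) of word (N / 32). */
static inline int example_get_feat_cap(const u32 *bitmap,
				       enum place_holder_in_cap_bitmap cap)
{
	if (cap >= WCN36XX_HAL_CAPS_SIZE * 32)
		return 0;

	return !!(bitmap[cap / 32] & BIT(cap % 32));
}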
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+ WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+ /* rekey detected, but not handled */
+ WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+ /* MIC check error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+ /* decryption error on M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+ /* M1 replay detected */
+ WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+ /* missing GTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+ /* missing iGTK key descriptor in M1 */
+ WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+ /* key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+ /* iGTK key installation error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+ /* GTK rekey M2 response TX error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+ /* non-specific general error */
+ WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+ WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+ /* magic packet match */
+ WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+ /* host defined pattern match */
+ WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+ /* EAP-ID frame detected */
+ WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+ /* start of EAPOL 4-way handshake detected */
+ WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+ /* network scan offload match */
+ WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+ /* GTK rekey status wakeup (see status) */
+ WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+ /* BSS connection lost */
+ WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ Wake Packet which is saved at tWakeReasonParams.DataStart
+ This data is sent for any wake reasons that involve a packet-based wakeup :
+
+ WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+ WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+
+ The information is provided to the host for auditing and debug purposes
+
+*/
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* see tWakeReasonType */
+ u32 reason;
+
+ /* argument specific to the reason type */
+ u32 reason_arg;
+
+ /* length of optional data stored in this message; in case HAL
+ * truncates the data (e.g. data packets) this length will be less
+ * than the actual length */
+ u32 stored_data_len;
+
+ /* actual length of data */
+ u32 actual_data_len;
+
+ /* variable length start of data (length == stored_data_len); see
+ * the specific wake type */
+ u8 data_start[1];
+
+ u32 bss_index:8;
+ u32 reserved:24;
+};
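For illustration, a hypothetical consumer of the variable-length wake packet; print_hex_dump_bytes() is the stock kernel helper, and only the bytes actually stored (stored_data_len, which may be less than actual_data_len) are dumped:

static void example_dump_wake_packet(const struct wcn36xx_hal_wake_reason_ind *ind)
{
	/* Dump the (possibly truncated) packet that caused the wakeup */
	print_hex_dump_bytes("wake pkt: ", DUMP_PREFIX_OFFSET,
			     ind->data_start, ind->stored_data_len);
}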
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG 0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* optional flags */
+ u32 flags;
+
+ /* Key confirmation key */
+ u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+ /* key encryption key */
+ u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+ /* replay counter */
+ u64 key_replay_counter;
+
+ u8 bss_index;
+};
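A hedged sketch of filling this request from the KCK, KEK and replay counter handed to the driver when GTK rekeying is offloaded; the helper and its parameters are illustrative only:

static void gtk_offload_fill(struct wcn36xx_hal_gtk_offload_req_msg *msg,
			     const u8 *kck, const u8 *kek,
			     u64 replay_ctr, u8 bss_index)
{
	/* 0 = offload enabled; set WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE
	 * to turn the offload off */
	msg->flags = 0;
	memcpy(msg->kck, kck, WCN36XX_HAL_GTK_KCK_BYTES);
	memcpy(msg->kek, kek, WCN36XX_HAL_GTK_KEK_BYTES);
	msg->key_replay_counter = replay_ctr;
	msg->bss_index = bss_index;
}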
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+ struct wcn36xx_hal_msg_header header;
+ u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+
+ /* last rekey status when the rekey was offloaded */
+ u32 last_rekey_status;
+
+ /* current replay counter value */
+ u64 key_replay_counter;
+
+ /* total rekey attempts */
+ u32 total_rekey_count;
+
+ /* successful GTK rekeys */
+ u32 gtk_rekey_count;
+
+ /* successful iGTK rekeys */
+ u32 igtk_rekey_count;
+
+ u8 bss_index;
+};
+
+struct dhcp_info {
+ /* Indicates the device mode associated with the DHCP activity */
+ u8 device_mode;
+
+ u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+};
+
+/*
+ * Thermal Mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - Mitigate by disabling AMPDU
+ * aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - Mitigate by disabling AMPDU
+ * aggregation and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - Not supported */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+ HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+ HAL_THERMAL_MITIGATION_MODE_0,
+ HAL_THERMAL_MITIGATION_MODE_1,
+ HAL_THERMAL_MITIGATION_MODE_2,
+ HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal Mitigation level.
+ * Note the levels are incremental i.e WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 =
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 +
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+ HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+ HAL_THERMAL_MITIGATION_LEVEL_0,
+ HAL_THERMAL_MITIGATION_LEVEL_1,
+ HAL_THERMAL_MITIGATION_LEVEL_2,
+ HAL_THERMAL_MITIGATION_LEVEL_3,
+ HAL_THERMAL_MITIGATION_LEVEL_4,
+ HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Thermal Mitigation Operation Mode */
+ enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+ /* Thermal Mitigation Level */
+ enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+ struct wcn36xx_hal_msg_header header;
+
+ /* status of the request */
+ u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ /* Duration over which these stats were collected */
+ u32 duration;
+
+ /* Per STA Stats */
+
+ /* TX stats */
+ u32 tx_bytes_pushed;
+ u32 tx_packets_pushed;
+
+ /* RX stats */
+ u32 rx_bytes_rcvd;
+ u32 rx_packets_rcvd;
+ u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644
index 000000000..1f4b9e546
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -0,0 +1,1114 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+ CHAN2G(2412, 1), /* Channel 1 */
+ CHAN2G(2417, 2), /* Channel 2 */
+ CHAN2G(2422, 3), /* Channel 3 */
+ CHAN2G(2427, 4), /* Channel 4 */
+ CHAN2G(2432, 5), /* Channel 5 */
+ CHAN2G(2437, 6), /* Channel 6 */
+ CHAN2G(2442, 7), /* Channel 7 */
+ CHAN2G(2447, 8), /* Channel 8 */
+ CHAN2G(2452, 9), /* Channel 9 */
+ CHAN2G(2457, 10), /* Channel 10 */
+ CHAN2G(2462, 11), /* Channel 11 */
+ CHAN2G(2467, 12), /* Channel 12 */
+ CHAN2G(2472, 13), /* Channel 13 */
+ CHAN2G(2484, 14) /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+ CHAN5G(5180, 36),
+ CHAN5G(5200, 40),
+ CHAN5G(5220, 44),
+ CHAN5G(5240, 48),
+ CHAN5G(5260, 52),
+ CHAN5G(5280, 56),
+ CHAN5G(5300, 60),
+ CHAN5G(5320, 64),
+ CHAN5G(5500, 100),
+ CHAN5G(5520, 104),
+ CHAN5G(5540, 108),
+ CHAN5G(5560, 112),
+ CHAN5G(5580, 116),
+ CHAN5G(5600, 120),
+ CHAN5G(5620, 124),
+ CHAN5G(5640, 128),
+ CHAN5G(5660, 132),
+ CHAN5G(5700, 140),
+ CHAN5G(5745, 149),
+ CHAN5G(5765, 153),
+ CHAN5G(5785, 157),
+ CHAN5G(5805, 161),
+ CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (_hw_rate) \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+ RATE(10, HW_RATE_INDEX_1MBPS, 0),
+ RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+ RATE(60, HW_RATE_INDEX_6MBPS, 0),
+ RATE(90, HW_RATE_INDEX_9MBPS, 0),
+ RATE(120, HW_RATE_INDEX_12MBPS, 0),
+ RATE(180, HW_RATE_INDEX_18MBPS, 0),
+ RATE(240, HW_RATE_INDEX_24MBPS, 0),
+ RATE(360, HW_RATE_INDEX_36MBPS, 0),
+ RATE(480, HW_RATE_INDEX_48MBPS, 0),
+ RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+ .channels = wcn_2ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_2ghz_channels),
+ .bitrates = wcn_2ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+ .channels = wcn_5ghz_channels,
+ .n_channels = ARRAY_SIZE(wcn_5ghz_channels),
+ .bitrates = wcn_5ghz_rates,
+ .n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
+ .ht_cap = {
+ .cap = IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+ struct wcn36xx_sta *sta_priv)
+{
+ return NL80211_IFTYPE_STATION == vif->type ?
+ sta_priv->bss_sta_index :
+ sta_priv->sta_index;
+}
+
+static const char * const wcn36xx_caps_names[] = {
+ "MCC", /* 0 */
+ "P2P", /* 1 */
+ "DOT11AC", /* 2 */
+ "SLM_SESSIONIZATION", /* 3 */
+ "DOT11AC_OPMODE", /* 4 */
+ "SAP32STA", /* 5 */
+ "TDLS", /* 6 */
+ "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+ "WLANACTIVE_OFFLOAD", /* 8 */
+ "BEACON_OFFLOAD", /* 9 */
+ "SCAN_OFFLOAD", /* 10 */
+ "ROAM_OFFLOAD", /* 11 */
+ "BCN_MISS_OFFLOAD", /* 12 */
+ "STA_POWERSAVE", /* 13 */
+ "STA_ADVANCED_PWRSAVE", /* 14 */
+ "AP_UAPSD", /* 15 */
+ "AP_DFS", /* 16 */
+ "BLOCKACK", /* 17 */
+ "PHY_ERR", /* 18 */
+ "BCN_FILTER", /* 19 */
+ "RTT", /* 20 */
+ "RATECTRL", /* 21 */
+ "WOW" /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+ int i;
+
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (get_feat_caps(wcn->fw_feat_caps, i))
+ wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+ }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+ if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+ wcn36xx_info("Chip is 3680\n");
+ wcn->chip_version = WCN36XX_CHIP_3680;
+ } else {
+ wcn36xx_info("Chip is 3660\n");
+ wcn->chip_version = WCN36XX_CHIP_3660;
+ }
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+ int ret;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+ /* SMD initialization */
+ ret = wcn36xx_smd_open(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to open smd channel: %d\n", ret);
+ goto out_err;
+ }
+
+ /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+ ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+ goto out_smd_close;
+ }
+
+ ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+ goto out_free_dxe_pool;
+ }
+
+ wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ wcn36xx_err("Failed to allocate smd buf\n");
+ ret = -ENOMEM;
+ goto out_free_dxe_ctl;
+ }
+
+ ret = wcn36xx_smd_load_nv(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to push NV to chip\n");
+ goto out_free_smd_buf;
+ }
+
+ ret = wcn36xx_smd_start(wcn);
+ if (ret) {
+ wcn36xx_err("Failed to start chip\n");
+ goto out_free_smd_buf;
+ }
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ else
+ wcn36xx_feat_caps_info(wcn);
+ }
+
+ wcn36xx_detect_chip_version(wcn);
+
+ /* DMA channel initialization */
+ ret = wcn36xx_dxe_init(wcn);
+ if (ret) {
+ wcn36xx_err("DXE init failed\n");
+ goto out_smd_stop;
+ }
+
+ wcn36xx_debugfs_init(wcn);
+
+ INIT_LIST_HEAD(&wcn->vif_list);
+ spin_lock_init(&wcn->dxe_lock);
+
+ return 0;
+
+out_smd_stop:
+ wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+ kfree(wcn->hal_buf);
+out_free_dxe_pool:
+ wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+ wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+ wcn36xx_smd_close(wcn);
+out_err:
+ return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+ wcn36xx_debugfs_exit(wcn);
+ wcn36xx_smd_stop(wcn);
+ wcn36xx_dxe_deinit(wcn);
+ wcn36xx_smd_close(wcn);
+
+ wcn36xx_dxe_free_mem_pools(wcn);
+ wcn36xx_dxe_free_ctl_blks(wcn);
+
+ kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ int ch = WCN36XX_HW_CHANNEL(wcn);
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+ ch);
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+ }
+
+ return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed,
+ unsigned int *total, u64 multicast)
+{
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+ *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ if (control->sta)
+ sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+ if (wcn36xx_start_tx(wcn, sta_priv, skb))
+ ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ int ret = 0;
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+ cmd, key_conf->cipher, key_conf->keyidx,
+ key_conf->keylen, key_conf->flags);
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+ key_conf->key,
+ key_conf->keylen);
+
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+ break;
+ default:
+ wcn36xx_err("Unsupported key type 0x%x\n",
+ key_conf->cipher);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+ /*
+ * Supplicant is sending key in the wrong order:
+ * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
+ * but HW expects it to be in the order as described in
+ * IEEE 802.11 spec (see chapter 11.7) like this:
+ * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
+ */
+ memcpy(key, key_conf->key, 16);
+ memcpy(key + 16, key_conf->key + 24, 8);
+ memcpy(key + 24, key_conf->key + 16, 8);
+ } else {
+ memcpy(key, key_conf->key, key_conf->keylen);
+ }
+
+ if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+ sta_priv->is_data_encrypted = true;
+ /* Reconfigure bss with encrypt_type */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ wcn36xx_smd_config_bss(wcn,
+ vif,
+ sta,
+ sta->addr,
+ true);
+
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ } else {
+ wcn36xx_smd_set_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key);
+ if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+ (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+ sta_priv->is_data_encrypted = true;
+ wcn36xx_smd_set_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ key_conf->keylen,
+ key,
+ get_sta_index(vif, sta_priv));
+ }
+ }
+ break;
+ case DISABLE_KEY:
+ if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx);
+ } else {
+ sta_priv->is_data_encrypted = false;
+ /* do not remove key if disassociated */
+ if (sta_priv->aid)
+ wcn36xx_smd_remove_stakey(wcn,
+ vif_priv->encrypt_type,
+ key_conf->keyidx,
+ get_sta_index(vif, sta_priv));
+ }
+ break;
+ default:
+ wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+ wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_smd_end_scan(wcn);
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+ enum ieee80211_band band)
+{
+ int i, size;
+ u16 *rates_table;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ u32 rates = sta->supp_rates[band];
+
+ memset(&sta_priv->supported_rates, 0,
+ sizeof(sta_priv->supported_rates));
+ sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+ rates_table = sta_priv->supported_rates.dsss_rates;
+ if (band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_2ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+ }
+
+ size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+ rates_table = sta_priv->supported_rates.ofdm_rates;
+ for (i = 0; i < size; i++) {
+ if (rates & 0x01) {
+ rates_table[i] = wcn_5ghz_rates[i].hw_value;
+ rates = rates >> 1;
+ }
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+ sizeof(sta_priv->supported_rates.supported_mcs_set));
+ memcpy(sta_priv->supported_rates.supported_mcs_set,
+ sta->ht_cap.mcs.rx_mask,
+ sizeof(sta->ht_cap.mcs.rx_mask));
+ }
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+ HW_RATE_INDEX_6MBPS,
+ HW_RATE_INDEX_9MBPS,
+ HW_RATE_INDEX_12MBPS,
+ HW_RATE_INDEX_18MBPS,
+ HW_RATE_INDEX_24MBPS,
+ HW_RATE_INDEX_36MBPS,
+ HW_RATE_INDEX_48MBPS,
+ HW_RATE_INDEX_54MBPS
+ };
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+ HW_RATE_INDEX_1MBPS,
+ HW_RATE_INDEX_2MBPS,
+ HW_RATE_INDEX_5_5MBPS,
+ HW_RATE_INDEX_11MBPS
+ };
+
+ rates->op_rate_mode = STA_11n;
+ memcpy(rates->dsss_rates, dsss_rates,
+ sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+ memcpy(rates->ofdm_rates, ofdm_rates,
+ sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+ rates->supported_mcs_set[0] = 0xFF;
+}
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct sk_buff *skb = NULL;
+ u16 tim_off, tim_len;
+ enum wcn36xx_hal_link_state link_state;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ vif, changed);
+
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed dtim period %d\n",
+ bss_conf->dtim_period);
+
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss PS set %d\n",
+ bss_conf->ps);
+ if (bss_conf->ps) {
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+ bss_conf->bssid);
+
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ vif_priv->is_joining = true;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_join(wcn, bss_conf->bssid,
+ vif->addr, WCN36XX_HW_CHANNEL(wcn));
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ bss_conf->bssid, false);
+ } else {
+ vif_priv->is_joining = false;
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_SSID) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed ssid\n");
+ wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+ bss_conf->ssid, bss_conf->ssid_len);
+
+ vif_priv->ssid.length = bss_conf->ssid_len;
+ memcpy(&vif_priv->ssid.ssid,
+ bss_conf->ssid,
+ bss_conf->ssid_len);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ vif_priv->is_joining = false;
+ if (bss_conf->assoc) {
+ struct ieee80211_sta *sta;
+ struct wcn36xx_sta *sta_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac assoc bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ wcn36xx_err("sta %pM is not found\n",
+ bss_conf->bssid);
+ rcu_read_unlock();
+ goto out;
+ }
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+ wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_POSTASSOC_STATE);
+ wcn36xx_smd_config_bss(wcn, vif, sta,
+ bss_conf->bssid,
+ true);
+ sta_priv->aid = bss_conf->aid;
+ /*
+ * config_sta must be called from here because this is the
+ * place where the AID is available.
+ */
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ rcu_read_unlock();
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "disassociated bss %pM vif %pM AID=%d\n",
+ bss_conf->bssid,
+ vif->addr,
+ bss_conf->aid);
+ wcn36xx_smd_set_link_st(wcn,
+ bss_conf->bssid,
+ vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+ skb = ieee80211_proberesp_get(hw, vif);
+ if (!skb) {
+ wcn36xx_err("failed to alloc probe resp skb\n");
+ goto out;
+ }
+
+ wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+ dev_kfree_skb(skb);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED ||
+ changed & BSS_CHANGED_BEACON) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "mac bss changed beacon enabled %d\n",
+ bss_conf->enable_beacon);
+
+ if (bss_conf->enable_beacon) {
+ vif_priv->dtim_period = bss_conf->dtim_period;
+ vif_priv->bss_index = 0xff;
+ wcn36xx_smd_config_bss(wcn, vif, NULL,
+ vif->addr, false);
+ skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+ &tim_len);
+ if (!skb) {
+ wcn36xx_err("failed to alloc beacon skb\n");
+ goto out;
+ }
+ wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+ dev_kfree_skb(skb);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)
+ link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+ else
+ link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ link_state);
+ } else {
+ wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+ WCN36XX_HAL_LINK_IDLE_STATE);
+ wcn36xx_smd_delete_bss(wcn, vif);
+ }
+ }
+out:
+ return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+ return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+ list_del(&vif_priv->list);
+ wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+ vif, vif->type);
+
+ if (!(NL80211_IFTYPE_STATION == vif->type ||
+ NL80211_IFTYPE_AP == vif->type ||
+ NL80211_IFTYPE_ADHOC == vif->type ||
+ NL80211_IFTYPE_MESH_POINT == vif->type)) {
+ wcn36xx_warn("Unsupported interface type requested: %d\n",
+ vif->type);
+ return -EOPNOTSUPP;
+ }
+
+ list_add(&vif_priv->list, &wcn->vif_list);
+ wcn36xx_smd_add_sta_self(wcn, vif);
+
+ return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ vif, sta->addr);
+
+ spin_lock_init(&sta_priv->ampdu_lock);
+ vif_priv->sta = sta_priv;
+ sta_priv->vif = vif_priv;
+ /*
+ * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
+ * at this stage AID is not available yet.
+ */
+ if (NL80211_IFTYPE_STATION != vif->type) {
+ wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+ sta_priv->aid = sta->aid;
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
+ return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ vif, sta->addr, sta_priv->sta_index);
+
+ wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ vif_priv->sta = NULL;
+ sta_priv->vif = NULL;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, true);
+ return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+ flush_workqueue(wcn->hal_ind_wq);
+ wcn36xx_smd_set_power_params(wcn, false);
+ return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_sta *sta_priv = NULL;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ action, tid);
+
+ sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ sta_priv->tid = tid;
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
+ wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+ get_sta_index(vif, sta_priv));
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ spin_lock_bh(&sta_priv->ampdu_lock);
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
+ spin_unlock_bh(&sta_priv->ampdu_lock);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ wcn36xx_err("Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+ .start = wcn36xx_start,
+ .stop = wcn36xx_stop,
+ .add_interface = wcn36xx_add_interface,
+ .remove_interface = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+ .suspend = wcn36xx_suspend,
+ .resume = wcn36xx_resume,
+#endif
+ .config = wcn36xx_config,
+ .configure_filter = wcn36xx_configure_filter,
+ .tx = wcn36xx_tx,
+ .set_key = wcn36xx_set_key,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
+ .bss_info_changed = wcn36xx_bss_info_changed,
+ .set_rts_threshold = wcn36xx_set_rts_threshold,
+ .sta_add = wcn36xx_sta_add,
+ .sta_remove = wcn36xx_sta_remove,
+ .ampdu_action = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+ int ret = 0;
+
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+
+ wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_TIMING_BEACON_ONLY;
+
+ wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+ wcn->hw->wiphy->cipher_suites = cipher_suites;
+ wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+ wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+ wcn->hw->max_listen_interval = 200;
+
+ wcn->hw->queues = 4;
+
+ SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+ wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+ wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+ return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ /* Set TX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlantx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get tx_irq\n");
+ return -ENOENT;
+ }
+ wcn->tx_irq = res->start;
+
+ /* Set RX IRQ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "wcnss_wlanrx_irq");
+ if (!res) {
+ wcn36xx_err("failed to get rx_irq\n");
+ return -ENOENT;
+ }
+ wcn->rx_irq = res->start;
+
+ /* Map the memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wcnss_mmio");
+ if (!res) {
+ wcn36xx_err("failed to get mmio\n");
+ return -ENOENT;
+ }
+ wcn->mmio = ioremap(res->start, resource_size(res));
+ if (!wcn->mmio) {
+ wcn36xx_err("failed to map io memory\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct wcn36xx *wcn;
+ int ret;
+ u8 addr[ETH_ALEN];
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+ hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+ if (!hw) {
+ wcn36xx_err("failed to alloc hw\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ platform_set_drvdata(pdev, hw);
+ wcn = hw->priv;
+ wcn->hw = hw;
+ wcn->dev = &pdev->dev;
+ wcn->ctrl_ops = pdev->dev.platform_data;
+
+ mutex_init(&wcn->hal_mutex);
+
+ if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ wcn36xx_info("mac address: %pM\n", addr);
+ SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+ }
+
+ ret = wcn36xx_platform_get_resources(wcn, pdev);
+ if (ret)
+ goto out_wq;
+
+ wcn36xx_init_ieee80211(wcn);
+ ret = ieee80211_register_hw(wcn->hw);
+ if (ret)
+ goto out_unmap;
+
+ return 0;
+
+out_unmap:
+ iounmap(wcn->mmio);
+out_wq:
+ ieee80211_free_hw(hw);
+out_err:
+ return ret;
+}
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct wcn36xx *wcn = hw->priv;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+ release_firmware(wcn->nv);
+ mutex_destroy(&wcn->hal_mutex);
+
+ ieee80211_unregister_hw(hw);
+ iounmap(wcn->mmio);
+ ieee80211_free_hw(hw);
+
+ return 0;
+}
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+ {
+ .name = "wcn36xx",
+ .driver_data = 0
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+ .probe = wcn36xx_probe,
+ .remove = wcn36xx_remove,
+ .driver = {
+ .name = "wcn36xx",
+ },
+ .id_table = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+ return platform_driver_register(&wcn36xx_driver);
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644
index 000000000..28b515c81
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ int ret = 0;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ /* TODO: Make sure the TX chain clean */
+ ret = wcn36xx_smd_enter_bmps(wcn, vif);
+ if (!ret) {
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+ vif_priv->pw_state = WCN36XX_BMPS;
+ } else {
+ /*
+ * One of the reasons why HW will not enter BMPS is that the
+ * driver is trying to enter BMPS before the first beacon was
+ * received, just after auth completes.
+ */
+ wcn36xx_err("Can not enter BMPS!\n");
+ }
+ return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (WCN36XX_BMPS != vif_priv->pw_state) {
+ wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+ return -EINVAL;
+ }
+ wcn36xx_smd_exit_bmps(wcn, vif);
+ vif_priv->pw_state = WCN36XX_FULL_POWER;
+ return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+ return wcn36xx_smd_keep_alive_req(wcn, vif,
+ WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644
index 000000000..f72ed68b5
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+ WCN36XX_FULL_POWER,
+ WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644
index 000000000..86a53c912
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -0,0 +1,2235 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
+struct wcn36xx_cfg_val {
+ u32 cfg_id;
+ u32 value;
+};
+
+#define WCN36XX_CFG_VAL(id, val) \
+{ \
+ .cfg_id = WCN36XX_HAL_CFG_ ## id, \
+ .value = val \
+}
+
+static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
+ WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
+ WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
+ WCN36XX_CFG_VAL(CAL_PERIOD, 5),
+ WCN36XX_CFG_VAL(CAL_CONTROL, 1),
+ WCN36XX_CFG_VAL(PROXIMITY, 0),
+ WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
+ WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
+ WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
+ WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
+ WCN36XX_CFG_VAL(FIXED_RATE, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
+ WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
+ WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
+ WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
+ WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
+ WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
+ WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
+ WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
+ WCN36XX_CFG_VAL(STATS_PERIOD, 10),
+ WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
+ WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
+ WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
+ WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
+ WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
+ WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
+ WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
+ WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+};
+
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+ struct wcn36xx_hal_cfg *entry;
+ u32 *val;
+
+ if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+ wcn36xx_err("Not enough room for TLV entry\n");
+ return -ENOMEM;
+ }
+
+ entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+ entry->id = id;
+ entry->len = sizeof(u32);
+ entry->pad_bytes = 0;
+ entry->reserve = 0;
+
+ val = (u32 *) (entry + 1);
+ *val = value;
+
+ *len += sizeof(*entry) + sizeof(u32);
+
+ return 0;
+}
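The table above is meant to be flattened into a sequence of TLVs with put_cfg_tlv_u32(); a minimal sketch of that loop (the real consumer is presumably wcn36xx_smd_start() later in this file, so this helper is illustrative only):

static int example_append_cfg_vals(struct wcn36xx *wcn, size_t *len)
{
	int i, ret;

	/* Append one u32 TLV per table entry to wcn->hal_buf */
	for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
		ret = put_cfg_tlv_u32(wcn, len,
				      wcn36xx_cfg_vals[i].cfg_id,
				      wcn36xx_cfg_vals[i].value);
		if (ret)
			return ret;
	}

	return 0;
}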
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+ else if (sta && sta->ht_cap.ht_supported)
+ bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+ else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+ else
+ bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+ return caps & flag ? 1 : 0;
+}
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params *bss_params)
+{
+ if (sta && sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ bss_params->ht = sta->ht_cap.ht_supported;
+ bss_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ bss_params->lsig_tx_op_protection_full_support =
+ is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+ bss_params->lln_non_gf_coexist =
+ !!(vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+ bss_params->dual_cts_protection = 0;
+ /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+ bss_params->ht20_coexist = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ unsigned long caps = sta->ht_cap.cap;
+ sta_params->ht_capable = sta->ht_cap.ht_supported;
+ sta_params->tx_channel_width_set = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ sta_params->lsig_txop_protection = is_cap_supported(caps,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+ sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+ sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+ sta_params->max_amsdu_size = is_cap_supported(caps,
+ IEEE80211_HT_CAP_MAX_AMSDU);
+ sta_params->sgi_20Mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_20);
+ sta_params->sgi_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_SGI_40);
+ sta_params->green_field_capable = is_cap_supported(caps,
+ IEEE80211_HT_CAP_GRN_FLD);
+ sta_params->delayed_ba_support = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DELAY_BA);
+ sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+ IEEE80211_HT_CAP_DSSSCCK40);
+ }
+}
+
+static void wcn36xx_smd_set_sta_default_ht_params(
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ sta_params->ht_capable = 1;
+ sta_params->tx_channel_width_set = 1;
+ sta_params->lsig_txop_protection = 1;
+ sta_params->max_ampdu_size = 3;
+ sta_params->max_ampdu_density = 5;
+ sta_params->max_amsdu_size = 0;
+ sta_params->sgi_20Mhz = 1;
+ sta_params->sgi_40mhz = 1;
+ sta_params->green_field_capable = 1;
+ sta_params->delayed_ba_support = 0;
+ sta_params->dsss_cck_mode_40mhz = 1;
+}
+
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params *sta_params)
+{
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_sta *priv_sta = NULL;
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ sta_params->type = 1;
+ sta_params->sta_index = 0xFF;
+ } else {
+ sta_params->type = 0;
+ sta_params->sta_index = 1;
+ }
+
+ sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+	/*
+	 * In STA mode ieee80211_sta holds the BSSID and ieee80211_vif
+	 * holds our own MAC address.  In AP mode we are the BSSID, so the
+	 * vif carries the BSSID and ieee80211_sta carries the peer MAC.
+	 */
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+ sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->short_preamble_supported =
+ !(WCN36XX_FLAGS(wcn) &
+ IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+ sta_params->rifs_mode = 0;
+ sta_params->rmf = 0;
+ sta_params->action = 0;
+ sta_params->uapsd = 0;
+ sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+ sta_params->max_ampdu_duration = 0;
+ sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->p2p = 0;
+
+ if (sta) {
+ priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ if (NL80211_IFTYPE_STATION == vif->type)
+ memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+ else
+ memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+ sta_params->wmm_enabled = sta->wme;
+ sta_params->max_sp_len = sta->max_sp;
+ sta_params->aid = priv_sta->aid;
+ wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+ memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+ sizeof(priv_sta->supported_rates));
+ } else {
+ wcn36xx_set_default_rates(&sta_params->supported_rates);
+ wcn36xx_smd_set_sta_default_ht_params(sta_params);
+ }
+}
+
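+/*
+ * Send the request that the caller has prepared in wcn->hal_buf and block
+ * until the firmware response arrives (wcn36xx_smd_rsp_process() copies it
+ * back into hal_buf and completes hal_rsp_compl), or until HAL_MSG_TIMEOUT
+ * expires.  Callers must hold hal_mutex around the whole exchange.
+ */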
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+ int ret = 0;
+ unsigned long start;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+ init_completion(&wcn->hal_rsp_compl);
+ start = jiffies;
+ ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ if (ret) {
+ wcn36xx_err("HAL TX failed\n");
+ goto out;
+ }
+ if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+ msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+ wcn36xx_err("Timeout! No SMD response in %dms\n",
+ HAL_MSG_TIMEOUT);
+ ret = -ETIME;
+ goto out;
+ }
+ wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+ jiffies_to_msecs(jiffies - start));
+out:
+ return ret;
+}
+
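+/*
+ * Every HAL request starts with a wcn36xx_hal_msg_header.  INIT_HAL_MSG()
+ * zeroes the local message and fills in that header, PREPARE_HAL_BUF()
+ * copies the finished message into the shared hal_buf that
+ * wcn36xx_smd_send_and_wait() transmits.
+ */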
+#define INIT_HAL_MSG(msg_body, type) \
+ do { \
+ memset(&msg_body, 0, sizeof(msg_body)); \
+ msg_body.header.msg_type = type; \
+ msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.len = sizeof(msg_body); \
+	} while (0)
+
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+ do { \
+ memset(send_buf, 0, msg_body.header.len); \
+ memcpy(send_buf, &msg_body, sizeof(msg_body)); \
+	} while (0)
+
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp *rsp;
+
+ if (len < sizeof(struct wcn36xx_hal_msg_header) +
+ sizeof(struct wcn36xx_fw_msg_status_rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_fw_msg_status_rsp *)
+ (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+ struct nv_data *nv_d;
+ struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+ int fw_bytes_left;
+ int ret;
+ u16 fm_offset = 0;
+
+ if (!wcn->nv) {
+ ret = reject_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out;
+ }
+ }
+
+ nv_d = (struct nv_data *)wcn->nv->data;
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+ msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+ msg_body.frag_number = 0;
+ /* hal_buf must be protected with mutex */
+ mutex_lock(&wcn->hal_mutex);
+
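+	/*
+	 * The NV blob is pushed to the firmware in WCN36XX_NV_FRAGMENT_SIZE
+	 * chunks; frag_number and last_fragment tell the firmware how to
+	 * reassemble it.
+	 */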
+ do {
+ fw_bytes_left = wcn->nv->size - fm_offset - 4;
+ if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+ msg_body.last_fragment = 0;
+ msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+ } else {
+ msg_body.last_fragment = 1;
+ msg_body.nv_img_buffer_size = fw_bytes_left;
+
+			/* Don't forget to update the overall message length */
+ msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+ }
+
+ /* Add load NV request message header */
+ memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+ /* Add NV body itself */
+ memcpy(wcn->hal_buf + sizeof(msg_body),
+ &nv_d->table + fm_offset,
+ msg_body.nv_img_buffer_size);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret)
+ goto out_unlock;
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_load_nv response failed err=%d\n",
+ ret);
+ goto out_unlock;
+ }
+ msg_body.frag_number++;
+ fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+ } while (msg_body.last_fragment != 1);
+
+out_unlock:
+ mutex_unlock(&wcn->hal_mutex);
+out:
+	return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+ return -EIO;
+
+ memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+ memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+ WCN36XX_HAL_VERSION_LENGTH);
+
+ /* null terminate the strings, just in case */
+ wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+ wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+ wcn->fw_revision = rsp->start_rsp_params.version.revision;
+ wcn->fw_version = rsp->start_rsp_params.version.version;
+ wcn->fw_minor = rsp->start_rsp_params.version.minor;
+ wcn->fw_major = rsp->start_rsp_params.version.major;
+
+ wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+ wcn->wlan_version, wcn->crm_version);
+
+ wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+ wcn->fw_major, wcn->fw_minor,
+ wcn->fw_version, wcn->fw_revision,
+ rsp->start_rsp_params.stations,
+ rsp->start_rsp_params.bssids);
+
+ return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
+ int ret = 0;
+ int i;
+ size_t len;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+ msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+ msg_body.params.len = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
+ len = body->header.len;
+
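+	/*
+	 * Append one config TLV per entry of wcn36xx_cfg_vals after the
+	 * fixed request, then patch the header and params lengths to cover
+	 * the appended data.
+	 */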
+ for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
+ ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id,
+ wcn36xx_cfg_vals[i].value);
+ if (ret)
+ goto out;
+ }
+ body->header.len = len;
+ body->params.len = len - sizeof(*body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+ msg_body.params.type);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_mac_stop_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+ msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_init_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_init_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_start_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_end_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+ msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+ msg_body.scan_channel);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_end_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode)
+{
+ struct wcn36xx_hal_finish_scan_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+ msg_body.mode = mode;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+ msg_body.mode);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_finish_scan failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+ int ret = 0;
+
+ ret = wcn36xx_smd_rsp_status_check(buf, len);
+ if (ret)
+ return ret;
+ rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+ rsp->channel_number, rsp->status);
+ return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch)
+{
+ struct wcn36xx_hal_switch_channel_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+ msg_body.channel_number = (u8)ch;
+ msg_body.tx_mgmt_power = 0xbf;
+ msg_body.max_tx_power = 0xbf;
+ memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_switch_channel failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+ rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+ /* Remove the PNO version bit */
+ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+ wcn36xx_warn("error response from update scan\n");
+ return rsp->status;
+ }
+
+ return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_update_scan_params_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+ msg_body.dot11d_enabled = 0;
+ msg_body.dot11d_resolved = 0;
+ msg_body.channel_count = 26;
+ msg_body.active_min_ch_time = 60;
+ msg_body.active_max_ch_time = 120;
+ msg_body.passive_min_ch_time = 60;
+ msg_body.passive_max_ch_time = 110;
+ msg_body.state = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update scan params channel_count %d\n",
+ msg_body.channel_count);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_scan_params failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal add sta self failure: %d\n",
+ rsp->status);
+ return rsp->status;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+ rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+ priv_vif->self_sta_index = rsp->self_sta_index;
+ priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+ return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_add_sta_self_req msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal add sta self self_addr %pM status %d\n",
+ msg_body.self_addr, msg_body.status);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+ struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+ memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta_self failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_delete_sta_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+ msg_body.sta_index = sta_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal delete sta sta_index %d\n",
+ msg_body.sta_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+ struct wcn36xx_hal_join_rsp_msg *rsp;
+
+ if (wcn36xx_smd_rsp_status_check(buf, len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal rsp join status %d tx_mgmt_power %d\n",
+ rsp->status, rsp->tx_mgmt_power);
+
+ return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+ struct wcn36xx_hal_join_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+ msg_body.channel = ch;
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ msg_body.secondary_channel_offset =
+ PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+ else
+ msg_body.secondary_channel_offset =
+ PHY_SINGLE_CHANNEL_CENTERED;
+
+ msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+ msg_body.max_tx_power = 0xbf;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+ msg_body.bssid, msg_body.self_sta_mac_addr,
+ msg_body.channel, msg_body.link_state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_join failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_join response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state)
+{
+ struct wcn36xx_hal_set_link_state_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+ memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+ memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+ msg_body.state = state;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+ msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_link_st failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
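+/*
+ * Firmware builds other than 1.2.2.24 expect the "v1" station and BSS
+ * parameter layouts, so the common parameters are repacked into the v1
+ * structures before sending (see the wcn36xx_is_fw_version() checks below).
+ */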
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_params *orig,
+ struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+ /* convert orig to v1 format */
+ memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+ memcpy(&v1->mac, orig->mac, ETH_ALEN);
+ v1->aid = orig->aid;
+ v1->type = orig->type;
+ v1->listen_interval = orig->listen_interval;
+ v1->ht_capable = orig->ht_capable;
+
+ v1->max_ampdu_size = orig->max_ampdu_size;
+ v1->max_ampdu_density = orig->max_ampdu_density;
+ v1->sgi_40mhz = orig->sgi_40mhz;
+ v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+ memcpy(&v1->supported_rates, &orig->supported_rates,
+ sizeof(orig->supported_rates));
+ v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+ struct config_sta_rsp_params *params;
+ struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+ params = &rsp->params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config sta response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ sta_priv->sta_index = params->sta_index;
+ sta_priv->dpu_desc_index = params->dpu_index;
+ sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
+ params->status, params->sta_index, params->bssid_index,
+ params->uc_ucast_sig, params->p2p);
+
+ return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+ struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+ &msg_body.sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta->action, sta->sta_index, sta->bssid_index,
+ sta->bssid, sta->type, sta->mac, sta->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wcn36xx_hal_config_sta_req_msg msg;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+ sta_params = &msg.sta_params;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
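+	/* Firmware other than version 1.2.2.24 takes the v1 layout */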
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_sta failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_sta_rsp(wcn,
+ sta,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ /* convert orig to v1 */
+ memcpy(&msg_body.bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+ memcpy(&msg_body.bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+ msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+ msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+ msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+ msg_body.bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+ msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+ msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+ msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+ msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+ msg_body.bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+ msg_body.bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+ msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+ msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+ msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+ msg_body.bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+ msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+ msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+ msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+ memcpy(&msg_body.bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+ msg_body.bss_params.action = orig->bss_params.action;
+ msg_body.bss_params.rateset = orig->bss_params.rateset;
+ msg_body.bss_params.ht = orig->bss_params.ht;
+ msg_body.bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+ msg_body.bss_params.rmf = orig->bss_params.rmf;
+ msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+ msg_body.bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+ msg_body.bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+ msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+ msg_body.bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+ msg_body.bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+ memcpy(&msg_body.bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+ memcpy(&msg_body.bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+ memcpy(&msg_body.bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+ memcpy(&msg_body.bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+ memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+ &orig->bss_params.ext_set_sta_key_param,
+ sizeof(orig->bss_params.acvo));
+
+ msg_body.bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+ msg_body.bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+ msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+ msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+ &msg_body.bss_params.sta);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
+
+ priv_vif->bss_index = params->bss_index;
+
+ if (priv_vif->sta) {
+ priv_vif->sta->bss_sta_index = params->bss_sta_index;
+ priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ }
+
+ priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg.bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+
+ memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+ memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+ /* AP */
+ bss->oper_mode = 0;
+ bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+ } else if (vif->type == NL80211_IFTYPE_ADHOC) {
+ bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+ /* STA */
+ bss->oper_mode = 1;
+ } else {
+ wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+ else
+ bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+ bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+ bss->lla_coexist = 0;
+ bss->llb_coexist = 0;
+ bss->llg_coexist = 0;
+ bss->rifs_mode = 0;
+ bss->beacon_interval = vif->bss_conf.beacon_int;
+ bss->dtim_period = vif_priv->dtim_period;
+
+ wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+ bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+ if (conf_is_ht40_minus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ else if (conf_is_ht40_plus(&wcn->hw->conf))
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ bss->reserved = 0;
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ /* wcn->ssid is only valid in AP and IBSS mode */
+ bss->ssid.length = vif_priv->ssid.length;
+ memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+ bss->obss_prot_enabled = 0;
+ bss->rmf = 0;
+ bss->max_probe_resp_retry_limit = 0;
+ bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+ bss->proxy_probe_resp = 0;
+ bss->edca_params_valid = 0;
+
+ /* FIXME: set acbe, acbk, acvi and acvo */
+
+ bss->ext_set_sta_key_param_valid = 0;
+
+ /* FIXME: set ext_set_sta_key_param */
+
+ bss->spectrum_mgt_enable = 0;
+ bss->tx_mgmt_power = 0;
+ bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+ bss->action = update;
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta_params->bssid, sta_params->action,
+ sta_params->sta_index, sta_params->bssid_index,
+ sta_params->aid, sta_params->type,
+ sta_params->mac);
+
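+	/* As with config_sta, firmware other than 1.2.2.24 needs the v1 layout */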
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+ } else {
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_config_bss_rsp(wcn,
+ vif,
+ wcn->hal_buf,
+ wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_delete_bss_req_msg msg_body;
+ struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+ msg_body.bss_index = priv_vif->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_delete_bss failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off)
+{
+ struct wcn36xx_hal_send_beacon_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+	/* TODO: find out why this is needed */
+ msg_body.beacon_length = skb_beacon->len + 6;
+
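+	/*
+	 * The template copied into msg_body.beacon starts with the frame
+	 * length as a u32, followed by the beacon frame itself.
+	 */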
+ if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+ memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+ memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+ skb_beacon->len);
+ } else {
+		wcn36xx_err("Beacon is too big: beacon size=%d\n",
+ msg_body.beacon_length);
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+	/* TODO: find out why this is needed */
+ if (vif->type == NL80211_IFTYPE_MESH_POINT)
+		/* mesh beacons don't need this, so push it further down */
+ msg_body.tim_ie_offset = 256;
+ else
+ msg_body.tim_ie_offset = tim_off+4;
+ msg_body.p2p_ie_offset = p2p_off;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal send beacon beacon_length %d\n",
+ msg_body.beacon_length);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_send_beacon failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct wcn36xx_hal_send_probe_resp_req_msg msg;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+ if (skb->len > BEACON_TEMPLATE_SIZE) {
+ wcn36xx_warn("probe response template is too big: %d\n",
+ skb->len);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ msg.probe_resp_template_len = skb->len;
+ memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+ memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal update probe rsp len %d bssid %pM\n",
+ msg.probe_resp_template_len, msg.bssid);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+ msg_body.set_sta_key_params.sta_index = sta_index;
+ msg_body.set_sta_key_params.enc_type = enc_type;
+
+ msg_body.set_sta_key_params.key[0].id = keyidx;
+ msg_body.set_sta_key_params.key[0].unicast = 1;
+ msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+ msg_body.set_sta_key_params.key[0].pae_role = 0;
+ msg_body.set_sta_key_params.key[0].length = keylen;
+ memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+ msg_body.set_sta_key_params.single_tid_rc = 1;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key)
+{
+ struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.num_keys = 1;
+ msg_body.keys[0].id = keyidx;
+ msg_body.keys[0].unicast = 0;
+ msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+ msg_body.keys[0].pae_role = 0;
+ msg_body.keys[0].length = keylen;
+ memcpy(msg_body.keys[0].key, key, keylen);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+ msg_body.sta_idx = sta_index;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_stakey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx)
+{
+ struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+ msg_body.bss_idx = 0;
+ msg_body.enc_type = enc_type;
+ msg_body.key_id = keyidx;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_remove_bsskey failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.tbtt = vif->bss_conf.sync_tsf;
+ msg_body.dtim_period = vif_priv->dtim_period;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+ struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_bmps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+ struct wcn36xx_hal_set_power_params_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+	/*
+	 * When the host is down, ignore every second DTIM
+	 */
+ if (ignore_dtim) {
+ msg_body.ignore_dtim = 1;
+ msg_body.dtim_period = 2;
+ }
+ msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_set_power_params failed\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+/* Note: this function should only be called after association, otherwise
+ * the request is invalid.
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type)
+{
+ struct wcn36xx_hal_keep_alive_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+ if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+ msg_body.bss_index = vif_priv->bss_index;
+ msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+ msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+ } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+		/* TODO: add support for the unsolicited ARP response packet type */
+ } else {
+ wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_keep_alive failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5)
+{
+ struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+ msg_body.arg1 = arg1;
+ msg_body.arg2 = arg2;
+ msg_body.arg3 = arg3;
+ msg_body.arg4 = arg4;
+ msg_body.arg5 = arg5;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_dump_cmd failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
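+/*
+ * Firmware feature capabilities are a 128-bit bitmap stored as four u32
+ * words; these helpers map a capability index (0..127) to its word and bit.
+ */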
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+ int ret = 0;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+ return ret;
+}
+
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+ int ret = 0, i;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+ set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+ goto out;
+ }
+	if (wcn->hal_rsp_len != sizeof(*rsp)) {
+		wcn36xx_err("Invalid hal_feature_caps_exchange response\n");
+		ret = -EIO;
+		goto out;
+	}
+
+ rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+ for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+ wcn->fw_feat_caps[i] = rsp->feat_caps[i];
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index)
+{
+ struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+ msg_body.sta_index = sta_index;
+ memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+ msg_body.dialog_token = 0x10;
+ msg_body.tid = tid;
+
+ /* Immediate BA because Delayed BA is not supported */
+ msg_body.policy = 1;
+ msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+ msg_body.timeout = 0;
+ if (ssn)
+ msg_body.ssn = *ssn;
+ msg_body.direction = direction;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba_session failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_add_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_add_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+ struct wcn36xx_hal_del_ba_req_msg msg_body;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+ msg_body.sta_index = sta_index;
+ msg_body.tid = tid;
+ msg_body.direction = 0;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_del_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+ struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+ struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+ msg_body.session_id = 0;
+ msg_body.candidate_cnt = 1;
+ msg_body.header.len += sizeof(*candidate);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
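+	/* The single BA candidate entry follows the fixed request in hal_buf */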
+ candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
+ (wcn->hal_buf + sizeof(msg_body));
+ candidate->sta_index = sta_index;
+ candidate->tid_bitmap = 1;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_trigger_ba failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Bad TX complete indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+ return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ /* Old FW does not have bss index */
+ if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ tmp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ }
+ return 0;
+ }
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted missed beacon indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ if (tmp->bss_index == rsp->bss_index) {
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+ rsp->bss_index);
+ vif = container_of((void *)tmp,
+ struct ieee80211_vif,
+ drv_priv);
+ ieee80211_connection_loss(vif);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+ return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_sta *sta = NULL;
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete sta indication\n");
+ return -EIO;
+ }
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+		if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+ sta = container_of((void *)tmp->sta,
+ struct ieee80211_sta,
+ drv_priv);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d\n",
+ rsp->addr2,
+ rsp->sta_id);
+ ieee80211_report_low_ack(sta, 0);
+ return 0;
+ }
+ }
+
+ wcn36xx_warn("STA with addr %pM and index %d not found\n",
+ rsp->addr2,
+ rsp->sta_id);
+ return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+ struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+ size_t len;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
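+	/* Append the single config TLV after the fixed request, as in
+	 * wcn36xx_smd_start(), and fix up the lengths accordingly.
+	 */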
+ body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+ len = msg_body.header.len;
+
+ put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+ body->header.len = len;
+ body->len = len - sizeof(*body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_cfg failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
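+
+/*
+ * SMD receive callback: responses to requests are copied back into hal_buf
+ * and complete hal_rsp_compl so the sender in wcn36xx_smd_send_and_wait()
+ * can continue, while asynchronous indications are duplicated and queued to
+ * hal_ind_wq for deferred handling in wcn36xx_ind_smd_work().
+ */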
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_msg_header *msg_header = buf;
+ struct wcn36xx_hal_ind_msg *msg_ind;
+ wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_START_RSP:
+ case WCN36XX_HAL_CONFIG_STA_RSP:
+ case WCN36XX_HAL_CONFIG_BSS_RSP:
+ case WCN36XX_HAL_ADD_STA_SELF_RSP:
+ case WCN36XX_HAL_STOP_RSP:
+ case WCN36XX_HAL_DEL_STA_SELF_RSP:
+ case WCN36XX_HAL_DELETE_STA_RSP:
+ case WCN36XX_HAL_INIT_SCAN_RSP:
+ case WCN36XX_HAL_START_SCAN_RSP:
+ case WCN36XX_HAL_END_SCAN_RSP:
+ case WCN36XX_HAL_FINISH_SCAN_RSP:
+ case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+ case WCN36XX_HAL_DELETE_BSS_RSP:
+ case WCN36XX_HAL_SEND_BEACON_RSP:
+ case WCN36XX_HAL_SET_LINK_ST_RSP:
+ case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+ case WCN36XX_HAL_SET_BSSKEY_RSP:
+ case WCN36XX_HAL_SET_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_STAKEY_RSP:
+ case WCN36XX_HAL_RMV_BSSKEY_RSP:
+ case WCN36XX_HAL_ENTER_BMPS_RSP:
+ case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+ case WCN36XX_HAL_EXIT_BMPS_RSP:
+ case WCN36XX_HAL_KEEP_ALIVE_RSP:
+ case WCN36XX_HAL_DUMP_COMMAND_RSP:
+ case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+ case WCN36XX_HAL_ADD_BA_RSP:
+ case WCN36XX_HAL_DEL_BA_RSP:
+ case WCN36XX_HAL_TRIGGER_BA_RSP:
+ case WCN36XX_HAL_UPDATE_CFG_RSP:
+ case WCN36XX_HAL_JOIN_RSP:
+ case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+ case WCN36XX_HAL_CH_SWITCH_RSP:
+ case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ memcpy(wcn->hal_buf, buf, len);
+ wcn->hal_rsp_len = len;
+ complete(&wcn->hal_rsp_compl);
+ break;
+
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+ if (!msg_ind)
+ goto nomem;
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
+ if (!msg_ind->msg) {
+ kfree(msg_ind);
+nomem:
+ /*
+			 * FIXME: Do something smarter than just
+			 * printing an error.
+			 */
+			wcn36xx_err("Ran out of memory while handling SMD_EVENT (%d)\n",
+ msg_header->msg_type);
+ break;
+ }
+ mutex_lock(&wcn->hal_ind_mutex);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ mutex_unlock(&wcn->hal_ind_mutex);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+}
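+
+/* Work item that handles the indication at the head of hal_ind_queue */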
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, hal_ind_work);
+ struct wcn36xx_hal_msg_header *msg_header;
+ struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+ mutex_lock(&wcn->hal_ind_mutex);
+
+ hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+ struct wcn36xx_hal_ind_msg,
+ list);
+
+ msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+ switch (msg_header->msg_type) {
+ case WCN36XX_HAL_OTA_TX_COMPL_IND:
+ wcn36xx_smd_tx_compl_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_MISSED_BEACON_IND:
+ wcn36xx_smd_missed_beacon_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+ wcn36xx_smd_delete_sta_context_ind(wcn,
+ hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
+ default:
+ wcn36xx_err("SMD_EVENT (%d) not supported\n",
+ msg_header->msg_type);
+ }
+	list_del(&hal_ind_msg->list);
+ kfree(hal_ind_msg->msg);
+ kfree(hal_ind_msg);
+ mutex_unlock(&wcn->hal_ind_mutex);
+}
+
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+ int ret = 0;
+ wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+ if (!wcn->hal_ind_wq) {
+ wcn36xx_err("failed to allocate wq\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+ INIT_LIST_HEAD(&wcn->hal_ind_queue);
+ mutex_init(&wcn->hal_ind_mutex);
+
+ ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+ if (ret) {
+ wcn36xx_err("failed to open control channel\n");
+ goto free_wq;
+ }
+
+ return ret;
+
+free_wq:
+ destroy_workqueue(wcn->hal_ind_wq);
+out:
+ return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+ wcn->ctrl_ops->close();
+ destroy_workqueue(wcn->hal_ind_wq);
+ mutex_destroy(&wcn->hal_ind_mutex);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644
index 000000000..008d03423
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE 3072
+
+#define WCN36XX_HAL_BUF_SIZE 4096
+
+#define HAL_MSG_TIMEOUT 500
+#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
+/* The PNO version info is contained in the response message */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK 0x8000
+
+enum wcn36xx_fw_msg_result {
+ WCN36XX_FW_MSG_RESULT_SUCCESS = 0,
+ WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC = 1,
+
+ WCN36XX_FW_MSG_RESULT_MEM_FAIL = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+ u32 status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+ struct list_head list;
+ u8 *msg;
+ size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+ enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+ const u8 *sta_mac,
+ enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct sk_buff *skb_beacon, u16 tim_off,
+ u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key,
+ u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 keylen,
+ u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx,
+ u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+ enum ani_ed_type enc_type,
+ u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ u16 tid,
+ u16 *ssn,
+ u8 direction,
+ u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644
index 000000000..9bec82372
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+ return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
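+/*
+ * Illustrative example (values are made up): if the top byte of
+ * phy_stat0 is 40, get_rssi0() returns 100 - 40 = 60 and
+ * wcn36xx_rx_skb() below reports status.signal = -60.
+ */
+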
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status status;
+ struct ieee80211_hdr *hdr;
+ struct wcn36xx_rx_bd *bd;
+ u16 fc, sn;
+
+ /*
+	 * All fields must be cleared, otherwise stale values
+	 * lead to unexpected behaviour.
+ */
+ memset(&status, 0, sizeof(status));
+
+ bd = (struct wcn36xx_rx_bd *)skb->data;
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+ "BD <<< ", (char *)bd,
+ sizeof(struct wcn36xx_rx_bd));
+
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
+
+ status.mactime = 10;
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
+ status.signal = -get_rssi0(bd);
+ status.antenna = 1;
+ status.rate_idx = 1;
+ status.flag = 0;
+ status.rx_flags = 0;
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_DECRYPTED;
+
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = __le16_to_cpu(hdr->frame_control);
+ sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ } else {
+ wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+ skb, skb->len, fc, sn);
+ wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+ (char *)skb->data, skb->len);
+ }
+
+ ieee80211_rx_irqsafe(wcn->hw, skb);
+
+ return 0;
+}
+
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+ u32 mpdu_header_len,
+ u32 len,
+ u16 tid)
+{
+ bd->pdu.mpdu_header_len = mpdu_header_len;
+ bd->pdu.mpdu_header_off = sizeof(*bd);
+ bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+ bd->pdu.mpdu_header_off;
+ bd->pdu.mpdu_len = len;
+ bd->pdu.tid = tid;
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
+}
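+
+/*
+ * Resulting buffer layout (illustrative sketch):
+ *
+ *	[ wcn36xx_tx_bd ][ 802.11 header ][ frame body ]
+ *	0                 mpdu_header_off  mpdu_data_off
+ *
+ * e.g. for a QoS data frame mpdu_header_len is
+ * sizeof(struct ieee80211_qos_hdr) (26 bytes), so
+ * mpdu_data_off = sizeof(*bd) + 26 and mpdu_len = skb->len.
+ */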
+
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+ u8 *addr)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ vif = container_of((void *)vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+ if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+ return vif_priv;
+ }
+ wcn36xx_warn("vif %pM not found\n", addr);
+ return NULL;
+}
+
+static void wcn36xx_tx_start_ampdu(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_sta *sta;
+ u8 *qc, tid;
+
+ if (!conf_is_ht(&wcn->hw->conf))
+ return;
+
+ sta = wcn36xx_priv_to_sta(sta_priv);
+
+ if (WARN_ON(!ieee80211_is_data_qos(hdr->frame_control)))
+ return;
+
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+
+ spin_lock(&sta_priv->ampdu_lock);
+ if (sta_priv->ampdu_state[tid] != WCN36XX_AMPDU_NONE)
+ goto out_unlock;
+
+ if (sta_priv->non_agg_frame_ct++ >= WCN36XX_AMPDU_START_THRESH) {
+ sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
+ sta_priv->non_agg_frame_ct = 0;
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+ }
+out_unlock:
+ spin_unlock(&sta_priv->ampdu_lock);
+}
+
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb,
+ bool bcast)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *__vif_priv = NULL;
+ bool is_data_qos;
+
+ bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+ /*
+	 * For non-unicast frames mac80211 does not set the sta pointer, so
+	 * use self_sta_index instead.
+ */
+ if (sta_priv) {
+ __vif_priv = sta_priv->vif;
+ vif = container_of((void *)__vif_priv,
+ struct ieee80211_vif,
+ drv_priv);
+
+ bd->dpu_sign = sta_priv->ucast_dpu_sign;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bd->sta_index = sta_priv->bss_sta_index;
+ bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ bd->sta_index = sta_priv->sta_index;
+ bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+ }
+ } else {
+ __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
+ }
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ (sta_priv && !sta_priv->is_data_encrypted))
+ bd->dpu_ne = 1;
+
+ if (bcast) {
+ bd->ub = 1;
+ bd->ack_policy = 1;
+ }
+ *vif_priv = __vif_priv;
+
+ is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
+
+ wcn36xx_set_tx_pdu(bd,
+ is_data_qos ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, sta_priv ? sta_priv->tid : 0);
+
+ if (sta_priv && is_data_qos)
+ wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+ struct wcn36xx *wcn,
+ struct wcn36xx_vif **vif_priv,
+ struct sk_buff *skb,
+ bool bcast)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *__vif_priv =
+ get_vif_by_addr(wcn, hdr->addr2);
+ bd->sta_index = __vif_priv->self_sta_index;
+ bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_ne = 1;
+
+ /* default rate for unicast */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ WCN36XX_BD_RATE_CTRL :
+ WCN36XX_BD_RATE_MGMT;
+ else if (ieee80211_is_ctl(hdr->frame_control))
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ else
+ wcn36xx_warn("frame control type unknown\n");
+
+ /*
+	 * While joining, trick the hardware into treating the probe request
+	 * as unicast even though the address is broadcast.
+ */
+ if (__vif_priv->is_joining &&
+ ieee80211_is_probe_req(hdr->frame_control))
+ bcast = false;
+
+ if (bcast) {
+ /* broadcast */
+ bd->ub = 1;
+		/* No ACK needed for non-unicast frames */
+ bd->ack_policy = 1;
+ bd->queue_id = WCN36XX_TX_B_WQ_ID;
+ } else
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ *vif_priv = __vif_priv;
+
+ wcn36xx_set_tx_pdu(bd,
+ ieee80211_is_data_qos(hdr->frame_control) ?
+ sizeof(struct ieee80211_qos_hdr) :
+ sizeof(struct ieee80211_hdr_3addr),
+ skb->len, WCN36XX_TID);
+}
+
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
+ bool is_low = ieee80211_is_data(hdr->frame_control);
+ bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+ if (!bd) {
+ /*
+		 * TX DXE descriptors are used in pairs: one for the BD and one
+		 * for the actual frame. The BD descriptors have preallocated
+		 * buffers while the skb ones do not. If this is not the case
+		 * something is really wrong. TODO: Recover from this situation
+ */
+
+ wcn36xx_err("bd address may not be NULL for BD DXE\n");
+ return -EINVAL;
+ }
+
+ memset(bd, 0, sizeof(*bd));
+
+ wcn36xx_dbg(WCN36XX_DBG_TX,
+ "tx skb %p len %d fc %04x sn %d %s %s\n",
+ skb, skb->len, __le16_to_cpu(hdr->frame_control),
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+ wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+ bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+ bd->tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS);
+ if (bd->tx_comp) {
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ if (wcn->tx_ack_skb) {
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+ wcn36xx_warn("tx_ack_skb already set\n");
+ return -EINVAL;
+ }
+
+ wcn->tx_ack_skb = skb;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+		/* The firmware supports only one outstanding status request at
+		 * a time. Stop the TX queues until the ACK status comes back.
+ *
+ * TODO: Add watchdog in case FW does not answer
+ */
+ ieee80211_stop_queues(wcn->hw);
+ }
+
+	/* Data frames are served first */
+ if (is_low)
+ wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, skb, bcast);
+ else
+		/* MGMT and CTRL frames are handled here */
+ wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, skb, bcast);
+
+ buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ bd->tx_bd_sign = 0xbdbdbdbd;
+
+ return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644
index 000000000..032216e82
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN 24
+#define WCN36XX_BMU_WQ_TX 25
+#define WCN36XX_TID 7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID 0xA
+#define WCN36XX_TX_U_WQ_ID 0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
+enum wcn36xx_txbd_ssn_type {
+ WCN36XX_TXBD_SSN_FILL_HOST = 0,
+ WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS = 1,
+ WCN36XX_TXBD_SSN_FILL_DPU_QOS = 2,
+};
+
+struct wcn36xx_pdu {
+ u32 dpu_fb:8;
+ u32 adu_fb:8;
+ u32 pdu_id:16;
+
+ /* 0x04*/
+ u32 tail_pdu_idx:16;
+ u32 head_pdu_idx:16;
+
+ /* 0x08*/
+ u32 pdu_count:7;
+ u32 mpdu_data_off:9;
+ u32 mpdu_header_off:8;
+ u32 mpdu_header_len:8;
+
+ /* 0x0c*/
+ u32 reserved4:8;
+ u32 tid:4;
+ u32 bd_ssn:2;
+ u32 reserved3:2;
+ u32 mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 rx_key_id:3;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 uma_bypass:1;
+ u32 csr11:1;
+ u32 reserved0:1;
+ u32 scan_learn:1;
+ u32 rx_ch:4;
+ u32 rtsf:1;
+ u32 bsf:1;
+ u32 a2hf:1;
+ u32 st_auf:1;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 addr3:8;
+ u32 addr2:8;
+ u32 addr1:8;
+ u32 dpu_desc_idx:8;
+
+ /* 0x18*/
+ u32 rxp_flags:23;
+ u32 rate_id:9;
+
+ u32 phy_stat0;
+ u32 phy_stat1;
+
+ /* 0x24 */
+ u32 rx_times;
+
+ u32 pmi_cmd[6];
+
+ /* 0x40 */
+ u32 reserved7:4;
+ u32 reorder_slot_id:6;
+ u32 reorder_fwd_id:6;
+ u32 reserved6:12;
+ u32 reorder_code:4;
+
+ /* 0x44 */
+ u32 exp_seq_num:12;
+ u32 cur_seq_num:12;
+ u32 fr_type_subtype:8;
+
+ /* 0x48 */
+ u32 msdu_size:16;
+ u32 sub_fr_id:4;
+ u32 proc_order:4;
+ u32 reserved9:4;
+ u32 aef:1;
+ u32 lsf:1;
+ u32 esf:1;
+ u32 asf:1;
+};
+
+struct wcn36xx_tx_bd {
+ u32 bdt:2;
+ u32 ft:1;
+ u32 dpu_ne:1;
+ u32 fw_tx_comp:1;
+ u32 tx_comp:1;
+ u32 reserved1:1;
+ u32 ub:1;
+ u32 rmf:1;
+ u32 reserved0:12;
+ u32 dpu_sign:3;
+ u32 dpu_rf:8;
+
+ struct wcn36xx_pdu pdu;
+
+ /* 0x14*/
+ u32 reserved5:7;
+ u32 queue_id:5;
+ u32 bd_rate:2;
+ u32 ack_policy:2;
+ u32 sta_index:8;
+ u32 dpu_desc_idx:8;
+
+ u32 tx_bd_sign;
+ u32 reserved6;
+ u32 dxe_start_time;
+ u32 dxe_end_time;
+
+ /*u32 tcp_udp_start_off:10;
+ u32 header_cks:16;
+ u32 reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+ struct wcn36xx_sta *sta_priv,
+ struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644
index 000000000..0555c9502
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE "/*(DEBLOBBED)*/"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+/* How many frames until we start an A-MPDU TX session */
+#define WCN36XX_AMPDU_START_THRESH 20
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+ WCN36XX_DBG_DXE = 0x00000001,
+ WCN36XX_DBG_DXE_DUMP = 0x00000002,
+ WCN36XX_DBG_SMD = 0x00000004,
+ WCN36XX_DBG_SMD_DUMP = 0x00000008,
+ WCN36XX_DBG_RX = 0x00000010,
+ WCN36XX_DBG_RX_DUMP = 0x00000020,
+ WCN36XX_DBG_TX = 0x00000040,
+ WCN36XX_DBG_TX_DUMP = 0x00000080,
+ WCN36XX_DBG_HAL = 0x00000100,
+ WCN36XX_DBG_HAL_DUMP = 0x00000200,
+ WCN36XX_DBG_MAC = 0x00000400,
+ WCN36XX_DBG_BEACON = 0x00000800,
+ WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+ WCN36XX_DBG_PMC = 0x00002000,
+ WCN36XX_DBG_PMC_DUMP = 0x00004000,
+ WCN36XX_DBG_ANY = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...) \
+ printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
+
+#define wcn36xx_warn(fmt, arg...) \
+ printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...) \
+ printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##arg); \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do { \
+ if (wcn36xx_dbg_mask & mask) \
+ print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str), \
+ DUMP_PREFIX_OFFSET, 32, 1, \
+ buf, len, false); \
+} while (0)
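+
+/*
+ * Usage examples (as used elsewhere in this driver):
+ *
+ *	wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ *	wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "BD <<< ", (char *)bd,
+ *			 sizeof(struct wcn36xx_rx_bd));
+ */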
+
+enum wcn36xx_ampdu_state {
+ WCN36XX_AMPDU_NONE,
+ WCN36XX_AMPDU_INIT,
+ WCN36XX_AMPDU_START,
+ WCN36XX_AMPDU_OPERATIONAL,
+};
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+}
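+
+/*
+ * Both the TX and RX paths convert the buffer descriptor in place, e.g.
+ *
+ *	buff_to_be((u32 *)bd, sizeof(*bd) / sizeof(u32));
+ */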
+
+struct nv_data {
+ int is_valid;
+ u8 table;
+};
+
+/* Interface for the platform control path
+ *
+ * @open: called when wcn36xx wants to open the control channel.
+ * @tx: sends a buffer over the control channel.
+ */
+struct wcn36xx_platform_ctrl_ops {
+ int (*open)(void *drv_priv, void *rsp_cb);
+ void (*close)(void);
+ int (*tx)(char *buf, size_t len);
+ int (*get_hw_mac)(u8 *addr);
+ int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
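+
+/*
+ * Illustrative sketch (hypothetical, not part of this driver): a platform
+ * glue driver is expected to fill in these hooks and hand the struct to
+ * wcn36xx, e.g.
+ *
+ *	static struct wcn36xx_platform_ctrl_ops example_smd_ops = {
+ *		.open			= example_smd_open,
+ *		.close			= example_smd_close,
+ *		.tx			= example_smd_tx,
+ *		.get_hw_mac		= example_get_hw_mac,
+ *		.smsm_change_state	= example_smsm_change_state,
+ *	};
+ *
+ * wcn36xx_smd_open() then calls ctrl_ops->open() with
+ * wcn36xx_smd_rsp_process as the response callback.
+ */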
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: initially set to 0xFF; bss_index is received from HW after the
+ *             first config_bss call and must be used in delete_bss and
+ *             enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+ struct list_head list;
+ struct wcn36xx_sta *sta;
+ u8 dtim_period;
+ enum ani_ed_type encrypt_type;
+ bool is_joining;
+ struct wcn36xx_hal_mac_ssid ssid;
+
+ /* Power management */
+ enum wcn36xx_power_state pw_state;
+
+ u8 bss_index;
+ /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+ u8 self_sta_index;
+ u8 self_dpu_desc_index;
+ u8 self_ucast_dpu_sign;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ *    used in both SMD channel and TX BD. See the table below for when each
+ *    index is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * | | STA | AP |
+ * |______________|_____________|_______________|
+ * | TX BD |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index| sta_index |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta| sta_index | sta_index |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+ struct wcn36xx_vif *vif;
+ u16 aid;
+ u16 tid;
+ u8 sta_index;
+ u8 dpu_desc_index;
+ u8 ucast_dpu_sign;
+ u8 bss_sta_index;
+ u8 bss_dpu_desc_index;
+ bool is_data_encrypted;
+ /* Rates */
+ struct wcn36xx_hal_supported_rates supported_rates;
+
+ spinlock_t ampdu_lock; /* protects next two fields */
+ enum wcn36xx_ampdu_state ampdu_state[16];
+ int non_agg_frame_ct;
+};
+
+struct wcn36xx_dxe_ch;
+
+struct wcn36xx {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+ struct list_head vif_list;
+
+ const struct firmware *nv;
+
+ u8 fw_revision;
+ u8 fw_version;
+ u8 fw_minor;
+ u8 fw_major;
+ u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+ u32 chip_version;
+
+ /* extra byte for the NULL termination */
+ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+ u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+ /* IRQs */
+ int tx_irq;
+ int rx_irq;
+ void __iomem *mmio;
+
+ struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+ /*
+	 * hal_buf must be protected with hal_mutex to guarantee
+	 * that all messages are sent one after another
+ */
+ u8 *hal_buf;
+ size_t hal_rsp_len;
+ struct mutex hal_mutex;
+ struct completion hal_rsp_compl;
+ struct workqueue_struct *hal_ind_wq;
+ struct work_struct hal_ind_work;
+ struct mutex hal_ind_mutex;
+ struct list_head hal_ind_queue;
+
+ /* DXE channels */
+ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */
+ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */
+ struct wcn36xx_dxe_ch dxe_rx_l_ch; /* RX low */
+ struct wcn36xx_dxe_ch dxe_rx_h_ch; /* RX high */
+
+ /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+ spinlock_t dxe_lock;
+ bool queues_stopped;
+
+ /* Memory pools */
+ struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+ struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+ struct sk_buff *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+ /* Debug file system entry */
+ struct wcn36xx_dfs_entry dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+#define WCN36XX_CHIP_3660 0
+#define WCN36XX_CHIP_3680 1
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+ u8 major,
+ u8 minor,
+ u8 version,
+ u8 revision)
+{
+ return (wcn->fw_major == major &&
+ wcn->fw_minor == minor &&
+ wcn->fw_version == version &&
+ wcn->fw_revision == revision);
+}
+
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+static inline
+struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
+{
+ return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
+}
+
+#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000..ce8c03818
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,41 @@
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ depends on CFG80211
+ depends on PCI
+ default n
+ ---help---
+	  This module adds support for wireless adapters based on the
+	  wil6210 chip by Wilocity. It supports operation on the
+	  60 GHz band, covered by the IEEE 802.11ad standard.
+
+ http://wireless.kernel.org/en/users/Drivers/wil6210
+
+	  If you choose to build it as a module, it will be called
+	  wil6210.
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ ---help---
+	  ISR registers on the wil6210 chip may operate in either
+	  COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+	  For production use, choose COR (say y); it is the default
+	  and saves an extra target transaction.
+	  For ISR debugging, choose W1C (say n); it allows monitoring
+	  the ISR registers with debugfs. If COR were used, the ISR
+	  would self-clear when accessed for debug purposes, which
+	  makes such monitoring impossible.
+	  Say y unless you are debugging interrupts.
+
+config WIL6210_TRACING
+ bool "wil6210 tracing support"
+ depends on WIL6210
+ depends on EVENT_TRACING
+ default y
+ ---help---
+ Say Y here to enable tracepoints for the wil6210 driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000..caa717bf5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,22 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-y := main.o
+wil6210-y += netdev.o
+wil6210-y += cfg80211.o
+wil6210-y += pcie_bus.o
+wil6210-y += debugfs.o
+wil6210-y += wmi.o
+wil6210-y += interrupt.o
+wil6210-y += txrx.o
+wil6210-y += debug.o
+wil6210-y += rx_reorder.o
+wil6210-y += ioctl.o
+wil6210-y += fw.o
+wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
+wil6210-y += wil_platform.o
+wil6210-y += ethtool.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
+
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000..b97172667
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,1047 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) { \
+ .band = IEEE80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
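+
+/* For example, CHAN60G(2, 0) describes 60 GHz channel 2 at
+ * 56160 + 2160 * 2 = 60480 MHz.
+ */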
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
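+
+/*
+ * Usage example (see wil_cfg80211_start_ap() below):
+ *
+ *	u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ *
+ * Unsupported interface types yield -EOPNOTSUPP.
+ */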
+
+int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+ struct station_info *sinfo)
+{
+ struct wmi_notify_req_cmd cmd = {
+ .cid = cid,
+ .interval_usec = 0,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ int rc;
+
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ wil_dbg_wmi(wil, "Link status for CID %d: {\n"
+ " MCS %d TSF 0x%016llx\n"
+ " BF status 0x%08x SNR 0x%08x SQI %d%%\n"
+ " Tx Tpt %d goodput %d Rx goodput %d\n"
+ " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+ cid, le16_to_cpu(reply.evt.bf_mcs),
+ le64_to_cpu(reply.evt.tsf), reply.evt.status,
+ le32_to_cpu(reply.evt.snr_val),
+ reply.evt.sqi,
+ le32_to_cpu(reply.evt.tx_tpt),
+ le32_to_cpu(reply.evt.tx_goodput),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector));
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_RX_BITRATE) |
+ BIT(NL80211_STA_INFO_TX_BITRATE) |
+ BIT(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT(NL80211_STA_INFO_TX_FAILED);
+
+ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
+ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->rxrate.mcs = stats->last_mcs_rx;
+ sinfo->rx_bytes = stats->rx_bytes;
+ sinfo->rx_packets = stats->rx_packets;
+ sinfo->rx_dropped_misc = stats->rx_dropped;
+ sinfo->tx_bytes = stats->tx_bytes;
+ sinfo->tx_packets = stats->tx_packets;
+ sinfo->tx_failed = stats->tx_errors;
+
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = reply.evt.sqi;
+ }
+
+ return rc;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ int cid = wil_find_cid(wil, mac);
+
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ if (cid < 0)
+ return cid;
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
+}
+
+/*
+ * Find @idx-th active STA for station dump.
+ */
+static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (idx == 0)
+ return i;
+ idx--;
+ }
+
+ return -ENOENT;
+}
+
+static int wil_cfg80211_dump_station(struct wiphy *wiphy,
+ struct net_device *dev, int idx,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ int cid = wil_find_cid_by_idx(wil, idx);
+
+ if (cid < 0)
+ return -ENOENT;
+
+ ether_addr_copy(mac, wil->sta[cid].addr);
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (flags)
+ wil->monitor_flags = *flags;
+ else
+ wil->monitor_flags = 0;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+ int rc;
+
+ if (wil->scan_request) {
+ wil_err(wil, "Already scanning\n");
+ return -EAGAIN;
+ }
+
+ /* check we are client side */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+	/* The FW doesn't support scanning after a connection attempt */
+ if (test_bit(wil_status_dontscan, wil->status)) {
+ wil_err(wil, "Can't scan now\n");
+ return -EBUSY;
+ }
+
+ wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
+ wil->scan_request = request;
+ mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.num_channels = 0;
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ if (request->ie_len)
+ print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
+ request->ie, request->ie_len);
+ else
+ wil_dbg_misc(wil, "Scan has no IE's\n");
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
+ request->ie);
+ if (rc) {
+ wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+ goto out;
+ }
+
+ rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+
+out:
+ if (rc) {
+ del_timer_sync(&wil->scan_timer);
+ wil->scan_request = NULL;
+ }
+
+ return rc;
+}
+
+static void wil_print_crypto(struct wil6210_priv *wil,
+ struct cfg80211_crypto_settings *c)
+{
+ int i, n;
+
+ wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
+ c->wpa_versions, c->cipher_group);
+ wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
+ n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->ciphers_pairwise[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
+ n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
+ for (i = 0; i < n; i++)
+ wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
+ c->akm_suites[i]);
+ wil_dbg_misc(wil, "}\n");
+ wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
+ c->control_port, be16_to_cpu(c->control_port_ethertype),
+ c->control_port_no_encrypt);
+}
+
+static void wil_print_connect_params(struct wil6210_priv *wil,
+ struct cfg80211_connect_params *sme)
+{
+ wil_info(wil, "Connecting to:\n");
+ if (sme->channel) {
+ wil_info(wil, " Channel: %d freq %d\n",
+ sme->channel->hw_value, sme->channel->center_freq);
+ }
+ if (sme->bssid)
+ wil_info(wil, " BSSID: %pM\n", sme->bssid);
+ if (sme->ssid)
+ print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
+ 16, 1, sme->ssid, sme->ssid_len, true);
+ wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
+ wil_print_crypto(wil, &sme->crypto);
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+
+ wil_print_connect_params(wil, sme);
+
+ if (test_bit(wil_status_fwconnecting, wil->status) ||
+ test_bit(wil_status_fwconnected, wil->status))
+ return -EALREADY;
+
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
+ return -ERANGE;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+
+ if (sme->privacy && !rsn_eid) {
+ wil_err(wil, "Missing RSN IE for secure connection\n");
+ return -EINVAL;
+ }
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+ wil->privacy = sme->privacy;
+
+ if (wil->privacy) {
+ /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ goto out;
+ }
+ }
+
+	/* WMI_SET_APPIE_CMD. The IE buffer may contain RSN info as well as
+	 * other information elements. Send it even when empty, to erase any
+	 * previously set IEs in the FW.
+ */
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc) {
+ wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ goto out;
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
+ switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ conn.network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ conn.network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ goto out;
+ }
+ if (wil->privacy) {
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ } else {
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ conn.channel = ch - 1;
+
+ ether_addr_copy(conn.bssid, bss->bssid);
+ ether_addr_copy(conn.dst_mac, bss->bssid);
+
+ set_bit(wil_status_fwconnecting, wil->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+ if (rc == 0) {
+ netif_carrier_on(ndev);
+ /* Connect can take lots of time */
+ mod_timer(&wil->connect_timer,
+ jiffies + msecs_to_jiffies(2000));
+ } else {
+ clear_bit(wil_status_fwconnecting, wil->status);
+ }
+
+ out:
+ cfg80211_put_bss(wiphy, bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ return rc;
+}
+
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
+{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ bool tx_status = false;
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_cmd *cmd;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt;
+
+ cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc == 0)
+ tx_status = !evt.evt.status;
+
+ kfree(cmd);
+ out:
+ cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
+ tx_status, GFP_KERNEL);
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wdev->preset_chandef = *chandef;
+
+ return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_add_cipher_key(wil, key_index, mac_addr,
+ params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Needs to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ return 0;
+}
+
+static int wil_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* TODO: handle duration */
+ wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+
+ rc = wmi_set_channel(wil, chan->hw_value);
+ if (rc)
+ return rc;
+
+ rc = wmi_rxon(wil, true);
+
+ return rc;
+}
+
+static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_info(wil, "%s()\n", __func__);
+
+ rc = wmi_rxon(wil, false);
+
+ return rc;
+}
+
+static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
+{
+ print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET,
+ b->head, b->head_len);
+ print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET,
+ b->tail, b->tail_len);
+ print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET,
+ b->beacon_ies, b->beacon_ies_len);
+ print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET,
+ b->probe_resp, b->probe_resp_len);
+ print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
+ b->proberesp_ies, b->proberesp_ies_len);
+ print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
+ b->assocresp_ies, b->assocresp_ies_len);
+}
+
+static int wil_fix_bcon(struct wil6210_priv *wil,
+ struct cfg80211_beacon_data *bcon)
+{
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ int rc = 0;
+
+ if (bcon->probe_resp_len <= hlen)
+ return 0;
+
+ if (!bcon->proberesp_ies) {
+ bcon->proberesp_ies = f->u.probe_resp.variable;
+ bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
+ rc = 1;
+ }
+ if (!bcon->assocresp_ies) {
+ bcon->assocresp_ies = f->u.probe_resp.variable;
+ bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
+ rc = 1;
+ }
+
+ return rc;
+}
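+
+/*
+ * wil_fix_bcon() returns 1 when it had to derive proberesp_ies /
+ * assocresp_ies from the full probe response template; callers then
+ * re-dump the beacon data, e.g.
+ *
+ *	if (wil_fix_bcon(wil, bcon))
+ *		wil_print_bcon_data(bcon);
+ */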
+
+static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_beacon_data *bcon)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (wil_fix_bcon(wil, bcon)) {
+ wil_dbg_misc(wil, "Fixed bcon\n");
+ wil_print_bcon_data(bcon);
+ }
+
+	/* The FW does not form a regular beacon, so beacon IEs are not set.
+	 * For the DMG beacon, when it is supported, the beacon IEs will
+	 * be reused; add something like:
+ * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ * bcon->beacon_ies);
+ */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
+ bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ if (rc) {
+ wil_err(wil, "set_ie(PROBE_RESP) failed\n");
+ return rc;
+ }
+
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
+ bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+ if (rc) {
+ wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ struct cfg80211_crypto_settings *crypto = &info->crypto;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
+ info->privacy, info->auth_type);
+ wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
+ info->dtim_period);
+ print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+ info->ssid, info->ssid_len);
+ wil_print_bcon_data(bcon);
+ wil_print_crypto(wil, crypto);
+
+ if (wil_fix_bcon(wil, bcon)) {
+ wil_dbg_misc(wil, "Fixed bcon\n");
+ wil_print_bcon_data(bcon);
+ }
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+
+ mutex_lock(&wil->mutex);
+
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ if (rc)
+ goto out;
+
+ rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+ if (rc)
+ goto out;
+
+	/* IEs */
+	/* bcon 'head' IEs are not relevant for the 60 GHz band */
+	/*
+	 * The FW does not form a regular beacon, so beacon IEs are not set.
+	 * For the DMG beacon, when it is supported, the beacon IEs will
+	 * be reused; add something like:
+ * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ * bcon->beacon_ies);
+ */
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+
+ wil->privacy = info->privacy;
+
+ netif_carrier_on(ndev);
+
+ rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
+ channel->hw_value);
+ if (rc)
+ goto err_pcp_start;
+
+ rc = wil_bcast_init(wil);
+ if (rc)
+ goto err_bcast;
+
+ goto out; /* success */
+err_bcast:
+ wmi_pcp_stop(wil);
+err_pcp_start:
+ netif_carrier_off(ndev);
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ netif_carrier_off(ndev);
+ wil_set_recovery_state(wil, fw_recovery_idle);
+
+ mutex_lock(&wil->mutex);
+
+ wmi_pcp_stop(wil);
+
+ __wil_down(wil);
+ __wil_up(wil);
+
+ mutex_unlock(&wil->mutex);
+
+	/* Some of the functions above might fail (e.g. __wil_up). Nevertheless,
+	 * we return success because the AP has been stopped.
+ */
+ return 0;
+}
+
+static int wil_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct station_del_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(wil, params->mac, params->reason_code, false);
+ mutex_unlock(&wil->mutex);
+
+ return 0;
+}
+
+/* probe_client handling */
+static void wil_probe_client_handle(struct wil6210_priv *wil,
+ struct wil_probe_client_req *req)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wil_sta_info *sta = &wil->sta[req->cid];
+ /* assume STA is alive if it is still connected,
+ * else FW will disconnect it
+ */
+ bool alive = (sta->status == wil_sta_connected);
+
+ cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL);
+}
+
+static struct list_head *next_probe_client(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->probe_client_mutex);
+
+ if (!list_empty(&wil->probe_client_pending)) {
+ ret = wil->probe_client_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->probe_client_mutex);
+
+ return ret;
+}
+
+void wil_probe_client_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ probe_client_worker);
+ struct wil_probe_client_req *req;
+ struct list_head *lh;
+
+ while ((lh = next_probe_client(wil)) != NULL) {
+ req = list_entry(lh, struct wil_probe_client_req, list);
+
+ wil_probe_client_handle(wil, req);
+ kfree(req);
+ }
+}
+
+void wil_probe_client_flush(struct wil6210_priv *wil)
+{
+ struct wil_probe_client_req *req, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->probe_client_mutex);
+
+ list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) {
+ list_del(&req->list);
+ kfree(req);
+ }
+
+ mutex_unlock(&wil->probe_client_mutex);
+}
+
+static int wil_cfg80211_probe_client(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *peer, u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_probe_client_req *req;
+ int cid = wil_find_cid(wil, peer);
+
+ wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+
+ if (cid < 0)
+ return -ENOLINK;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->cid = cid;
+ req->cookie = cid;
+
+ mutex_lock(&wil->probe_client_mutex);
+ list_add_tail(&req->list, &wil->probe_client_pending);
+ mutex_unlock(&wil->probe_client_mutex);
+
+ *cookie = req->cookie;
+ queue_work(wil->wq_service, &wil->probe_client_worker);
+ return 0;
+}
+
+static int wil_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ if (params->ap_isolate >= 0) {
+ wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+ wil->ap_isolate, params->ap_isolate);
+ wil->ap_isolate = params->ap_isolate;
+ }
+
+ return 0;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+ .scan = wil_cfg80211_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .dump_station = wil_cfg80211_dump_station,
+ .remain_on_channel = wil_remain_on_channel,
+ .cancel_remain_on_channel = wil_cancel_remain_on_channel,
+ .mgmt_tx = wil_cfg80211_mgmt_tx,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .change_beacon = wil_cfg80211_change_beacon,
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+ .del_station = wil_cfg80211_del_station,
+ .probe_client = wil_cfg80211_probe_client,
+ .change_bss = wil_cfg80211_change_bss,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ /* TODO: set real value */
+ wiphy->max_scan_ssids = 10;
+ wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ /* TODO: enable P2P when integrated with supplicant:
+ * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+ */
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* TODO: figure this out */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+ wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+ int rc = 0;
+ struct wireless_dev *wdev;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+ sizeof(struct wil6210_priv));
+ if (!wdev->wiphy) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wil_wiphy_init(wdev->wiphy);
+
+ rc = wiphy_register(wdev->wiphy);
+ if (rc < 0)
+ goto out_failed_reg;
+
+ return wdev;
+
+out_failed_reg:
+ wiphy_free(wdev->wiphy);
+out:
+ kfree(wdev);
+
+ return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ dev_dbg(wil_to_dev(wil), "%s()\n", __func__);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
new file mode 100644
index 000000000..3249562d9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "trace.h"
+
+void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ netdev_err(ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+}
+
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ if (net_ratelimit()) {
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ netdev_err(ndev, "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
+ }
+}
+
+void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ netdev_info(ndev, "%pV", &vaf);
+ trace_wil6210_log_info(&vaf);
+ va_end(args);
+}
+
+void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000..bbc22d88f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,1462 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/power_supply.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+/* Nasty hack. Better have per device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
+u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */
+
+enum dbg_off_type {
+ doff_u32 = 0,
+ doff_x32 = 1,
+ doff_ulong = 2,
+ doff_io32 = 3,
+};
+
+/* debugfs file descriptor; @off is relative to the base address, e.g. to "wil" */
+struct dbg_off {
+ const char *name;
+ umode_t mode;
+ ulong off;
+ enum dbg_off_type type;
+};
+
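+/* One character is printed per descriptor: @_s when the HW "done" bit
+ * (dma.status bit 0) is set, otherwise @_h when an skb is attached and
+ * 'h' when it is not; callers use ('S', '_') for Rx and ('_', 'H') for Tx.
+ */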
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct vring *vring,
+ char _s, char _h)
+{
+ void __iomem *x = wmi_addr(wil, vring->hwtail);
+ u32 v;
+
+ seq_printf(s, "VRING %s = {\n", name);
+ seq_printf(s, " pa = %pad\n", &vring->pa);
+ seq_printf(s, " va = 0x%p\n", vring->va);
+ seq_printf(s, " size = %d\n", vring->size);
+ seq_printf(s, " swtail = %d\n", vring->swtail);
+ seq_printf(s, " swhead = %d\n", vring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ if (x) {
+ v = ioread32(x);
+ seq_printf(s, "0x%08x = %d\n", v, v);
+ } else {
+ seq_puts(s, "???\n");
+ }
+
+ if (vring->va && (vring->size < 1025)) {
+ uint i;
+
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &vring->va[i].tx;
+
+ if ((i % 64) == 0 && (i != 0))
+ seq_puts(s, "\n");
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (vring->ctx[i].skb ? _h : 'h'));
+ }
+ seq_puts(s, "\n");
+ }
+ seq_puts(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ struct vring *vring = &wil->vring_tx[i];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+
+ if (vring->va) {
+ int cid = wil->vring2cid_tid[i][0];
+ int tid = wil->vring2cid_tid[i][1];
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ int used = (vring->size + swhead - swtail)
+ % vring->size;
+ int avail = vring->size - used - 1;
+ char name[10];
+ char sidle[10];
+ /* performance monitoring: compute Tx idle time as a percentage
+ * of the interval since the previous read; counters are reset
+ * on every read
+ */
+ cycles_t now = get_cycles();
+ uint64_t idle = txdata->idle * 100;
+ uint64_t total = now - txdata->begin;
+
+ if (total != 0) {
+ do_div(idle, total);
+ snprintf(sidle, sizeof(sidle), "%3d%%",
+ (int)idle);
+ } else {
+ snprintf(sidle, sizeof(sidle), "N/A");
+ }
+ txdata->begin = now;
+ txdata->idle = 0ULL;
+
+ snprintf(name, sizeof(name), "tx_%2d", i);
+
+ if (cid < WIL6210_MAX_CID)
+ seq_printf(s,
+ "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+ wil->sta[cid].addr, cid, tid,
+ txdata->agg_wsize,
+ txdata->agg_timeout,
+ txdata->agg_amsdu ? "+" : "-",
+ used, avail, sidle);
+ else
+ seq_printf(s,
+ "\nBroadcast [%3d|%3d] idle %s\n",
+ used, avail, sidle);
+
+ wil_print_vring(s, wil, name, vring, '_', 'H');
+ }
+ }
+
+ return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+ .open = wil_vring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+ * we just read a memory block from the NIC; it may contain
+ * garbage, so check validity before using it
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_puts(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ int n = 0;
+ char printbuf[16 * 3 + 2];
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * @d.addr was already validated while
+ * reading the header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ while (n < len) {
+ int l = min(len - n, 16);
+
+ hex_dump_to_buffer(databuf + n, l,
+ 16, 1, printbuf,
+ sizeof(printbuf),
+ false);
+ seq_printf(s, " : %s\n", printbuf);
+ n += l;
+ }
+ }
+ } else {
+ seq_puts(s, "\n");
+ }
+ }
+ out:
+ seq_puts(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+ .open = wil_mbox_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ iowrite32(val, (void __iomem *)data);
+ wmb(); /* make sure write propagated to HW */
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ *val = ioread32((void __iomem *)data);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ void *value)
+{
+ return debugfs_create_file(name, mode, parent, value,
+ &fops_iomem_x32);
+}
+
+static int wil_debugfs_ulong_set(void *data, u64 val)
+{
+ *(ulong *)data = val;
+ return 0;
+}
+
+static int wil_debugfs_ulong_get(void *data, u64 *val)
+{
+ *val = *(ulong *)data;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
+ wil_debugfs_ulong_set, "%llu\n");
+
+static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ ulong *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong);
+}
+
+/**
+ * wil6210_debugfs_init_offset - create a set of debugfs files
+ * @wil: driver's context, used for printing
+ * @dbg: debugfs directory where the files will be created
+ * @base: base address used in address calculation
+ * @tbl: table with file descriptions, terminated with an empty element
+ *
+ * Creates files according to @tbl.
+ */
+static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
+ struct dentry *dbg, void *base,
+ const struct dbg_off * const tbl)
+{
+ int i;
+
+ for (i = 0; tbl[i].name; i++) {
+ struct dentry *f;
+
+ switch (tbl[i].type) {
+ case doff_u32:
+ f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ case doff_x32:
+ f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ case doff_ulong:
+ f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode,
+ dbg, base + tbl[i].off);
+ break;
+ case doff_io32:
+ f = wil_debugfs_create_iomem_x32(tbl[i].name,
+ tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
+ default:
+ f = ERR_PTR(-EINVAL);
+ }
+ if (IS_ERR_OR_NULL(f))
+ wil_err(wil, "Create file \"%s\": err %ld\n",
+ tbl[i].name, PTR_ERR(f));
+ }
+}
+
+static const struct dbg_off isr_off[] = {
+ {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32},
+ {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32},
+ {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32},
+ {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32},
+ {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32},
+ {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32},
+ {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32},
+ {},
+};
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name,
+ struct dentry *parent, u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off,
+ isr_off);
+
+ return 0;
+}
+
+static const struct dbg_off pseudo_isr_off[] = {
+ {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+ {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+ {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+ {},
+};
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
+ pseudo_isr_off);
+
+ return 0;
+}
+
+static const struct dbg_off lgc_itr_cnt_off[] = {
+ {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+ {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+ {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+ {},
+};
+
+static const struct dbg_off tx_itr_cnt_off[] = {
+ {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ doff_io32},
+ {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ doff_io32},
+ {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
+static const struct dbg_off rx_itr_cnt_off[] = {
+ {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ doff_io32},
+ {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ doff_io32},
+ {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ doff_io32},
+ {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ doff_io32},
+ {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ doff_io32},
+ {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ doff_io32},
+ {},
+};
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d, *dtx, *drx;
+
+ d = debugfs_create_dir("ITR_CNT", parent);
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ dtx = debugfs_create_dir("TX", d);
+ drx = debugfs_create_dir("RX", d);
+ if (IS_ERR_OR_NULL(dtx) || IS_ERR_OR_NULL(drx))
+ return -ENODEV;
+
+ wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
+ lgc_itr_cnt_off);
+
+ wil6210_debugfs_init_offset(wil, dtx, (void * __force)wil->csr,
+ tx_itr_cnt_off);
+
+ wil6210_debugfs_init_offset(wil, drx, (void * __force)wil->csr,
+ rx_itr_cnt_off);
+ return 0;
+}
+
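+/* Dumps one dword of device memory: write the target address to the
+ * "mem_addr" file (created from dbg_statics below), then read "mem_val";
+ * an address that does not map to device memory is reported as INVALID.
+ */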
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+ .open = wil_memread_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ loff_t pos = *ppos;
+ size_t available = blob->size;
+ void *buf;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+ pos, count);
+
+ ret = copy_to_user(user_buf, buf, count);
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ /* BUG:
+ * this code does NOT sync the device state with the rest of the
+ * system; use with care, debug only!!!
+ */
+ rtnl_lock();
+ dev_close(ndev);
+ ndev->flags &= ~IFF_UP;
+ rtnl_unlock();
+ wil_reset(wil, true);
+
+ return len;
+}
+
+static const struct file_operations fops_reset = {
+ .write = wil_write_file_reset,
+ .open = simple_open,
+};
+
+/*---write channel 1..4 to rxon for it, 0 to rxoff---*/
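+/* Example (illustrative values): writing "2" selects channel 2 and enables
+ * Rx; writing "0" disables Rx without changing the channel.
+ */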
+static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ long channel;
+ bool on;
+
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+ if (copy_from_user(kbuf, buf, len)) {
+ kfree(kbuf);
+ return -EIO;
+ }
+
+ kbuf[len] = '\0';
+ rc = kstrtol(kbuf, 0, &channel);
+ kfree(kbuf);
+ if (rc)
+ return rc;
+
+ if ((channel < 0) || (channel > 4)) {
+ wil_err(wil, "Invalid channel %ld\n", channel);
+ return -EINVAL;
+ }
+ on = !!channel;
+
+ if (on) {
+ rc = wmi_set_channel(wil, (int)channel);
+ if (rc)
+ return rc;
+ }
+
+ rc = wmi_rxon(wil, on);
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static const struct file_operations fops_rxon = {
+ .write = wil_write_file_rxon,
+ .open = simple_open,
+};
+
+/* block ack control, write:
+ * - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
+ * - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
+ * - "del_rx <CID> <TID> <reason>" to trigger DELBA for Rx side
+ */
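+/* Example (illustrative values): "add 1 16 0" triggers ADDBA on Tx ring 1
+ * with an aggregation window of 16 and no timeout; "del_rx 0 0 36" triggers
+ * DELBA for CID 0 / TID 0 with reason 36 (STA_LEAVING).
+ */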
+static ssize_t wil_write_back(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+ char cmd[9];
+ int p1, p2, p3;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 2)
+ return -EINVAL;
+
+ if (0 == strcmp(cmd, "add")) {
+ if (rc < 3) {
+ wil_err(wil, "BACK: add require at least 2 params\n");
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = 0;
+ wmi_addba(wil, p1, p2, p3);
+ } else if (0 == strcmp(cmd, "del_tx")) {
+ if (rc < 3)
+ p2 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ wmi_delba_tx(wil, p1, p2);
+ } else if (0 == strcmp(cmd, "del_rx")) {
+ if (rc < 3) {
+ wil_err(wil,
+ "BACK: del_rx require at least 2 params\n");
+ return -EINVAL;
+ }
+ if (rc < 4)
+ p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
+ wmi_delba_rx(wil, mk_cidxtid(p1, p2), p3);
+ } else {
+ wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_back(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char text[] = "block ack control, write:\n"
+ " - \"add <ringid> <agg_size> <timeout>\" to trigger ADDBA\n"
+ "If missing, <timeout> defaults to 0\n"
+ " - \"del_tx <ringid> <reason>\" to trigger DELBA for Tx side\n"
+ " - \"del_rx <CID> <TID> <reason>\" to trigger DELBA for Rx side\n"
+ "If missing, <reason> set to \"STA_LEAVING\" (36)\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_back = {
+ .read = wil_read_back,
+ .write = wil_write_back,
+ .open = simple_open,
+};
+
+/*---tx_mgmt---*/
+/* Write a management frame to this file to send it */
+static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct cfg80211_mgmt_tx_params params;
+ int rc;
+ void *frame = kmalloc(len, GFP_KERNEL);
+
+ if (!frame)
+ return -ENOMEM;
+
+ if (copy_from_user(frame, buf, len)) {
+ kfree(frame);
+ return -EIO;
+ }
+
+ params.buf = frame;
+ params.len = len;
+ params.chan = wdev->preset_chandef.chan;
+
+ rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
+
+ kfree(frame);
+ wil_info(wil, "%s() -> %d\n", __func__, rc);
+
+ return len;
+}
+
+static const struct file_operations fops_txmgmt = {
+ .write = wil_write_file_txmgmt,
+ .open = simple_open,
+};
+
+/* Write a WMI command (without the mbox header) to this file to send it.
+ * The WMI payload starts with the wil6210_mbox_hdr_wmi header.
+ */
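+/* The command id is taken from the header's little-endian id field and
+ * everything after the header is passed verbatim to wmi_send().
+ */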
+static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wil6210_mbox_hdr_wmi *wmi;
+ void *cmd;
+ int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+ u16 cmdid;
+ int rc, rc1;
+
+ if (cmdlen <= 0)
+ return -EINVAL;
+
+ wmi = kmalloc(len, GFP_KERNEL);
+ if (!wmi)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
+ if (rc < 0) {
+ kfree(wmi);
+ return rc;
+ }
+
+ cmd = &wmi[1];
+ cmdid = le16_to_cpu(wmi->id);
+
+ rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
+ kfree(wmi);
+
+ wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+
+ return rc;
+}
+
+static const struct file_operations fops_wmi = {
+ .write = wil_write_file_wmi,
+ .open = simple_open,
+};
+
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+ const char *prefix)
+{
+ char printbuf[16 * 3 + 2];
+ int i = 0;
+
+ while (i < len) {
+ int l = min(len - i, 16);
+
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, "%s%s\n", prefix, printbuf);
+ i += l;
+ }
+}
+
+static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
+{
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ seq_printf(s, " len = %d\n", len);
+ wil_seq_hexdump(s, p, len, " : ");
+
+ if (nr_frags) {
+ seq_printf(s, " nr_frags = %d\n", nr_frags);
+ for (i = 0; i < nr_frags; i++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ p = skb_frag_address_safe(frag);
+ seq_printf(s, " [%2d] : len = %d\n", i, len);
+ wil_seq_hexdump(s, p, len, " : ");
+ }
+ }
+}
+
+/*---------Tx/Rx descriptor------------*/
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct vring *vring;
+ bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+
+ vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
+
+ if (!vring->va) {
+ if (tx)
+ seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ else
+ seq_puts(s, "No Rx VRING\n");
+ return 0;
+ }
+
+ if (dbg_txdesc_index < vring->size) {
+ /* use struct vring_tx_desc for Rx as well,
+ * only field used, .dma.length, is the same
+ */
+ volatile struct vring_tx_desc *d =
+ &vring->va[dbg_txdesc_index].tx;
+ volatile u32 *u = (volatile u32 *)d;
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
+
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
+ dbg_txdesc_index);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = 0x%p\n", skb);
+
+ if (skb) {
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
+ }
+ seq_puts(s, "}\n");
+ } else {
+ if (tx)
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ dbg_vring_index, dbg_txdesc_index,
+ vring->size);
+ else
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
+ }
+
+ return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+ .open = wil_txdesc_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static char *wil_bfstatus_str(u32 status)
+{
+ switch (status) {
+ case 0:
+ return "Failed";
+ case 1:
+ return "OK";
+ case 2:
+ return "Retrying";
+ default:
+ return "??";
+ }
+}
+
+static bool is_all_zeros(void * const x_, size_t sz)
+{
+ /* if reply is all-0, ignore this CID */
+ u32 *x = x_;
+ int n;
+
+ for (n = 0; n < sz / sizeof(*x); n++)
+ if (x[n])
+ return false;
+
+ return true;
+}
+
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+ int rc;
+ int i;
+ struct wil6210_priv *wil = s->private;
+ struct wmi_notify_req_cmd cmd = {
+ .interval_usec = 0,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ u32 status;
+
+ cmd.cid = i;
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply,
+ sizeof(reply), 20);
+ /* if reply is all-0, ignore this CID */
+ if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt)))
+ continue;
+
+ status = le32_to_cpu(reply.evt.status);
+ seq_printf(s, "CID %d {\n"
+ " TSF = 0x%016llx\n"
+ " TxMCS = %2d TxTpt = %4d\n"
+ " SQI = %4d\n"
+ " Status = 0x%08x %s\n"
+ " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n"
+ " Goodput(rx:tx) %4d:%4d\n"
+ "}\n",
+ i,
+ le64_to_cpu(reply.evt.tsf),
+ le16_to_cpu(reply.evt.bf_mcs),
+ le32_to_cpu(reply.evt.tx_tpt),
+ reply.evt.sqi,
+ status, wil_bfstatus_str(status),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le32_to_cpu(reply.evt.tx_goodput));
+ }
+ return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+ .open = wil_bf_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ if (*ppos != 0) {
+ wil_err(wil, "Unable to set SSID substring from [%d]\n",
+ (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(wdev->ssid)) {
+ wil_err(wil, "SSID too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+ if (netif_running(ndev)) {
+ wil_err(wil, "Unable to change SSID on running interface\n");
+ return -EINVAL;
+ }
+
+ wdev->ssid_len = count;
+ return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+ buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+ .read = wil_read_file_ssid,
+ .write = wil_write_file_ssid,
+ .open = simple_open,
+};
+
+/*---------temp------------*/
+static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+{
+ switch (t) {
+ case 0:
+ case ~(u32)0:
+ seq_printf(s, "%s N/A\n", prefix);
+ break;
+ default:
+ seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ break;
+ }
+}
+
+static int wil_temp_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ u32 t_m, t_r;
+ int rc = wmi_get_temperature(wil, &t_m, &t_r);
+
+ if (rc) {
+ seq_puts(s, "Failed\n");
+ return 0;
+ }
+
+ print_temp(s, "T_mac =", t_m);
+ print_temp(s, "T_radio =", t_r);
+
+ return 0;
+}
+
+static int wil_temp_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_temp_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_temp = {
+ .open = wil_temp_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------freq------------*/
+static int wil_freq_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ u16 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
+
+ seq_printf(s, "Freq = %d\n", freq);
+
+ return 0;
+}
+
+static int wil_freq_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_freq_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_freq = {
+ .open = wil_freq_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------link------------*/
+static int wil_link_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct station_info sinfo;
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ break;
+ }
+ seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+ (p->data_port_open ? " data_port_open" : ""));
+
+ if (p->status == wil_sta_connected) {
+ rc = wil_cid_fill_sinfo(wil, i, &sinfo);
+ if (rc)
+ return rc;
+
+ seq_printf(s, " Tx_mcs = %d\n", sinfo.txrate.mcs);
+ seq_printf(s, " Rx_mcs = %d\n", sinfo.rxrate.mcs);
+ seq_printf(s, " SQ = %d\n", sinfo.signal);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_link_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_link_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_link = {
+ .open = wil_link_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------info------------*/
+static int wil_info_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct net_device *ndev = wil_to_ndev(wil);
+ int is_ac = power_supply_is_system_supplied();
+ int rx = atomic_xchg(&wil->isr_count_rx, 0);
+ int tx = atomic_xchg(&wil->isr_count_tx, 0);
+ static ulong rxf_old, txf_old;
+ ulong rxf = ndev->stats.rx_packets;
+ ulong txf = ndev->stats.tx_packets;
+ unsigned int i;
+
+ /* >0 : AC; 0 : battery; <0 : error */
+ seq_printf(s, "AC powered : %d\n", is_ac);
+ seq_printf(s, "Rx irqs:packets : %8d : %8ld\n", rx, rxf - rxf_old);
+ seq_printf(s, "Tx irqs:packets : %8d : %8ld\n", tx, txf - txf_old);
+ rxf_old = rxf;
+ txf_old = txf;
+
+#define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \
+ " " __stringify(x) : ""
+
+ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
+ unsigned long state = txq->state;
+
+ seq_printf(s, "Tx queue[%i] state : 0x%lx%s%s%s\n", i, state,
+ CHECK_QSTATE(DRV_XOFF),
+ CHECK_QSTATE(STACK_XOFF),
+ CHECK_QSTATE(FROZEN)
+ );
+ }
+#undef CHECK_QSTATE
+ return 0;
+}
+
+static int wil_info_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_info_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_info = {
+ .open = wil_info_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------recovery------------*/
+/* mode = [manual|auto]
+ * state = [idle|pending|running]
+ */
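+/* Example: when "state" reads "pending", writing "run" moves recovery to
+ * the "running" state; any other input only logs an error.
+ */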
+static ssize_t wil_read_file_recovery(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char buf[80];
+ int n;
+ static const char * const sstate[] = {"idle", "pending", "running"};
+
+ n = snprintf(buf, sizeof(buf), "mode = %s\nstate = %s\n",
+ no_fw_recovery ? "manual" : "auto",
+ sstate[wil->recovery_state]);
+
+ n = min_t(int, n, sizeof(buf));
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, n);
+}
+
+static ssize_t wil_write_file_recovery(struct file *file,
+ const char __user *buf_,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ static const char run_command[] = "run";
+ char buf[sizeof(run_command) + 1]; /* to detect "runx" */
+ ssize_t rc;
+
+ if (wil->recovery_state != fw_recovery_pending) {
+ wil_err(wil, "No recovery pending\n");
+ return -EINVAL;
+ }
+
+ if (*ppos != 0) {
+ wil_err(wil, "Offset [%d]\n", (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(buf)) {
+ wil_err(wil, "Input too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, buf_, count);
+ if (rc < 0)
+ return rc;
+
+ buf[rc] = '\0';
+ if (0 == strcmp(buf, run_command))
+ wil_set_recovery_state(wil, fw_recovery_running);
+ else
+ wil_err(wil, "Bad recovery command \"%s\"\n", buf);
+
+ return rc;
+}
+
+static const struct file_operations fops_recovery = {
+ .read = wil_read_file_recovery,
+ .write = wil_write_file_recovery,
+ .open = simple_open,
+};
+
+/*---------Station matrix------------*/
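+/* Reorder buffer map: one character per slot - the slot at @head_seq_num is
+ * shown as 'O' (frame buffered) or '|' (empty), any other slot as '*'
+ * (frame buffered) or '_' (empty).
+ */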
+static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
+{
+ int i;
+ u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+
+ seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
+ r->head_seq_num);
+ for (i = 0; i < r->buf_size; i++) {
+ if (i == index)
+ seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
+ else
+ seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
+ }
+ seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+}
+
+static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, tid;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ break;
+ }
+ seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+ (p->data_port_open ? " data_port_open" : ""));
+
+ if (p->status == wil_sta_connected) {
+ spin_lock_bh(&p->tid_rx_lock);
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+
+ if (r) {
+ seq_printf(s, "[%2d] ", tid);
+ wil_print_rxtid(s, r);
+ }
+ }
+ spin_unlock_bh(&p->tid_rx_lock);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_sta_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_sta_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_sta = {
+ .open = wil_sta_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*----------------*/
+static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+ char name[32];
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ struct debugfs_blob_wrapper *blob = &wil->blobs[i];
+ const struct fw_map *map = &fw_mapping[i];
+
+ if (!map->name)
+ continue;
+
+ blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
+ blob->size = map->to - map->from;
+ snprintf(name, sizeof(name), "blob_%s", map->name);
+ wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob);
+ }
+}
+
+/* misc files */
+static const struct {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+} dbg_files[] = {
+ {"mbox", S_IRUGO, &fops_mbox},
+ {"vrings", S_IRUGO, &fops_vring},
+ {"stations", S_IRUGO, &fops_sta},
+ {"desc", S_IRUGO, &fops_txdesc},
+ {"bf", S_IRUGO, &fops_bf},
+ {"ssid", S_IRUGO | S_IWUSR, &fops_ssid},
+ {"mem_val", S_IRUGO, &fops_memread},
+ {"reset", S_IWUSR, &fops_reset},
+ {"rxon", S_IWUSR, &fops_rxon},
+ {"tx_mgmt", S_IWUSR, &fops_txmgmt},
+ {"wmi_send", S_IWUSR, &fops_wmi},
+ {"back", S_IRUGO | S_IWUSR, &fops_back},
+ {"temp", S_IRUGO, &fops_temp},
+ {"freq", S_IRUGO, &fops_freq},
+ {"link", S_IRUGO, &fops_link},
+ {"info", S_IRUGO, &fops_info},
+ {"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
+};
+
+static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbg_files); i++)
+ debugfs_create_file(dbg_files[i].name, dbg_files[i].mode, dbg,
+ wil, dbg_files[i].fops);
+}
+
+/* interrupt control blocks */
+static const struct {
+ const char *name;
+ u32 icr_off;
+} dbg_icr[] = {
+ {"USER_ICR", HOSTADDR(RGF_USER_USER_ICR)},
+ {"DMA_EP_TX_ICR", HOSTADDR(RGF_DMA_EP_TX_ICR)},
+ {"DMA_EP_RX_ICR", HOSTADDR(RGF_DMA_EP_RX_ICR)},
+ {"DMA_EP_MISC_ICR", HOSTADDR(RGF_DMA_EP_MISC_ICR)},
+};
+
+static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
+ struct dentry *dbg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbg_icr); i++)
+ wil6210_debugfs_create_ISR(wil, dbg_icr[i].name, dbg,
+ dbg_icr[i].icr_off);
+}
+
+#define WIL_FIELD(name, mode, type) { __stringify(name), mode, \
+ offsetof(struct wil6210_priv, name), type}
+
+/* fields in struct wil6210_priv */
+static const struct dbg_off dbg_wil_off[] = {
+ WIL_FIELD(privacy, S_IRUGO, doff_u32),
+ WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
+ WIL_FIELD(fw_version, S_IRUGO, doff_u32),
+ WIL_FIELD(hw_version, S_IRUGO, doff_x32),
+ WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
+ WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
+ {},
+};
+
+static const struct dbg_off dbg_wil_regs[] = {
+ {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+ doff_io32},
+ {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {},
+};
+
+/* static parameters */
+static const struct dbg_off dbg_statics[] = {
+ {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
+ {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
+ {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
+ {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+ doff_u32},
+ {},
+};
+
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ wil6210_debugfs_init_files(wil, dbg);
+ wil6210_debugfs_init_isr(wil, dbg);
+ wil6210_debugfs_init_blobs(wil, dbg);
+ wil6210_debugfs_init_offset(wil, dbg, wil, dbg_wil_off);
+ wil6210_debugfs_init_offset(wil, dbg, (void * __force)wil->csr,
+ dbg_wil_regs);
+ wil6210_debugfs_init_offset(wil, dbg, NULL, dbg_statics);
+
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
new file mode 100644
index 000000000..0ea695ff9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+
+static int wil_ethtoolops_begin(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ mutex_lock(&wil->mutex);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ return 0;
+}
+
+static void wil_ethtoolops_complete(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_unlock(&wil->mutex);
+}
+
+static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *cp)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ u32 tx_itr_en, tx_itr_val = 0;
+ u32 rx_itr_en, rx_itr_val = 0;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ tx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+ if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
+ tx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+
+ rx_itr_en = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+ if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
+ rx_itr_val =
+ ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+
+ cp->tx_coalesce_usecs = tx_itr_val;
+ cp->rx_coalesce_usecs = rx_itr_val;
+ return 0;
+}
+
+static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *cp)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+ cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
+ return -EINVAL;
+ }
+
+ /* only @rx_coalesce_usecs and @tx_coalesce_usecs are supported;
+ * other parameters are ignored
+ */
+
+ if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX ||
+ cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
+ goto out_bad;
+
+ wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
+ wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+ wil_configure_interrupt_moderation(wil);
+
+ return 0;
+
+out_bad:
+ wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n");
+ print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4,
+ cp, sizeof(*cp), false);
+ return -EINVAL;
+}
+
+static const struct ethtool_ops wil_ethtool_ops = {
+ .begin = wil_ethtoolops_begin,
+ .complete = wil_ethtoolops_complete,
+ .get_drvinfo = cfg80211_get_drvinfo,
+ .get_coalesce = wil_ethtoolops_get_coalesce,
+ .set_coalesce = wil_ethtoolops_set_coalesce,
+};
+
+void wil_set_ethtoolops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &wil_ethtool_ops;
+}
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
new file mode 100644
index 000000000..d9aedad0c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include "wil6210.h"
+#include "fw.h"
+
+/*(DEBLOBBED)*/
+
+/* target operations */
+/* register read */
+#define R(a) ioread32(wil->csr + HOSTADDR(a))
+/* register write. wmb() to make sure it is completed */
+#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
+/* register set = read, OR, write */
+#define S(a, v) W(a, R(a) | v)
+/* register clear = read, AND with inverted, write */
+#define C(a, v) W(a, R(a) & ~v)
+
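+/* Fill @count bytes of ioremapped memory with @val, one dword at a time;
+ * callers ensure @count is a multiple of 4. The loop is biased by 4 to
+ * avoid wrapping the unsigned counter.
+ */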
+static
+void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(val, d++);
+}
+
+#include "fw_inc.c"
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
new file mode 100644
index 000000000..7a2c6c129
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define WIL_FW_SIGNATURE (0x36323130) /* '0126' */
+#define WIL_FW_FMT_VERSION (1) /* format version driver supports */
+
+enum wil_fw_record_type {
+ wil_fw_type_comment = 1,
+ wil_fw_type_data = 2,
+ wil_fw_type_fill = 3,
+ wil_fw_type_action = 4,
+ wil_fw_type_verify = 5,
+ wil_fw_type_file_header = 6,
+ wil_fw_type_direct_write = 7,
+ wil_fw_type_gateway_data = 8,
+ wil_fw_type_gateway_data4 = 9,
+};
+
+struct wil_fw_record_head {
+ __le16 type; /* enum wil_fw_record_type */
+ __le16 flags; /* to be defined */
+ __le32 size; /* whole record, bytes after head */
+} __packed;
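+/* A firmware image is a flat sequence of records: each record is a
+ * wil_fw_record_head followed by @size bytes of type-specific payload,
+ * and the first record must be the file header (see wil_fw_verify()).
+ */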
+
+/* data block. write starting from @addr
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_data, data)
+ */
+struct wil_fw_record_data { /* type == wil_fw_type_data */
+ __le32 addr;
+ __le32 data[0]; /* [data_size], see above */
+} __packed;
+
+/* fill with constant @value, @size bytes starting from @addr */
+struct wil_fw_record_fill { /* type == wil_fw_type_fill */
+ __le32 addr;
+ __le32 value;
+ __le32 size;
+} __packed;
+
+/* free-form comment
+ * for informational purposes; data_size is @head.size from the record header
+ */
+struct wil_fw_record_comment { /* type == wil_fw_type_comment */
+ u8 data[0]; /* free-form data [data_size], see above */
+} __packed;
+
+/* perform action
+ * data_size = @head.size - offsetof(struct wil_fw_record_action, data)
+ */
+struct wil_fw_record_action { /* type == wil_fw_type_action */
+ __le32 action; /* action to perform: reset, wait for fw ready etc. */
+ __le32 data[0]; /* action specific, [data_size], see above */
+} __packed;
+
+/* data block for struct wil_fw_record_direct_write */
+struct wil_fw_data_dwrite {
+ __le32 addr;
+ __le32 value;
+ __le32 mask;
+} __packed;
+
+/* write @value to @addr,
+ * preserving the original bits selected by @mask;
+ * data_size is @head.size, where @head is the record header
+ */
+struct wil_fw_record_direct_write { /* type == wil_fw_type_direct_write */
+ struct wil_fw_data_dwrite data[0];
+} __packed;
+
+/* verify condition: [@addr] & @mask == @value
+ * if condition not met, firmware download fails
+ */
+struct wil_fw_record_verify { /* type == wil_fw_verify */
+ __le32 addr; /* read from this address */
+ __le32 value; /* reference value */
+ __le32 mask; /* mask for verification */
+} __packed;
+
+/* file header
+ * First record of every file
+ */
+struct wil_fw_record_file_header {
+ __le32 signature; /* Wilocity signature */
+ __le32 reserved;
+ __le32 crc; /* crc32 of the following data */
+ __le32 version; /* format version */
+ __le32 data_len; /* total data in file, including this record */
+ u8 comment[32]; /* short description */
+} __packed;
+
+/* 1-dword gateway */
+/* data block for the struct wil_fw_record_gateway_data */
+struct wil_fw_data_gw {
+ __le32 addr;
+ __le32 value;
+} __packed;
+
+/* gateway write block.
+ * write starting address and values from the data buffer
+ * through the gateway
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data, data)
+ */
+struct wil_fw_record_gateway_data { /* type == wil_fw_type_gateway_data */
+ __le32 gateway_addr_addr;
+ __le32 gateway_value_addr;
+ __le32 gateway_cmd_addr;
+ __le32 gateway_ctrl_address;
+#define WIL_FW_GW_CTL_BUSY BIT(29) /* gateway busy performing operation */
+#define WIL_FW_GW_CTL_RUN BIT(30) /* start gateway operation */
+ __le32 command;
+ struct wil_fw_data_gw data[0]; /* total size [data_size], see above */
+} __packed;
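+/* For every gateway write the loader stores the target address in
+ * [gateway_addr_addr], the value in [gateway_value_addr] and @command in
+ * [gateway_cmd_addr], then sets WIL_FW_GW_CTL_RUN in [gateway_ctrl_address]
+ * and polls it until WIL_FW_GW_CTL_BUSY clears; see gw_write() in fw_inc.c.
+ */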
+
+/* 4-dword gateway */
+/* data block for the struct wil_fw_record_gateway_data4 */
+struct wil_fw_data_gw4 {
+ __le32 addr;
+ __le32 value[4];
+} __packed;
+
+/* gateway write block.
+ * write starting address and values from the data buffer
+ * through the gateway
+ * data_size inferred from the @head.size. For this case,
+ * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data4, data)
+ */
+struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */
+ __le32 gateway_addr_addr;
+ __le32 gateway_value_addr[4];
+ __le32 gateway_cmd_addr;
+ __le32 gateway_ctrl_address; /* same logic as for 1-dword gw */
+ __le32 command;
+ struct wil_fw_data_gw4 data[0]; /* total size [data_size], see above */
+} __packed;
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
new file mode 100644
index 000000000..c3f815f55
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Algorithmic part of the firmware download.
+ * To be included in the container file providing the framework.
+ */
+
+#define wil_err_fw(wil, fmt, arg...) wil_err(wil, "ERR[ FW ]" fmt, ##arg)
+#define wil_dbg_fw(wil, fmt, arg...) wil_dbg(wil, "DBG[ FW ]" fmt, ##arg)
+#define wil_hex_dump_fw(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[ FW ]" prefix_str, \
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
+ ioaddr = wmi_buffer(wil, val); \
+ if (!ioaddr) { \
+ wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
+ le32_to_cpu(val)); \
+ return -EINVAL; \
+ } \
+ } while (0)
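+/* Note: on failure FW_ADDR_CHECK() returns -EINVAL from the function it is
+ * used in; @val is considered bad when wmi_buffer() cannot map it to an
+ * ioremapped host address.
+ */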
+
+/**
+ * wil_fw_verify - verify firmware file validity
+ *
+ * Perform various checks on the firmware file header;
+ * individual records are not validated.
+ *
+ * Return: file size or a negative error code
+ */
+static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
+{
+ const struct wil_fw_record_head *hdr = (const void *)data;
+ struct wil_fw_record_file_header fh;
+ const struct wil_fw_record_file_header *fh_;
+ u32 crc;
+ u32 dlen;
+
+ if (size % 4) {
+ wil_err_fw(wil, "image size not aligned: %zu\n", size);
+ return -EINVAL;
+ }
+ /* have enough data for the file header? */
+ if (size < sizeof(*hdr) + sizeof(fh)) {
+ wil_err_fw(wil, "file too short: %zu bytes\n", size);
+ return -EINVAL;
+ }
+
+ /* start with the file header? */
+ if (le16_to_cpu(hdr->type) != wil_fw_type_file_header) {
+ wil_err_fw(wil, "no file header\n");
+ return -EINVAL;
+ }
+
+ /* data_len */
+ fh_ = (struct wil_fw_record_file_header *)&hdr[1];
+ dlen = le32_to_cpu(fh_->data_len);
+ if (dlen % 4) {
+ wil_err_fw(wil, "data length not aligned: %lu\n", (ulong)dlen);
+ return -EINVAL;
+ }
+ if (size < dlen) {
+ wil_err_fw(wil, "file truncated at %zu/%lu\n",
+ size, (ulong)dlen);
+ return -EINVAL;
+ }
+ if (dlen < sizeof(*hdr) + sizeof(fh)) {
+ wil_err_fw(wil, "data length too short: %lu\n", (ulong)dlen);
+ return -EINVAL;
+ }
+
+ /* signature */
+ if (le32_to_cpu(fh_->signature) != WIL_FW_SIGNATURE) {
+ wil_err_fw(wil, "bad header signature: 0x%08x\n",
+ le32_to_cpu(fh_->signature));
+ return -EINVAL;
+ }
+
+ /* version */
+ if (le32_to_cpu(fh_->version) > WIL_FW_FMT_VERSION) {
+ wil_err_fw(wil, "unsupported header version: %d\n",
+ le32_to_cpu(fh_->version));
+ return -EINVAL;
+ }
+
+ /* checksum: ~crc32(~0, data, size) with fh.crc set to 0 */
+ fh = *fh_;
+ fh.crc = 0;
+
+ crc = crc32_le(~0, (unsigned char const *)hdr, sizeof(*hdr));
+ crc = crc32_le(crc, (unsigned char const *)&fh, sizeof(fh));
+ crc = crc32_le(crc, (unsigned char const *)&fh_[1],
+ dlen - sizeof(*hdr) - sizeof(fh));
+ crc = ~crc;
+
+ if (crc != le32_to_cpu(fh_->crc)) {
+ wil_err_fw(wil, "checksum mismatch:"
+ " calculated for %lu bytes 0x%08x != 0x%08x\n",
+ (ulong)dlen, crc, le32_to_cpu(fh_->crc));
+ return -EINVAL;
+ }
+
+ return (int)dlen;
+}
+
+static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
+
+ return 0;
+}
+
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_data *d = data;
+ void __iomem *dst;
+ size_t s = size - sizeof(*d);
+
+ if (size < sizeof(*d) + sizeof(u32)) {
+ wil_err_fw(wil, "data record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ FW_ADDR_CHECK(dst, d->addr, "address");
+ wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
+ s);
+ wil_memcpy_toio_32(dst, d->data, s);
+ wmb(); /* finish before processing next record */
+
+ return 0;
+}
+
+static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_fill *d = data;
+ void __iomem *dst;
+ u32 v;
+ size_t s = (size_t)le32_to_cpu(d->size);
+
+ if (size != sizeof(*d)) {
+ wil_err_fw(wil, "bad size for fill record: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if (s < sizeof(u32)) {
+ wil_err_fw(wil, "fill size too short: %zu\n", s);
+ return -EINVAL;
+ }
+
+ if (s % sizeof(u32)) {
+ wil_err_fw(wil, "fill size not aligned: %zu\n", s);
+ return -EINVAL;
+ }
+
+ FW_ADDR_CHECK(dst, d->addr, "address");
+
+ v = le32_to_cpu(d->value);
+ wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
+ le32_to_cpu(d->addr), v, s);
+ wil_memset_toio_32(dst, v, s);
+ wmb(); /* finish before processing next record */
+
+ return 0;
+}
+
+static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_file_header *d = data;
+
+ if (size != sizeof(*d)) {
+ wil_err_fw(wil, "file header length incorrect: %zu\n", size);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil, "new file, ver. %d, %i bytes\n",
+ le32_to_cpu(d->version), le32_to_cpu(d->data_len));
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
+ sizeof(d->comment), true);
+
+ return 0;
+}
+
+static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_direct_write *d = data;
+ const struct wil_fw_data_dwrite *block = d->data;
+ int n, i;
+
+ if (size % sizeof(*block)) {
+ wil_err_fw(wil, "record size not aligned on %zu: %zu\n",
+ sizeof(*block), size);
+ return -EINVAL;
+ }
+ n = size / sizeof(*block);
+
+ for (i = 0; i < n; i++) {
+ void __iomem *dst;
+ u32 m = le32_to_cpu(block[i].mask);
+ u32 v = le32_to_cpu(block[i].value);
+ u32 x, y;
+
+ FW_ADDR_CHECK(dst, block[i].addr, "address");
+
+ x = ioread32(dst);
+ y = (x & m) | (v & ~m);
+ wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
+ "(old 0x%08x val 0x%08x mask 0x%08x)\n",
+ le32_to_cpu(block[i].addr), y, x, v, m);
+ iowrite32(y, dst);
+ wmb(); /* finish before processing next record */
+ }
+
+ return 0;
+}
+
+static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
+ void __iomem *gwa_cmd, void __iomem *gwa_ctl, u32 gw_cmd,
+ u32 a)
+{
+ unsigned delay = 0;
+
+ iowrite32(a, gwa_addr);
+ iowrite32(gw_cmd, gwa_cmd);
+ wmb(); /* finish before activate gw */
+
+ iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+ do {
+ udelay(1); /* typical time is a few usec */
+ if (delay++ > 100) {
+ wil_err_fw(wil, "gw timeout\n");
+ return -EINVAL;
+ }
+ } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+
+ return 0;
+}
+
+static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_gateway_data *d = data;
+ const struct wil_fw_data_gw *block = d->data;
+ void __iomem *gwa_addr;
+ void __iomem *gwa_val;
+ void __iomem *gwa_cmd;
+ void __iomem *gwa_ctl;
+ u32 gw_cmd;
+ int n, i;
+
+ if (size < sizeof(*d) + sizeof(*block)) {
+ wil_err_fw(wil, "gateway record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if ((size - sizeof(*d)) % sizeof(*block)) {
+ wil_err_fw(wil, "gateway record data size"
+ " not aligned on %zu: %zu\n",
+ sizeof(*block), size - sizeof(*d));
+ return -EINVAL;
+ }
+ n = (size - sizeof(*d)) / sizeof(*block);
+
+ gw_cmd = le32_to_cpu(d->command);
+
+ wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
+ n, gw_cmd);
+
+ FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
+ FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
+ FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+
+ wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
+ " cmd 0x%08x ctl 0x%08x\n",
+ le32_to_cpu(d->gateway_addr_addr),
+ le32_to_cpu(d->gateway_value_addr),
+ le32_to_cpu(d->gateway_cmd_addr),
+ le32_to_cpu(d->gateway_ctrl_address));
+
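+ /* for every block: place the value in the gateway value register,
+ * then let gw_write() latch the target address and trigger the access
+ */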
+ for (i = 0; i < n; i++) {
+ int rc;
+ u32 a = le32_to_cpu(block[i].addr);
+ u32 v = le32_to_cpu(block[i].value);
+
+ wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
+ i, a, v);
+
+ iowrite32(v, gwa_val);
+ rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_gateway_data4 *d = data;
+ const struct wil_fw_data_gw4 *block = d->data;
+ void __iomem *gwa_addr;
+ void __iomem *gwa_val[ARRAY_SIZE(block->value)];
+ void __iomem *gwa_cmd;
+ void __iomem *gwa_ctl;
+ u32 gw_cmd;
+ int n, i, k;
+
+ if (size < sizeof(*d) + sizeof(*block)) {
+ wil_err_fw(wil, "gateway4 record too short: %zu\n", size);
+ return -EINVAL;
+ }
+
+ if ((size - sizeof(*d)) % sizeof(*block)) {
+ wil_err_fw(wil, "gateway4 record data size"
+ " not aligned on %zu: %zu\n",
+ sizeof(*block), size - sizeof(*d));
+ return -EINVAL;
+ }
+ n = (size - sizeof(*d)) / sizeof(*block);
+
+ gw_cmd = le32_to_cpu(d->command);
+
+ wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
+ n, gw_cmd);
+
+ FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
+ "gateway_value_addr");
+ FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
+ FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+
+ wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
+ le32_to_cpu(d->gateway_addr_addr),
+ le32_to_cpu(d->gateway_cmd_addr),
+ le32_to_cpu(d->gateway_ctrl_address));
+ wil_hex_dump_fw("val addresses: ", DUMP_PREFIX_NONE, 16, 4,
+ d->gateway_value_addr, sizeof(d->gateway_value_addr),
+ false);
+
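+ /* same as the plain gateway write, but each block carries an array of
+ * values that are written to the corresponding gateway value registers
+ * before the gateway is triggered
+ */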
+ for (i = 0; i < n; i++) {
+ int rc;
+ u32 a = le32_to_cpu(block[i].addr);
+ u32 v[ARRAY_SIZE(block->value)];
+
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ v[k] = le32_to_cpu(block[i].value[k]);
+
+ wil_dbg_fw(wil, " gw4 write[%3d] [0x%08x] <==\n", i, a);
+ wil_hex_dump_fw(" val ", DUMP_PREFIX_NONE, 16, 4, v,
+ sizeof(v), false);
+
+ for (k = 0; k < ARRAY_SIZE(block->value); k++)
+ iowrite32(v[k], gwa_val[k]);
+ rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct {
+ int type;
+ int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
+} wil_fw_handlers[] = {
+ {wil_fw_type_comment, fw_handle_comment},
+ {wil_fw_type_data, fw_handle_data},
+ {wil_fw_type_fill, fw_handle_fill},
+ /* wil_fw_type_action */
+ /* wil_fw_type_verify */
+ {wil_fw_type_file_header, fw_handle_file_header},
+ {wil_fw_type_direct_write, fw_handle_direct_write},
+ {wil_fw_type_gateway_data, fw_handle_gateway_data},
+ {wil_fw_type_gateway_data4, fw_handle_gateway_data4},
+};
+
+static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
+ const void *data, size_t size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
+ if (wil_fw_handlers[i].type == type)
+ return wil_fw_handlers[i].handler(wil, data, size);
+ }
+
+ wil_err_fw(wil, "unknown record type: %d\n", type);
+ return -EINVAL;
+}
+
+/**
+ * wil_fw_load - load FW into device
+ *
+ * Load the FW and uCode code and data to the corresponding device
+ * memory regions
+ *
+ * Return error code
+ */
+static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr;
+ size_t s, hdr_sz;
+
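+ /* walk the records: each one is a header (type, size) followed by
+ * "size" bytes of payload; stop once the remaining data cannot hold
+ * a complete record
+ */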
+ for (hdr = data;; hdr = (const void *)hdr + s, size -= s) {
+ if (size < sizeof(*hdr))
+ break;
+ hdr_sz = le32_to_cpu(hdr->size);
+ s = sizeof(*hdr) + hdr_sz;
+ if (s > size)
+ break;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
+ &hdr[1], hdr_sz);
+ if (rc)
+ return rc;
+ }
+ if (size) {
+ wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
+ if (size >= sizeof(*hdr)) {
+ wil_err_fw(wil, "Stop at offset %ld"
+ " record type %d [%zd bytes]\n",
+ (long)((const void *)hdr - data),
+ le16_to_cpu(hdr->type), hdr_sz);
+ }
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * wil_request_firmware - Request firmware and load to device
+ *
+ * Request firmware image from the file and load it to device
+ *
+ * Return error code
+ */
+int wil_request_firmware(struct wil6210_priv *wil, const char *name)
+{
+ int rc, rc1;
+ const struct firmware *fw;
+ size_t sz;
+ const void *d;
+
+ rc = reject_firmware(&fw, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load firmware %s\n", name);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
+
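+ /* the file may contain several images back to back; wil_fw_verify()
+ * returns the size of the next valid image, which is then parsed
+ * record by record by wil_fw_load()
+ */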
+ for (sz = fw->size, d = fw->data; sz; sz -= rc1, d += rc1) {
+ rc1 = wil_fw_verify(wil, d, sz);
+ if (rc1 < 0) {
+ rc = rc1;
+ goto out;
+ }
+ rc = wil_fw_load(wil, d, rc1);
+ if (rc < 0)
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000..28ffc1846
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,625 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+#include "trace.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE
+ * Its bits represent the OR'ed bits from the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
+ BIT_DMA_EP_RX_ICR_RX_HTRSH)
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
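+/* read the ICR and make sure it is cleared: in Clear-On-Read mode the read
+ * itself clears it, otherwise the same value is written back (Write-1-to-Clear)
+ */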
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+ clear_bit(wil_status_irqen, wil->status);
+}
+
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_TX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_RX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_MISC, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ set_bit(wil_status_irqen, wil->status);
+
+ iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil_mask_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil_unmask_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ wil6210_unmask_irq_misc(wil);
+}
+
+/* target write operation */
+#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
+
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ /* disable interrupt moderation for monitor
+ * to get better timestamp precision
+ */
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ return;
+
+ /* Disable and clear tx counter before (re)configuration */
+ W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+ wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
+ wil->tx_max_burst_duration);
+ /* Configure TX max burst duration timer to use usec units */
+ W(RGF_DMA_ITR_TX_CNT_CTL,
+ BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear tx idle counter before (re)configuration */
+ W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+ wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
+ wil->tx_interframe_timeout);
+ /* Configure TX interframe timeout timer to use usec units */
+ W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx counter before (re)configuration */
+ W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+ wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
+ wil->rx_max_burst_duration);
+ /* Configure RX max burst duration timer to use usec units */
+ W(RGF_DMA_ITR_RX_CNT_CTL,
+ BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+
+ /* Disable and clear rx idle counter before (re)configuration */
+ W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+ W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+ wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
+ wil->rx_interframe_timeout);
+ /* Configure RX interframe timeout timer to use usec units */
+ W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+ BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+}
+
+#undef W
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx(wil);
+
+ /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
+ * moderation is not used. Interrupt moderation may cause RX
+ * buffer overflow while RX_DONE is delayed. The required
+ * action is always the same - should empty the accumulated
+ * packets from the RX ring.
+ */
+ if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
+ wil_dbg_irq(wil, "RX done\n");
+
+ if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
+ wil_err_ratelimited(wil,
+ "Received \"Rx buffer is in risk of overflow\" interrupt\n");
+
+ isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
+ BIT_DMA_EP_RX_ICR_RX_HTRSH);
+ if (likely(test_bit(wil_status_reset_done, wil->status))) {
+ if (likely(test_bit(wil_status_napi_en, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil,
+ "Got Rx interrupt while stopping interface\n");
+ }
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ /* Rx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_rx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ bool need_unmask = true;
+
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx(wil);
+
+ if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
+ wil_dbg_irq(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ /* clear also all VRING interrupts */
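+ /* no need to handle them individually - the Tx NAPI poll
+ * walks all vrings anyway
+ */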
+ isr &= ~(BIT(25) - 1UL);
+ if (likely(test_bit(wil_status_reset_done, wil->status))) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ need_unmask = false;
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx interrupt while in reset\n");
+ }
+ }
+
+ if (unlikely(isr))
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ /* Tx IRQ will be enabled when NAPI processing finished */
+
+ atomic_inc(&wil->isr_count_tx);
+
+ if (unlikely(need_unmask))
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+ struct device *dev = &wil_to_ndev(wil)->dev;
+ char *envp[3] = {
+ [0] = "SOURCE=wil6210",
+ [1] = "EVENT=FW_ERROR",
+ [2] = NULL,
+ };
+ wil_err(wil, "Notify about firmware error\n");
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+ /* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_misc(isr);
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_misc(wil);
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_err(wil, "Firmware error detected\n");
+ clear_bit(wil_status_fwready, wil->status);
+ /*
+ * do not clear @isr here - the 2nd part is done in the thread
+ * handler; there, user space gets notified, and that should be
+ * done in non-atomic context
+ */
+ }
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_irq(wil, "IRQ: FW ready\n");
+ wil_cache_mbox_regs(wil);
+ set_bit(wil_status_reset_done, wil->status);
+ /**
+ * Actual FW ready indicated by the
+ * WMI_FW_READY_EVENTID
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ trace_wil6210_irq_misc_thread(isr);
+ wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ wil_fw_error_recovery(wil);
+ }
+
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_irq(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_irq(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle bug in hardware that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the issue if it happens and print everything we can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ if (!test_bit(wil_status_irqen, wil->status)) {
+ u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_rx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_tx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_misc = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
+ if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
+ return IRQ_NONE;
+
+ /* FIXME: IRQ mask debug */
+ if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
+ return IRQ_NONE;
+
+ trace_wil6210_irq_pseudo(pseudo_cause);
+ wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover real IRQ cause
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler called right here
+ * - threaded handler called later
+ *
+ * The hard IRQ handler reads and clears the ISR.
+ *
+ * If a threaded handler is requested, the hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves the ISR register value
+ * for the threaded handler's use.
+ *
+ * voting for wake thread - need at least 1 vote
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ /*
+ * IRQs are assigned in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+
+ rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+ WIL_NAME"_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+ WIL_NAME"_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME"_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+ /* error branch */
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+/* can't use wil_ioread32_and_clear because ICC value is not set yet */
+static inline void wil_clear32(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ iowrite32(x, addr);
+}
+
+void wil6210_clear_irq(struct wil6210_priv *wil)
+{
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wmb(); /* make sure write completed */
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ return rc;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_mask_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
new file mode 100644
index 000000000..e9c067381
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/uaccess.h>
+
+#include "wil6210.h"
+#include <uapi/linux/wil6210_uapi.h>
+
+#define wil_hex_dump_ioctl(prefix_str, buf, len) \
+ print_hex_dump_debug("DBG[IOC ]" prefix_str, \
+ DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
+#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
+
+static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr,
+ uint32_t size, enum wil_memio_op op)
+{
+ void __iomem *a;
+ u32 off;
+
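+ /* translate the user-supplied address according to the addressing
+ * mode requested in op: FW linker address, raw AHB address, or an
+ * offset within the PCI BAR
+ */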
+ switch (op & wil_mmio_addr_mask) {
+ case wil_mmio_addr_linker:
+ a = wmi_buffer(wil, cpu_to_le32(addr));
+ break;
+ case wil_mmio_addr_ahb:
+ a = wmi_addr(wil, addr);
+ break;
+ case wil_mmio_addr_bar:
+ a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
+ break;
+ default:
+ wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
+ return NULL;
+ }
+
+ off = a - wil->csr;
+ if (size >= WIL6210_MEM_SIZE - off) {
+ wil_err(wil, "Requested block does not fit into memory: "
+ "off = 0x%08x size = 0x%08x\n", off, size);
+ return NULL;
+ }
+
+ return a;
+}
+
+static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
+{
+ struct wil_memio io;
+ void __iomem *a;
+ bool need_copy = false;
+
+ if (copy_from_user(&io, data, sizeof(io)))
+ return -EFAULT;
+
+ wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
+ io.addr, io.val, io.op);
+
+ a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
+ if (!a) {
+ wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+ io.op);
+ return -EINVAL;
+ }
+ /* operation */
+ switch (io.op & wil_mmio_op_mask) {
+ case wil_mmio_read:
+ io.val = ioread32(a);
+ need_copy = true;
+ break;
+ case wil_mmio_write:
+ iowrite32(io.val, a);
+ wmb(); /* make sure write propagated to HW */
+ break;
+ default:
+ wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+ return -EINVAL;
+ }
+
+ if (need_copy) {
+ wil_dbg_ioctl(wil, "IO done: addr = 0x%08x"
+ " val = 0x%08x op = 0x%08x\n",
+ io.addr, io.val, io.op);
+ if (copy_to_user(data, &io, sizeof(io)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
+{
+ struct wil_memio_block io;
+ void *block;
+ void __iomem *a;
+ int rc = 0;
+
+ if (copy_from_user(&io, data, sizeof(io)))
+ return -EFAULT;
+
+ wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
+ io.addr, io.size, io.op);
+
+ /* size */
+ if (io.size % 4) {
+ wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size);
+ return -EINVAL;
+ }
+
+ a = wil_ioc_addr(wil, io.addr, io.size, io.op);
+ if (!a) {
+ wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+ io.op);
+ return -EINVAL;
+ }
+
+ block = kmalloc(io.size, GFP_USER);
+ if (!block)
+ return -ENOMEM;
+
+ /* operation */
+ switch (io.op & wil_mmio_op_mask) {
+ case wil_mmio_read:
+ wil_memcpy_fromio_32(block, a, io.size);
+ wil_hex_dump_ioctl("Read ", block, io.size);
+ if (copy_to_user(io.block, block, io.size)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ break;
+ case wil_mmio_write:
+ if (copy_from_user(block, io.block, io.size)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ wil_memcpy_toio_32(a, block, io.size);
+ wmb(); /* make sure write propagated to HW */
+ wil_hex_dump_ioctl("Write ", block, io.size);
+ break;
+ default:
+ wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+ rc = -EINVAL;
+ break;
+ }
+
+out_free:
+ kfree(block);
+ return rc;
+}
+
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
+{
+ switch (cmd) {
+ case WIL_IOCTL_MEMIO:
+ return wil_ioc_memio_dword(wil, data);
+ case WIL_IOCTL_MEMIO_BLOCK:
+ return wil_ioc_memio_block(wil, data);
+ default:
+ wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000..c2a238426
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,927 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "wmi.h"
+
+#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
+#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
+
+bool no_fw_recovery;
+module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
+
+/* if not set via modparam, will be set to default value of 1/8 of
+ * rx ring size during init flow
+ */
+unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
+module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_ring_overflow_thrsh,
+ " RX ring overflow threshold in descriptors.");
+
+/* We allow allocation of buffers larger than one page to support large packets.
+ * This is suboptimal performance-wise when the MTU is above the page size.
+ */
+unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+static int mtu_max_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ /* set mtu_max directly; no need to restore it on an
+ * illegal value since we assume this will fail insmod
+ */
+ ret = param_set_uint(val, kp);
+ if (ret)
+ return ret;
+
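+ /* 68 is the minimum MTU an IPv4 host is required to handle */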
+ if (mtu_max < 68 || mtu_max > WIL_MAX_ETH_MTU)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct kernel_param_ops mtu_max_ops = {
+ .set = mtu_max_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+MODULE_PARM_DESC(mtu_max, " Max MTU value.");
+
+static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
+static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
+static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
+
+static int ring_order_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ uint x;
+
+ ret = kstrtouint(val, 0, &x);
+ if (ret)
+ return ret;
+
+ if ((x < WIL_RING_SIZE_ORDER_MIN) || (x > WIL_RING_SIZE_ORDER_MAX))
+ return -EINVAL;
+
+ *((uint *)kp->arg) = x;
+
+ return 0;
+}
+
+static struct kernel_param_ops ring_order_ops = {
+ .set = ring_order_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
+
+#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
+#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from the NIC in 32-bit chunks;
+ * regular memcpy_fromio and its siblings will
+ * not work on 64-bit platforms - they use 64-bit transactions.
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms.
+ *
+ * To avoid a byte swap on big-endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to present the
+ * little-endian PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+ /* size_t is unsigned, if (count%4 != 0) it will wrap */
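+ /* a partial trailing dword is rounded up, so whole 32-bit words
+ * are always transferred
+ */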
+ for (count += 4; count > 4; count -= 4)
+ *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(*s++, d++);
+}
+
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
+ u16 reason_code, bool from_event)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ uint i;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil_sta_info *sta = &wil->sta[cid];
+
+ might_sleep();
+ wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
+ sta->status);
+
+ sta->data_port_open = false;
+ if (sta->status != wil_sta_unused) {
+ if (!from_event)
+ wmi_disconnect_sta(wil, sta->addr, reason_code);
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* AP-like interface */
+ cfg80211_del_sta(ndev, sta->addr, GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+ sta->status = wil_sta_unused;
+ }
+
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r;
+
+ spin_lock_bh(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+
+ spin_unlock_bh(&sta->tid_rx_lock);
+ }
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (wil->vring2cid_tid[i][0] == cid)
+ wil_vring_fini_tx(wil, i);
+ }
+ memset(&sta->stats, 0, sizeof(sta->stats));
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
+{
+ int cid = -ENOENT;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ might_sleep();
+ wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ reason_code, from_event ? "+" : "-");
+
+ /* Cases are:
+ * - disconnect single STA, still connected
+ * - disconnect single STA, already disconnected
+ * - disconnect all
+ *
+ * For "disconnect all", there are 2 options:
+ * - bssid == NULL
+ * - bssid is our MAC address
+ */
+ if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
+ cid = wil_find_cid(wil, bssid);
+ wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
+ bssid, cid, reason_code);
+ if (cid >= 0) /* disconnect 1 peer */
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
+ } else { /* all */
+ wil_dbg_misc(wil, "Disconnect all\n");
+ for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ wil_disconnect_cid(wil, cid, reason_code, from_event);
+ }
+
+ /* link state */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_bcast_fini(wil);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+
+ if (test_bit(wil_status_fwconnected, wil->status)) {
+ clear_bit(wil_status_fwconnected, wil->status);
+ cfg80211_disconnected(ndev, reason_code,
+ NULL, 0, GFP_KERNEL);
+ } else if (test_bit(wil_status_fwconnecting, wil->status)) {
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+ clear_bit(wil_status_fwconnecting, wil->status);
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, disconnect_worker);
+
+ mutex_lock(&wil->mutex);
+ _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
+ mutex_unlock(&wil->mutex);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "Connect timeout\n");
+
+ /* reschedule to thread context - disconnect won't
+ * run from atomic context
+ */
+ schedule_work(&wil->disconnect_worker);
+}
+
+static void wil_scan_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ clear_bit(wil_status_fwready, wil->status);
+ wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ wil->recovery_state = fw_recovery_pending;
+ schedule_work(&wil->fw_error_worker);
+}
+
+static int wil_wait_for_recovery(struct wil6210_priv *wil)
+{
+ if (wait_event_interruptible(wil->wq, wil->recovery_state !=
+ fw_recovery_pending)) {
+ wil_err(wil, "Interrupt, canceling recovery\n");
+ return -ERESTARTSYS;
+ }
+ if (wil->recovery_state != fw_recovery_running) {
+ wil_info(wil, "Recovery cancelled\n");
+ return -EINTR;
+ }
+ wil_info(wil, "Proceed with recovery\n");
+ return 0;
+}
+
+void wil_set_recovery_state(struct wil6210_priv *wil, int state)
+{
+ wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__,
+ wil->recovery_state, state);
+
+ wil->recovery_state = state;
+ wake_up_interruptible(&wil->wq);
+}
+
+static void wil_fw_error_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ fw_error_worker);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_misc(wil, "fw error worker\n");
+
+ if (!netif_running(wil_to_ndev(wil))) {
+ wil_info(wil, "No recovery - interface is down\n");
+ return;
+ }
+
+ /* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
+ * has passed since the last recovery attempt
+ */
+ if (time_is_after_jiffies(wil->last_fw_recovery +
+ WIL6210_FW_RECOVERY_TO))
+ wil->recovery_count++;
+ else
+ wil->recovery_count = 1; /* fw was alive for a long time */
+
+ if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
+ wil_err(wil, "too many recovery attempts (%d), giving up\n",
+ wil->recovery_count);
+ return;
+ }
+
+ wil->last_fw_recovery = jiffies;
+
+ mutex_lock(&wil->mutex);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_MONITOR:
+ wil_info(wil, "fw error recovery requested (try %d)...\n",
+ wil->recovery_count);
+ if (!no_fw_recovery)
+ wil->recovery_state = fw_recovery_running;
+ if (0 != wil_wait_for_recovery(wil))
+ break;
+
+ __wil_down(wil);
+ __wil_up(wil);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ wil_info(wil, "No recovery for AP-like interface\n");
+ /* recovery in these modes is done by upper layers */
+ break;
+ default:
+ wil_err(wil, "No recovery - unknown interface type %d\n",
+ wdev->iftype);
+ break;
+ }
+ mutex_unlock(&wil->mutex);
+}
+
+static int wil_find_free_vring(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->vring_tx[i].va)
+ return i;
+ }
+ return -EINVAL;
+}
+
+int wil_bcast_init(struct wil6210_priv *wil)
+{
+ int ri = wil->bcast_vring, rc;
+
+ if ((ri >= 0) && wil->vring_tx[ri].va)
+ return 0;
+
+ ri = wil_find_free_vring(wil);
+ if (ri < 0)
+ return ri;
+
+ rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
+ if (rc == 0)
+ wil->bcast_vring = ri;
+
+ return rc;
+}
+
+void wil_bcast_fini(struct wil6210_priv *wil)
+{
+ int ri = wil->bcast_vring;
+
+ if (ri < 0)
+ return;
+
+ wil->bcast_vring = -1;
+ wil_vring_fini_tx(wil, ri);
+}
+
+static void wil_connect_worker(struct work_struct *work)
+{
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ connect_worker);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ int cid = wil->pending_connect_cid;
+ int ringid = wil_find_free_vring(wil);
+
+ if (cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
+
+ rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0) {
+ wil->sta[cid].status = wil_sta_connected;
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ wil->sta[cid].status = wil_sta_unused;
+ }
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ uint i;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ memset(wil->sta, 0, sizeof(wil->sta));
+ for (i = 0; i < WIL6210_MAX_CID; i++)
+ spin_lock_init(&wil->sta[i].tid_rx_lock);
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->wmi_mutex);
+ mutex_init(&wil->back_rx_mutex);
+ mutex_init(&wil->back_tx_mutex);
+ mutex_init(&wil->probe_client_mutex);
+
+ init_completion(&wil->wmi_ready);
+ init_completion(&wil->wmi_call);
+
+ wil->pending_connect_cid = -1;
+ wil->bcast_vring = -1;
+ setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+ setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
+
+ INIT_WORK(&wil->connect_worker, wil_connect_worker);
+ INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+ INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
+ INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
+ INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
+ INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ INIT_LIST_HEAD(&wil->back_rx_pending);
+ INIT_LIST_HEAD(&wil->back_tx_pending);
+ INIT_LIST_HEAD(&wil->probe_client_pending);
+ spin_lock_init(&wil->wmi_ev_lock);
+ init_waitqueue_head(&wil->wq);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wq_service = create_singlethread_workqueue(WIL_NAME "_service");
+ if (!wil->wq_service)
+ goto out_wmi_wq;
+
+ wil->last_fw_recovery = jiffies;
+ wil->tx_interframe_timeout = WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->rx_interframe_timeout = WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT;
+ wil->tx_max_burst_duration = WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT;
+ wil->rx_max_burst_duration = WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT;
+
+ if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
+ rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
+ return 0;
+
+out_wmi_wq:
+ destroy_workqueue(wil->wmi_wq);
+
+ return -EAGAIN;
+}
+
+/**
+ * wil6210_disconnect - disconnect one connection
+ * @wil: driver context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ * @from_event: whether this is invoked from the FW event handler
+ *
+ * Disconnect and release associated resources. If not invoked from the
+ * FW event handler, issue WMI command(s) to trigger MAC disconnect.
+ */
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ del_timer_sync(&wil->connect_timer);
+ _wil6210_disconnect(wil, bssid, reason_code, from_event);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ del_timer_sync(&wil->scan_timer);
+ cancel_work_sync(&wil->disconnect_worker);
+ cancel_work_sync(&wil->fw_error_worker);
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+ mutex_unlock(&wil->mutex);
+ wmi_event_flush(wil);
+ wil_back_rx_flush(wil);
+ cancel_work_sync(&wil->back_rx_worker);
+ wil_back_tx_flush(wil);
+ cancel_work_sync(&wil->back_tx_worker);
+ wil_probe_client_flush(wil);
+ cancel_work_sync(&wil->probe_client_worker);
+ destroy_workqueue(wil->wq_service);
+ destroy_workqueue(wil->wmi_wq);
+}
+
+/* target operations */
+/* register read */
+#define R(a) ioread32(wil->csr + HOSTADDR(a))
+/* register write. wmb() to make sure it is completed */
+#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
+/* register set = read, OR, write */
+#define S(a, v) W(a, R(a) | v)
+/* register clear = read, AND with inverted, write */
+#define C(a, v) W(a, R(a) & ~v)
+
+static inline void wil_halt_cpu(struct wil6210_priv *wil)
+{
+ W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+ W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
+}
+
+static inline void wil_release_cpu(struct wil6210_priv *wil)
+{
+ /* Start CPU */
+ W(RGF_USER_USER_CPU_0, 1);
+}
+
+static int wil_target_reset(struct wil6210_priv *wil)
+{
+ int delay = 0;
+ u32 x;
+
+ wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
+
+ /* Clear MAC link up */
+ S(RGF_HP_CTRL, BIT(15));
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+
+ wil_halt_cpu(wil);
+
+ /* clear all boot loader "ready" bits */
+ W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+ /* Clear Fw Download notification */
+ C(RGF_USER_USAGE_6, BIT(0));
+
+ S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+ /* XTAL stabilization should take about 3ms */
+ usleep_range(5000, 7000);
+ x = R(RGF_CAF_PLL_LOCK_STATUS);
+ if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
+ wil_err(wil, "Xtal stabilization timeout\n"
+ "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
+ return -ETIME;
+ }
+ /* switch 10k to XTAL*/
+ C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+ /* 40 MHz */
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+
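+ /* program the software reset vectors: assert the subsystem resets,
+ * then release them in the prescribed order (the magic values follow
+ * the vendor-defined reset sequence)
+ */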
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+ W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ /* wait until device ready. typical time is 20..80 msec */
+ do {
+ msleep(RST_DELAY);
+ x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (!(x & BIT_BL_READY));
+
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+ /* enable fix for HW bug related to the SA/DA swap in AP Rx */
+ S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+ BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+
+ wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
+ return 0;
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+static int wil_get_bl_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct RGF_BL bl;
+
+ wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
+ le32_to_cpus(&bl.ready);
+ le32_to_cpus(&bl.version);
+ le32_to_cpus(&bl.rf_type);
+ le32_to_cpus(&bl.baseband_type);
+
+ if (!is_valid_ether_addr(bl.mac_address)) {
+ wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, bl.mac_address);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ ether_addr_copy(ndev->dev_addr, bl.mac_address);
+ wil_info(wil,
+ "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+ bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+
+ return 0;
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(1000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+
+ if (0 == left) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ } else {
+ wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+ jiffies_to_msecs(to-left), wil->hw_version);
+ }
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil, bool load_fw)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (wil->hw_version == HW_VER_UNKNOWN)
+ return -ENODEV;
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+ WARN_ON(test_bit(wil_status_napi_en, wil->status));
+
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+ wil_bcast_fini(wil);
+
+ /* prevent NAPI from being scheduled */
+ bitmap_zero(wil->status, wil_status_last);
+
+ if (wil->scan_request) {
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+ wil->scan_request);
+ del_timer_sync(&wil->scan_timer);
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ wil_mask_irq(wil);
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wq_service);
+ flush_workqueue(wil->wmi_wq);
+
+ rc = wil_target_reset(wil);
+ wil_rx_fini(wil);
+ if (rc)
+ return rc;
+
+ rc = wil_get_bl_info(wil);
+ if (rc)
+ return rc;
+
+ if (load_fw) {
+ wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
+ WIL_FW2_NAME);
+
+ wil_halt_cpu(wil);
+ /* Loading f/w from the file */
+ rc = wil_request_firmware(wil, WIL_FW_NAME);
+ if (rc)
+ return rc;
+ rc = wil_request_firmware(wil, WIL_FW2_NAME);
+ if (rc)
+ return rc;
+
+ /* Mark FW as loaded from host */
+ S(RGF_USER_USAGE_6, 1);
+
+ /* clear any interrupts which the on-card firmware
+ * may have set
+ */
+ wil6210_clear_irq(wil);
+ /* CAF_ICR - clear and mask */
+ /* it is W1C, clear by writing back same value */
+ S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+
+ wil_release_cpu(wil);
+ }
+
+ /* init after reset */
+ wil->pending_connect_cid = -1;
+ wil->ap_isolate = 0;
+ reinit_completion(&wil->wmi_ready);
+ reinit_completion(&wil->wmi_call);
+
+ if (load_fw) {
+ wil_configure_interrupt_moderation(wil);
+ wil_unmask_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+ if (rc == 0) /* check FW is responsive */
+ rc = wmi_echo(wil);
+ }
+
+ return rc;
+}
+
+#undef R
+#undef W
+#undef S
+#undef C
+
+void wil_fw_error_recovery(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "starting fw error recovery\n");
+ wil->recovery_state = fw_recovery_pending;
+ schedule_work(&wil->fw_error_worker);
+}
+
+int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ rc = wil_reset(wil, true);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ rc = wil_rx_init(wil, 1 << rx_ring_order);
+ if (rc)
+ return rc;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg_misc(wil, "type: STATION\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg_misc(wil, "type: AP\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg_misc(wil, "type: P2P_CLIENT\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg_misc(wil, "type: P2P_GO\n");
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg_misc(wil, "type: Monitor\n");
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ wil_dbg_misc(wil, "NAPI enable\n");
+ napi_enable(&wil->napi_rx);
+ napi_enable(&wil->napi_tx);
+ set_bit(wil_status_napi_en, wil->status);
+
+ if (wil->platform_ops.bus_request)
+ wil->platform_ops.bus_request(wil->platform_handle,
+ WIL_MAX_BUS_REQUEST_KBPS);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+int __wil_down(struct wil6210_priv *wil)
+{
+ int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS /
+ WAIT_FOR_DISCONNECT_INTERVAL_MS;
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ if (wil->platform_ops.bus_request)
+ wil->platform_ops.bus_request(wil->platform_handle, 0);
+
+ wil_disable_irq(wil);
+ if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
+ napi_disable(&wil->napi_rx);
+ napi_disable(&wil->napi_tx);
+ wil_dbg_misc(wil, "NAPI disable\n");
+ }
+ wil_enable_irq(wil);
+
+ if (wil->scan_request) {
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+ wil->scan_request);
+ del_timer_sync(&wil->scan_timer);
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ if (test_bit(wil_status_fwconnected, wil->status) ||
+ test_bit(wil_status_fwconnecting, wil->status))
+ wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ /* make sure wil is idle (not connected) */
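+ /* release the mutex while polling - the disconnect event handler
+ * needs it in order to run and clear the "connected" bits
+ */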
+ mutex_unlock(&wil->mutex);
+ while (iter--) {
+ int idle = !test_bit(wil_status_fwconnected, wil->status) &&
+ !test_bit(wil_status_fwconnecting, wil->status);
+ if (idle)
+ break;
+ msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS);
+ }
+ mutex_lock(&wil->mutex);
+
+ if (!iter)
+ wil_err(wil, "timeout waiting for idle FW/HW\n");
+
+ wil_reset(wil, false);
+
+ return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
+{
+ int i;
+ int rc = -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if ((wil->sta[i].status != wil_sta_unused) &&
+ ether_addr_equal(wil->sta[i].addr, mac)) {
+ rc = i;
+ break;
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000..f2f7ea295
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ return wil_down(wil);
+}
+
+static int wil_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ if (new_mtu < 68 || new_mtu > mtu_max) {
+ wil_err(wil, "invalid MTU %d\n", new_mtu);
+ return -EINVAL;
+ }
+
+ wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ int ret = wil_ioctl(wil, ifr->ifr_data, cmd);
+
+ wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+
+ return ret;
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = wil_change_mtu,
+ .ndo_do_ioctl = wil_do_ioctl,
+};
+
+static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_rx);
+ int quota = budget;
+ int done;
+
+ wil_rx_handle(wil, &quota);
+ done = budget - quota;
+
+ if (done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_rx(wil);
+ wil_dbg_txrx(wil, "NAPI RX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+ return done;
+}
+
+static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+ napi_tx);
+ int tx_done = 0;
+ uint i;
+
+ /* always process ALL Tx complete, regardless budget - it is fast */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ struct vring *vring = &wil->vring_tx[i];
+
+ if (!vring->va)
+ continue;
+
+ tx_done += wil_tx_complete(wil, i);
+ }
+
+ if (tx_done < budget) {
+ napi_complete(napi);
+ wil6210_unmask_irq_tx(wil);
+ wil_dbg_txrx(wil, "NAPI TX complete\n");
+ }
+
+ wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+ return min(tx_done, budget);
+}
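Both poll handlers above follow the usual NAPI contract: process at most `budget` units of work, and only when less than the budget was consumed call napi_complete() and re-enable the device interrupt. A minimal sketch of that pattern, where reap_one() and unmask_irq() are hypothetical stand-ins for the driver-specific reaping and interrupt-unmasking calls:

    /* Sketch of the NAPI poll contract used by both handlers above;
     * reap_one() and unmask_irq() are hypothetical stand-ins.
     */
    #include <linux/netdevice.h>

    static bool reap_one(void);        /* process one completion, if any */
    static void unmask_irq(void);      /* re-arm the device interrupt */

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            while (done < budget && reap_one())
                    done++;

            if (done < budget) {            /* ring drained within budget */
                    napi_complete(napi);    /* stop polling ... */
                    unmask_irq();           /* ... and wait for the next IRQ */
            }

            return done;                    /* never report more than budget */
    }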
+
+static void wil_dev_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
+}
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+ int rc = 0;
+
+ wdev = wil_cfg80211_init(dev);
+ if (IS_ERR(wdev)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wdev;
+ }
+
+ wil = wdev_to_wil(wdev);
+ wil->csr = csr;
+ wil->wdev = wdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_wdev;
+ }
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+ /* default monitor channel */
+ ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+ ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
+ if (!ndev) {
+ dev_err(dev, "alloc_netdev_mqs failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ wil_set_ethtoolops(ndev);
+ ndev->ieee80211_ptr = wdev;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GRO;
+ ndev->features |= ndev->hw_features;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+
+ netif_tx_stop_all_queues(ndev);
+
+ return wil;
+
+ out_priv:
+ wil_priv_deinit(wil);
+
+ out_wdev:
+ wil_wdev_free(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (!ndev)
+ return;
+
+ wil_priv_deinit(wil);
+
+ wil_to_ndev(wil) = NULL;
+ free_netdev(ndev);
+
+ wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ rc = register_netdev(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000..109986114
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+ " Use MSI interrupt: "
+ "0 - don't use, 1 - single (default), 3 - three vectors");
+
+static bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
+
+static
+void wil_set_capabilities(struct wil6210_priv *wil)
+{
+ u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+
+ bitmap_zero(wil->hw_capabilities, hw_capability_last);
+
+ switch (rev_id) {
+ case JTAG_DEV_ID_SPARROW_B0:
+ wil->hw_name = "Sparrow B0";
+ wil->hw_version = HW_VER_SPARROW_B0;
+ break;
+ default:
+ wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id);
+ wil->hw_name = "Unknown";
+ wil->hw_version = HW_VER_UNKNOWN;
+ }
+
+ wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+}
+
+void wil_disable_irq(struct wil6210_priv *wil)
+{
+ int irq = wil->pdev->irq;
+
+ disable_irq(irq);
+ if (wil->n_msi == 3) {
+ disable_irq(irq + 1);
+ disable_irq(irq + 2);
+ }
+}
+
+void wil_enable_irq(struct wil6210_priv *wil)
+{
+ int irq = wil->pdev->irq;
+
+ enable_irq(irq);
+ if (wil->n_msi == 3) {
+ enable_irq(irq + 1);
+ enable_irq(irq + 2);
+ }
+}
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+ /* on platforms with buggy ACPI, pdev->msi_enabled may be set to
+ * allow pci_enable_device to work. This indicates INTx was not routed
+ * and only MSI should be used
+ */
+ int msi_only = pdev->msi_enabled;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ pdev->msi_enabled = 0;
+
+ pci_set_master(pdev);
+
+ /*
+ * how many MSI interrupts to request?
+ */
+ switch (use_msi) {
+ case 3:
+ case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ break;
+ case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ break;
+ default:
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
+ use_msi = 1;
+ }
+
+ if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ use_msi = 1;
+ }
+
+ if (use_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ use_msi = 0;
+ }
+
+ wil->n_msi = use_msi;
+
+ if ((wil->n_msi == 0) && msi_only) {
+ wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
+ rc = -ENODEV;
+ goto stop_master;
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto stop_master;
+
+ /* need reset here to obtain MAC */
+ mutex_lock(&wil->mutex);
+ rc = wil_reset(wil, false);
+ mutex_unlock(&wil->mutex);
+ if (debug_fw)
+ rc = 0;
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
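The interrupt setup above degrades step by step: three MSI vectors if requested and granted, otherwise a single MSI, otherwise legacy INTx, failing only when INTx is known not to be routed. A compressed sketch of that fallback ladder, assuming the same PCI helpers the function already uses; 'want' corresponds to the use_msi module parameter:

    /* Sketch of the MSI fallback ladder in wil_if_pcie_enable(). */
    #include <linux/pci.h>

    static int pick_irq_mode(struct pci_dev *pdev, int want)
    {
            if (want == 3 && pci_enable_msi_range(pdev, 3, 3) < 0)
                    want = 1;               /* three vectors not available */
            if (want == 1 && pci_enable_msi(pdev))
                    want = 0;               /* fall back to INTx */
            return want;                    /* number of MSI vectors in use */
    }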
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ void __iomem *csr;
+ int rc;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME
+ " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+ dev_err(&pdev->dev, "Not " WIL_NAME "? "
+ "BAR0 size is %lu while expecting %lu\n",
+ (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ return -ENODEV;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "pci_enable_device failed, retry with MSI only\n");
+ /* Workaround for platforms that can't allocate IRQ:
+ * retry with MSI only
+ */
+ pdev->msi_enabled = 1;
+ rc = pci_enable_device(pdev);
+ }
+ if (rc)
+ return -ENODEV;
+ /* rollback to err_disable_pdev */
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ csr = pci_ioremap_bar(pdev, 0);
+ if (!csr) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
+
+ wil = wil_if_alloc(dev, csr);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ goto err_iounmap;
+ }
+ /* rollback to if_free */
+
+ pci_set_drvdata(pdev, wil);
+ wil->pdev = pdev;
+ wil_set_capabilities(wil);
+ wil6210_clear_irq(wil);
+
+ wil->platform_handle =
+ wil_platform_init(&pdev->dev, &wil->platform_ops);
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto if_free;
+ }
+ /* rollback to bus_disable */
+
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ wil6210_debugfs_init(wil);
+
+ return 0;
+
+ bus_disable:
+ wil_if_pcie_disable(wil);
+ if_free:
+ if (wil->platform_ops.uninit)
+ wil->platform_ops.uninit(wil->platform_handle);
+ wil_if_free(wil);
+ err_iounmap:
+ pci_iounmap(pdev, csr);
+ err_release_reg:
+ pci_release_region(pdev, 0);
+ err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ void __iomem *csr = wil->csr;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil6210_debugfs_remove(wil);
+ wil_if_remove(wil);
+ wil_if_pcie_disable(wil);
+ if (wil->platform_ops.uninit)
+ wil->platform_ops.uninit(wil->platform_handle);
+ wil_if_free(wil);
+ pci_iounmap(pdev, csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id wil6210_pcie_ids[] = {
+ { PCI_DEVICE(0x1ae9, 0x0310) },
+ { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644
index 000000000..ca10dcf09
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "txrx.h"
+
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+ return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+ return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+ return (sq1 - sq2) & SEQ_MASK;
+}
+
+static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
+{
+ return seq_sub(seq, r->ssn) % r->buf_size;
+}
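These helpers do arithmetic modulo the 12-bit 802.11 sequence-number space (SEQ_MODULO = 0x1000); seq_less() treats a difference of more than half the space as a wrap-around. A few worked values, assuming the helpers above are in scope:

    /* Worked examples for the modulo-4096 sequence arithmetic above. */
    #include <assert.h>

    static void seq_examples(void)
    {
            /* 0xffe precedes 0x002 across the wrap:
             * (0xffe - 0x002) & 0xfff = 0xffc > SEQ_MODULO / 2
             */
            assert(seq_less(0xffe, 0x002));
            assert(!seq_less(0x002, 0xffe));

            /* distance from 0xffe to 0x002 is 4 frames */
            assert(seq_sub(0x002, 0xffe) == 4);

            /* the counter wraps from 0xfff back to 0 */
            assert(seq_inc(0xfff) == 0x000);
    }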
+
+static void wil_release_reorder_frame(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ int index)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct sk_buff *skb = r->reorder_buf[index];
+
+ if (!skb)
+ goto no_frame;
+
+ /* release the frame from the reorder ring buffer */
+ r->stored_mpdu_num--;
+ r->reorder_buf[index] = NULL;
+ wil_netif_rx_any(skb, ndev);
+
+no_frame:
+ r->head_seq_num = seq_inc(r->head_seq_num);
+}
+
+static void wil_release_reorder_frames(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ u16 hseq)
+{
+ int index;
+
+ /* note: this function is never called with
+ * hseq preceding r->head_seq_num, i.e it is always true
+ * !seq_less(hseq, r->head_seq_num)
+ * and thus on loop exit it should be
+ * r->head_seq_num == hseq
+ */
+ while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
+ index = reorder_index(r, r->head_seq_num);
+ wil_release_reorder_frame(wil, r, index);
+ }
+ r->head_seq_num = hseq;
+}
+
+static void wil_reorder_release(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ int index = reorder_index(r, r->head_seq_num);
+
+ while (r->reorder_buf[index]) {
+ wil_release_reorder_frame(wil, r, index);
+ index = reorder_index(r, r->head_seq_num);
+ }
+}
+
+/* called in NAPI context */
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int tid = wil_rxdesc_tid(d);
+ int cid = wil_rxdesc_cid(d);
+ int mid = wil_rxdesc_mid(d);
+ u16 seq = wil_rxdesc_seq(d);
+ int mcast = wil_rxdesc_mcast(d);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ struct wil_tid_ampdu_rx *r;
+ u16 hseq;
+ int index;
+
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
+ mid, cid, tid, seq, mcast);
+
+ if (unlikely(mcast)) {
+ wil_netif_rx_any(skb, ndev);
+ return;
+ }
+
+ spin_lock(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ if (!r) {
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ hseq = r->head_seq_num;
+
+ /* Due to the race between WMI events reporting BACK establishment
+ * and data Rx, a few packets may be passed up before the reorder
+ * buffer gets allocated. Catch up by pretending the SSN is what we
+ * see in the first Rx packet.
+ *
+ * Another scenario: Rx is delayed and we get a packet from before
+ * the BACK. Pass it to the stack and wait.
+ */
+ if (r->first_time) {
+ r->first_time = false;
+ if (seq != r->head_seq_num) {
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil,
+ "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
+ seq, r->head_seq_num);
+ r->first_time = true;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+ wil_err(wil,
+ "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
+ seq, r->head_seq_num);
+ r->head_seq_num = seq;
+ r->ssn = seq;
+ }
+ }
+
+ /* frame with out of date sequence number */
+ if (seq_less(seq, r->head_seq_num)) {
+ r->ssn_last_drop = seq;
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the frame sequence number exceeds our buffering window
+ * size, release some previous frames to make room for this one.
+ */
+ if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
+ hseq = seq_inc(seq_sub(seq, r->buf_size));
+ /* release stored frames up to new head to stack */
+ wil_release_reorder_frames(wil, r, hseq);
+ }
+
+ /* Now the new frame is always in the range of the reordering buffer */
+
+ index = reorder_index(r, seq);
+
+ /* check if we already stored this frame */
+ if (r->reorder_buf[index]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the current MPDU is in the right order and nothing else
+ * is stored we can process it directly, no need to buffer it.
+ * If it is first but there's something stored, we may be able
+ * to release frames after this one.
+ */
+ if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
+ r->head_seq_num = seq_inc(r->head_seq_num);
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ /* put the frame in the reordering buffer */
+ r->reorder_buf[index] = skb;
+ r->reorder_time[index] = jiffies;
+ r->stored_mpdu_num++;
+ wil_reorder_release(wil, r);
+
+out:
+ spin_unlock(&sta->tid_rx_lock);
+}
+
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn)
+{
+ struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
+
+ if (!r)
+ return NULL;
+
+ r->reorder_buf =
+ kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
+ r->reorder_time =
+ kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
+ if (!r->reorder_buf || !r->reorder_time) {
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+ return NULL;
+ }
+
+ r->ssn = ssn;
+ r->head_seq_num = ssn;
+ r->buf_size = size;
+ r->stored_mpdu_num = 0;
+ r->first_time = true;
+ return r;
+}
+
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ if (!r)
+ return;
+ wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+}
+
+/* ADDBA processing */
+static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
+{
+ u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ (mtu_max + WIL_MAX_MPDU_OVERHEAD));
+
+ if (!req_agg_wsize)
+ return max_agg_size;
+
+ return min(max_agg_size, req_agg_wsize);
+}
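wil_agg_size() caps the negotiated Rx aggregation window at both the driver's maximum window and the number of maximum-size MPDUs that fit into one A-MPDU. A worked illustration with made-up values (the real WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE and mtu_max are defined elsewhere in the driver):

    /* Illustration of the window clamping in wil_agg_size();
     * all three constants below are purely illustrative.
     */
    #define EX_MAX_AGG_WSIZE   64      /* driver hard limit (illustrative) */
    #define EX_MAX_AMPDU_SIZE  65535   /* bytes per A-MPDU (illustrative) */
    #define EX_MPDU_BYTES      2048    /* mtu_max + overhead (illustrative) */

    static unsigned short ex_agg_size(unsigned short req)
    {
            unsigned short max_size = EX_MAX_AMPDU_SIZE / EX_MPDU_BYTES; /* 31 */

            if (max_size > EX_MAX_AGG_WSIZE)
                    max_size = EX_MAX_AGG_WSIZE;
            return req ? (req < max_size ? req : max_size) : max_size;
            /* req = 0  -> 31 (use the maximum)
             * req = 16 -> 16 (peer asked for less)
             * req = 64 -> 31 (clamped to what fits in one A-MPDU)
             */
    }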
+
+/* Block Ack - Rx side (recipient) */
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl)
+{
+ struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->cidxtid = cidxtid;
+ req->dialog_token = dialog_token;
+ req->ba_param_set = le16_to_cpu(ba_param_set);
+ req->ba_timeout = le16_to_cpu(ba_timeout);
+ req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
+
+ mutex_lock(&wil->back_rx_mutex);
+ list_add_tail(&req->list, &wil->back_rx_pending);
+ mutex_unlock(&wil->back_rx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_rx_worker);
+
+ return 0;
+}
+
+static void wil_back_rx_handle(struct wil6210_priv *wil,
+ struct wil_back_rx *req)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wil_sta_info *sta;
+ u8 cid, tid;
+ u16 agg_wsize = 0;
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
+ bool agg_amsdu = !!(req->ba_param_set & BIT(0));
+ int ba_policy = req->ba_param_set & BIT(1);
+ u16 agg_timeout = req->ba_timeout;
+ u16 status = WLAN_STATUS_SUCCESS;
+ u16 ssn = req->ba_seq_ctrl >> 4;
+ struct wil_tid_ampdu_rx *r;
+ int rc;
+
+ might_sleep();
+ parse_cidxtid(req->cidxtid, &cid, &tid);
+
+ /* sanity checks */
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "BACK: invalid CID %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status != wil_sta_connected) {
+ wil_err(wil, "BACK: CID %d not connected\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil,
+ "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
+ cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+
+ /* apply policies */
+ if (ba_policy) {
+ wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
+ status = WLAN_STATUS_INVALID_QOS_PARAM;
+ }
+ if (status == WLAN_STATUS_SUCCESS)
+ agg_wsize = wil_agg_size(wil, req_agg_wsize);
+
+ rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+ agg_amsdu, agg_wsize, agg_timeout);
+ if (rc || (status != WLAN_STATUS_SUCCESS))
+ return;
+
+ /* apply */
+ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+ spin_lock_bh(&sta->tid_rx_lock);
+ wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+ sta->tid_rx[tid] = r;
+ spin_unlock_bh(&sta->tid_rx_lock);
+}
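The ADDBA "BA parameter set" field decoded at the top of wil_back_rx_handle() packs four values into 16 bits. A standalone decode with plain shifts and masks, equivalent under the bit layout documented in the comment above to what the driver's WIL_GET_BITS and BIT() tests extract:

    /* Decode of the 16-bit ADDBA "BA parameter set": bit 0 A-MSDU,
     * bit 1 policy, bits 2..5 TID, bits 6..15 buffer size.
     */
    struct ba_params {
            unsigned amsdu:1, policy:1, tid:4;
            unsigned short buf_size;
    };

    static struct ba_params decode_ba_param_set(unsigned short v)
    {
            struct ba_params p = {
                    .amsdu    = v & 0x1,
                    .policy   = (v >> 1) & 0x1,
                    .tid      = (v >> 2) & 0xf,
                    .buf_size = (v >> 6) & 0x3ff,
            };
            return p; /* e.g. v = 0x0402 -> A-MSDU 0, policy 1, TID 0, size 16 */
    }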
+
+void wil_back_rx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_rx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+}
+
+/* Retrieve next ADDBA request from the pending list */
+static struct list_head *next_back_rx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ if (!list_empty(&wil->back_rx_pending)) {
+ ret = wil->back_rx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+
+ return ret;
+}
+
+void wil_back_rx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_rx_worker);
+ struct wil_back_rx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_rx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_rx, list);
+
+ wil_back_rx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+/* BACK - Tx (originator) side */
+static void wil_back_tx_handle(struct wil6210_priv *wil,
+ struct wil_back_tx *req)
+{
+ struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
+ int rc;
+
+ if (txdata->addba_in_progress) {
+ wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
+ req->ringid);
+ return;
+ }
+ if (txdata->agg_wsize) {
+ wil_dbg_misc(wil,
+ "ADDBA for vring[%d] already established wsize %d\n",
+ req->ringid, txdata->agg_wsize);
+ return;
+ }
+ txdata->addba_in_progress = true;
+ rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
+ if (rc)
+ txdata->addba_in_progress = false;
+}
+
+static struct list_head *next_back_tx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ if (!list_empty(&wil->back_tx_pending)) {
+ ret = wil->back_tx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+
+ return ret;
+}
+
+void wil_back_tx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_tx_worker);
+ struct wil_back_tx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_tx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_tx, list);
+
+ wil_back_tx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+void wil_back_tx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_tx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+}
+
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
+{
+ struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->ringid = ringid;
+ req->agg_wsize = wil_agg_size(wil, wsize);
+ req->agg_timeout = 0;
+
+ mutex_lock(&wil->back_tx_mutex);
+ list_add_tail(&req->list, &wil->back_tx_pending);
+ mutex_unlock(&wil->back_tx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_tx_worker);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
new file mode 100644
index 000000000..cd2534b9c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
new file mode 100644
index 000000000..e59239d22
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wil6210
+#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define WIL6210_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
+
+DECLARE_EVENT_CLASS(wil6210_wmi,
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+
+ TP_ARGS(wmi, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(u8, mid)
+ __field(u16, id)
+ __field(u32, timestamp)
+ __field(u16, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->mid = wmi->mid;
+ __entry->id = le16_to_cpu(wmi->id);
+ __entry->timestamp = le32_to_cpu(wmi->timestamp);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "MID %d id 0x%04x len %d timestamp %d",
+ __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
+ )
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
+);
+
+#define WIL6210_MSG_MAX (200)
+
+DECLARE_EVENT_CLASS(wil6210_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ WIL6210_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= WIL6210_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \
+ {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \
+ {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" })
+
+TRACE_EVENT(wil6210_irq_pseudo,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x : %s", __entry->x,
+ wil_pseudo_irq_cause(__entry->x))
+);
+
+DECLARE_EVENT_CLASS(wil6210_irq,
+ TP_PROTO(u32 x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(
+ __field(u32, x)
+ ),
+ TP_fast_assign(
+ __entry->x = x;
+ ),
+ TP_printk("cause 0x%08x", __entry->x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_rx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_tx,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread,
+ TP_PROTO(u32 x),
+ TP_ARGS(x)
+);
+
+TRACE_EVENT(wil6210_rx,
+ TP_PROTO(u16 index, struct vring_rx_desc *d),
+ TP_ARGS(index, d),
+ TP_STRUCT__entry(
+ __field(u16, index)
+ __field(unsigned int, len)
+ __field(u8, mid)
+ __field(u8, cid)
+ __field(u8, tid)
+ __field(u8, type)
+ __field(u8, subtype)
+ __field(u16, seq)
+ __field(u8, mcs)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->len = d->dma.length;
+ __entry->mid = wil_rxdesc_mid(d);
+ __entry->cid = wil_rxdesc_cid(d);
+ __entry->tid = wil_rxdesc_tid(d);
+ __entry->type = wil_rxdesc_ftype(d);
+ __entry->subtype = wil_rxdesc_subtype(d);
+ __entry->seq = wil_rxdesc_seq(d);
+ __entry->mcs = wil_rxdesc_mcs(d);
+ ),
+ TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
+ " type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
+ __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+ __entry->seq, __entry->type, __entry->subtype)
+);
+
+TRACE_EVENT(wil6210_tx,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
+ TP_ARGS(vring, index, len, frags),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, frags)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->frags = frags;
+ __entry->index = index;
+ __entry->len = len;
+ ),
+ TP_printk("vring %d index %d len %d frags %d",
+ __entry->vring, __entry->index, __entry->len, __entry->frags)
+);
+
+TRACE_EVENT(wil6210_tx_done,
+ TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
+ TP_ARGS(vring, index, len, err),
+ TP_STRUCT__entry(
+ __field(u8, vring)
+ __field(u8, err)
+ __field(u16, index)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->vring = vring;
+ __entry->index = index;
+ __entry->len = len;
+ __entry->err = err;
+ ),
+ TP_printk("vring %d index %d len %d err 0x%02x",
+ __entry->vring, __entry->index, __entry->len,
+ __entry->err)
+);
+
+#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000..e8bd512d8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,1453 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/prefetch.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+#include "trace.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+ " Include PHY info in the radiotap header, default - no");
+
+bool rx_align_2;
+module_param(rx_align_2, bool, S_IRUGO);
+MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
+
+static inline uint wil_rx_snaplen(void)
+{
+ return rx_align_2 ? 6 : 0;
+}
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+ return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+ return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+ vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+ return wil_vring_next_tail(vring) == vring->swhead;
+}
+
+/* Used space in Tx Vring */
+static inline int wil_vring_used_tx(struct vring *vring)
+{
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ return (vring->size + swhead - swtail) % vring->size;
+}
+
+/* Available space in Tx Vring */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ return vring->size - wil_vring_used_tx(vring) - 1;
+}
+
+/* wil_vring_wmark_low - low watermark for available descriptor space */
+static inline int wil_vring_wmark_low(struct vring *vring)
+{
+ return vring->size/8;
+}
+
+/* wil_vring_wmark_high - high watermark for available descriptor space */
+static inline int wil_vring_wmark_high(struct vring *vring)
+{
+ return vring->size/4;
+}
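The used/avail helpers above treat the vring as a classic circular buffer in which one slot is always sacrificed to tell "full" apart from "empty". A couple of worked values for a 128-entry ring:

    /* Worked example of the circular-buffer arithmetic for a ring of 128. */
    static void vring_space_example(void)
    {
            unsigned size = 128, swhead, swtail, used, avail;

            swhead = 5;  swtail = 120;
            used  = (size + swhead - swtail) % size;   /* 13 in flight   */
            avail = size - used - 1;                   /* 114 free slots */

            swhead = 120; swtail = 5;
            used  = (size + swhead - swtail) % size;   /* 115 in flight  */
            avail = size - used - 1;                   /* 12 free slots  */
            (void)avail;
    }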
+
+/* wil_val_in_range - check if value in [min,max) */
+static inline bool wil_val_in_range(int val, int min, int max)
+{
+ return val >= min && val < max;
+}
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+ /* vring->va should be aligned on its size rounded up to a power of 2.
+ * This is guaranteed by dma_alloc_coherent()
+ */
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+ /* initially, all descriptors are SW owned.
+ * For Tx and Rx, the ownership bit is at the same location, thus
+ * we can use either layout
+ */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *_d = &vring->va[i].tx;
+
+ _d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
+ vring->va, &vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+ struct wil_ctx *ctx)
+{
+ dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+ int tx)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ if (tx) {
+ int vring_index = vring - wil->vring_tx;
+
+ wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
+ vring_index, vring->size, vring->va,
+ &vring->pa, vring->ctx);
+ } else {
+ wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
+ vring->size, vring->va,
+ &vring->pa, vring->ctx);
+ }
+
+ while (!wil_vring_is_empty(vring)) {
+ dma_addr_t pa;
+ u16 dmalen;
+ struct wil_ctx *ctx;
+
+ if (tx) {
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d =
+ &vring->va[vring->swtail].tx;
+
+ ctx = &vring->ctx[vring->swtail];
+ *d = *_d;
+ wil_txdesc_unmap(dev, d, ctx);
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+ vring->swtail = wil_vring_next_tail(vring);
+ } else { /* rx */
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d =
+ &vring->va[vring->swhead].rx;
+
+ ctx = &vring->ctx[vring->swhead];
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+ kfree_skb(ctx->skb);
+ wil_vring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
+ struct vring_rx_desc dd, *d = &dd;
+ volatile struct vring_rx_desc *_d = &vring->va[i].rx;
+ dma_addr_t pa;
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ wil_desc_addr_set(&d->dma.addr, pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16(sz);
+ *_d = *d;
+ vring->ctx[i].skb = skb;
+
+ return 0;
+}
+
+/**
+ * Adds radiotap header
+ *
+ * Any error indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct wil6210_rtap_vendor {
+ struct wil6210_rtap rtap;
+ /* vendor */
+ u8 vendor_oui[3] __aligned(2);
+ u8 vendor_ns;
+ __le16 vendor_skip;
+ u8 vendor_data[0];
+ } __packed;
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ struct wil6210_rtap_vendor *rtap_vendor;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ int phy_length = 0; /* phy info header size, bytes */
+ static char phy_data[128];
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ if (rtap_include_phy_info) {
+ rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+ /* calculate additional length */
+ if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+ /*
+ * PHY info starts at an 8-byte boundary and consists of
+ * 8-byte lines; the last line may be partially written
+ * (HW bug), so FW configures an excessive last line.
+ * The driver skips this last line.
+ */
+ int len = min_t(int, 8 + sizeof(phy_data),
+ wil_rxdesc_phy_length(d));
+
+ if (len > 8) {
+ void *p = skb_tail_pointer(skb);
+ void *pa = PTR_ALIGN(p, 8);
+
+ if (skb_tailroom(skb) >= len + (pa - p)) {
+ phy_length = len - 8;
+ memcpy(phy_data, pa, phy_length);
+ }
+ }
+ }
+ rtap_len += phy_length;
+ }
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap_vendor = (void *)skb_push(skb, rtap_len);
+ memset(rtap_vendor, 0, rtap_len);
+
+ rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+ (1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+ rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap_vendor->rtap.mcs_flags = 0;
+ rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+ if (rtap_include_phy_info) {
+ rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+ /* OUI for Wilocity 04:ce:14 */
+ rtap_vendor->vendor_oui[0] = 0x04;
+ rtap_vendor->vendor_oui[1] = 0xce;
+ rtap_vendor->vendor_oui[2] = 0x14;
+ rtap_vendor->vendor_ns = 1;
+ /* Rx descriptor + PHY data */
+ rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+ phy_length);
+ memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+ memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+ phy_length);
+ }
+}
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Rx descriptor copied to skb->cb
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+ volatile struct vring_rx_desc *_d;
+ struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int snaplen = wil_rx_snaplen();
+ unsigned int sz = mtu_max + ETH_HLEN + snaplen;
+ u16 dmalen;
+ u8 ftype;
+ int cid;
+ int i = (int)vring->swhead;
+ struct wil_net_stats *stats;
+
+ BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
+
+ if (unlikely(wil_vring_is_empty(vring)))
+ return NULL;
+
+ _d = &vring->va[i].rx;
+ if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
+ /* it is not an error, we just reached the end of the Rx done area */
+ return NULL;
+ }
+
+ skb = vring->ctx[i].skb;
+ vring->ctx[i].skb = NULL;
+ wil_vring_advance_head(vring, 1);
+ if (!skb) {
+ wil_err(wil, "No Rx skb at [%d]\n", i);
+ return NULL;
+ }
+ d = wil_skb_rxdesc(skb);
+ *d = *_d;
+ pa = wil_desc_addr(&d->dma.addr);
+
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ dmalen = le16_to_cpu(d->dma.length);
+
+ trace_wil6210_rx(i, d);
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ if (unlikely(dmalen > sz)) {
+ wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+ kfree_skb(skb);
+ return NULL;
+ }
+ skb_trim(skb, dmalen);
+
+ prefetch(skb->data);
+
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ cid = wil_rxdesc_cid(d);
+ stats = &wil->sta[cid].stats;
+ stats->last_mcs_rx = wil_rxdesc_mcs(d);
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+ /*
+ * Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
+ * The driver recognizes them by the frame type found in the Rx
+ * descriptor; if the type is not data, it is an 802.11 frame as-is
+ */
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
+ wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ /* TODO: process it */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (unlikely(skb->len < ETH_HLEN + snaplen)) {
+ wil_err(wil, "Short frame, len = %d\n", skb->len);
+ /* TODO: process it (i.e. BAR) */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ /* L4 IDENT is set when HW calculated the checksum; check the status
+ * and, in case of error, let the IP stack re-verify the packet -
+ * higher stack layers will handle retransmission (if required)
+ */
+ if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
+ /* L4 protocol identified, csum calculated */
+ if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If HW reports a bad checksum, let the IP stack re-check it.
+ * For example, HW doesn't understand the Microsoft IP stack,
+ * which mis-calculates the TCP checksum - if it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ }
+
+ if (snaplen) {
+ /* Packet layout
+ * +-------+-------+---------+------------+------+
+ * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
+ * +-------+-------+---------+------------+------+
+ * Need to remove SNAP, shifting SA and DA forward
+ */
+ memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, snaplen);
+ }
+
+ return skb;
+}
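When a snaplen is configured, the hardware delivers 6 extra bytes between the two MAC addresses and the EtherType (see the packet-layout comment above); the memmove()/skb_pull() pair at the end of the function strips them by shifting the 12 address bytes forward and dropping the head. The same operation shown on a plain byte array:

    /* The SNAP stripping done above, on a plain buffer: shift the
     * 12 address bytes forward by snaplen, then drop snaplen bytes
     * from the front.
     */
    #include <string.h>

    static unsigned char *strip_snap(unsigned char *frame, size_t len,
                                     size_t snaplen)
    {
            if (len < 12 + snaplen)
                    return frame;                   /* too short, leave as is */
            memmove(frame + snaplen, frame, 12);    /* 2 * ETH_ALEN bytes */
            return frame + snaplen;                 /* new start of the frame */
    }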
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
+ for (; next_tail = wil_vring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (unlikely(rc)) {
+ wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+ iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+ return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ * Called in softirq context (NAPI poll).
+ */
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ gro_result_t rc = GRO_NORMAL;
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ unsigned int len = skb->len;
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ struct ethhdr *eth = (void *)skb->data;
+ /* here we look for the DA, not A1; thus Rxdesc's 'mcast' indication
+ * is not suitable, need to look at the data
+ */
+ int mcast = is_multicast_ether_addr(eth->h_dest);
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ struct sk_buff *xmit_skb = NULL;
+ static const char * const gro_res_str[] = {
+ [GRO_MERGED] = "GRO_MERGED",
+ [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
+ [GRO_HELD] = "GRO_HELD",
+ [GRO_NORMAL] = "GRO_NORMAL",
+ [GRO_DROP] = "GRO_DROP",
+ };
+
+ skb_orphan(skb);
+
+ if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
+ if (mcast) {
+ /* send multicast frames both to higher layers in
+ * local net stack and back to the wireless medium
+ */
+ xmit_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ int xmit_cid = wil_find_cid(wil, eth->h_dest);
+
+ if (xmit_cid >= 0) {
+ /* The destination station is associated to
+ * this AP (in this VLAN), so send the frame
+ * directly to it and do not pass it to local
+ * net stack.
+ */
+ xmit_skb = skb;
+ skb = NULL;
+ }
+ }
+ }
+ if (xmit_skb) {
+ /* Send to wireless media and increase priority by 256 to
+ * keep the received priority instead of reclassifying
+ * the frame (see cfg80211_classify8021d).
+ */
+ xmit_skb->dev = ndev;
+ xmit_skb->priority += 256;
+ xmit_skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(xmit_skb);
+ skb_reset_mac_header(xmit_skb);
+ wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
+ dev_queue_xmit(xmit_skb);
+ }
+
+ if (skb) { /* deliver to local stack */
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ rc = napi_gro_receive(&wil->napi_rx, skb);
+ wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
+ len, gro_res_str[rc]);
+ }
+ /* statistics. rc set to GRO_NORMAL for AP bridging */
+ if (unlikely(rc == GRO_DROP)) {
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ } else {
+ ndev->stats.rx_packets++;
+ stats->rx_packets++;
+ ndev->stats.rx_bytes += len;
+ stats->rx_bytes += len;
+ if (mcast)
+ ndev->stats.multicast++;
+ }
+}
+
+/**
+ * Process all completed skb's from the Rx VRING
+ *
+ * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
+ */
+void wil_rx_handle(struct wil6210_priv *wil, int *quota)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ struct sk_buff *skb;
+
+ if (unlikely(!v->va)) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
+ (*quota)--;
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+ wil_netif_rx_any(skb, ndev);
+ } else {
+ wil_rx_reorder(wil, skb);
+ }
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil, u16 size)
+{
+ struct vring *vring = &wil->vring_rx;
+ int rc;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (vring->va) {
+ wil_err(wil, "Rx ring already allocated\n");
+ return -EINVAL;
+ }
+
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ rc = wmi_rx_chain_add(wil, vring);
+ if (rc)
+ goto err_free;
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring, 0);
+
+ return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (vring->va)
+ wil_vring_free(wil, vring, 0);
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid)
+{
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = cpu_to_le16(size),
+ },
+ .ringid = id,
+ .cidxtid = mk_cidxtid(cid, tid),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 0,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+ wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(txdata, 0, sizeof(*txdata));
+ spin_lock_init(&txdata->lock);
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ wil->vring2cid_tid[id][0] = cid;
+ wil->vring2cid_tid[id][1] = tid;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ txdata->enabled = 1;
+ if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+ wil_addba_tx_request(wil, id, agg_wsize);
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
+{
+ int rc;
+ struct wmi_bcast_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = cpu_to_le16(size),
+ },
+ .ringid = id,
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+ wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(txdata, 0, sizeof(*txdata));
+ spin_lock_init(&txdata->lock);
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
+ wil->vring2cid_tid[id][1] = 0; /* TID */
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ txdata->enabled = 1;
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
+
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ if (!vring->va)
+ return;
+
+ wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+
+ spin_lock_bh(&txdata->lock);
+ txdata->enabled = 0; /* no Tx can be in progress or start anew */
+ spin_unlock_bh(&txdata->lock);
+ /* make sure NAPI won't touch this vring */
+ if (test_bit(wil_status_napi_en, wil->status))
+ napi_synchronize(&wil->napi_tx);
+
+ wil_vring_free(wil, vring, 1);
+ memset(txdata, 0, sizeof(*txdata));
+}
+
+static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ int i;
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil_find_cid(wil, eth->h_dest);
+
+ if (cid < 0)
+ return NULL;
+
+ if (!wil->sta[cid].data_port_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ return NULL;
+
+ /* TODO: fix for multiple TID */
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if (wil->vring2cid_tid[i][0] == cid) {
+ struct vring *v = &wil->vring_tx[i];
+
+ wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
+ __func__, eth->h_dest, i);
+ if (v->va) {
+ return v;
+ } else {
+ wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ return NULL;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb);
+
+static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v;
+ int i;
+ u8 cid;
+
+ /* In STA mode, there is expected to be only 1 VRING
+ * for the AP we are connected to.
+ * Find the first vring and see whether it is eligible for data
+ */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ continue;
+
+ cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
+
+ if (!wil->sta[cid].data_port_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ break;
+
+ wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
+
+ return v;
+ }
+
+ wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+}
+
+/* Use one of 2 strategies:
+ *
+ * 1. New (real broadcast):
+ * use the dedicated broadcast vring
+ * 2. Old (pseudo-DMS):
+ * find the 1-st vring and return it;
+ * duplicate the skb and send it to the other active vrings;
+ * in all cases override the dest address with the unicast peer's address.
+ * Use the old strategy when the new one is not supported yet:
+ * - for PBSS
+ * - for secure link
+ */
+static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v;
+ int i = wil->bcast_vring;
+
+ if (i < 0)
+ return NULL;
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ return NULL;
+
+ return v;
+}
+
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb, int vring_index)
+{
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil->vring2cid_tid[vring_index][0];
+
+ ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
+}
+
+static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v, *v2;
+ struct sk_buff *skb2;
+ int i;
+ u8 cid;
+ struct ethhdr *eth = (void *)skb->data;
+ char *src = eth->h_source;
+
+ /* find 1-st vring eligible for data */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ continue;
+
+ cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ /* don't Tx back to source when re-routing Rx->Tx at the AP */
+ if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+ continue;
+
+ goto found;
+ }
+
+ wil_dbg_txrx(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+
+found:
+ wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb, i);
+
+ /* find other active vrings and duplicate skb for each */
+ for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
+ v2 = &wil->vring_tx[i];
+ if (!v2->va)
+ continue;
+ cid = wil->vring2cid_tid[i][0];
+ if (cid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
+ continue;
+
+ skb2 = skb_copy(skb, GFP_ATOMIC);
+ if (skb2) {
+ wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb2, i);
+ wil_tx_vring(wil, v2, skb2);
+ } else {
+ wil_err(wil, "skb_copy failed\n");
+ }
+ }
+
+ return v;
+}
+
+static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = wil->wdev;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP)
+ return wil_find_tx_bcast_2(wil, skb);
+
+ if (wil->privacy)
+ return wil_find_tx_bcast_2(wil, skb);
+
+ return wil_find_tx_bcast_1(wil, skb);
+}
+
+static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
+ int vring_index)
+{
+ wil_desc_addr_set(&d->dma.addr, pa);
+ d->dma.ip_length = 0;
+	/* 0..6: mac_length; 7: ip_version, 0 - IP6, 1 - IP4 */
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = cpu_to_le16((u16)len);
+ d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
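+/* Illustrative example: an skb with 2 page fragments (nr_frags == 2)
+ * occupies 3 descriptors (head + fragments), so the num_of_descriptors
+ * field below is set to 3
+ */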
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
+
+static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
+ struct vring_tx_desc *d,
+ struct sk_buff *skb)
+{
+ int protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ protocol = ip_hdr(skb)->protocol;
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |=
+ (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ case IPPROTO_UDP:
+ /* L4 header len: UDP header length */
+ d->dma.d0 |=
+ (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ d->dma.ip_length = skb_network_header_len(skb);
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+
+ return 0;
+}
+
+static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct vring_tx_desc dd, *d = &dd;
+ volatile struct vring_tx_desc *_d;
+ u32 swhead = vring->swhead;
+ int avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f = 0;
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ uint i = swhead;
+ dma_addr_t pa;
+ int used;
+ bool mcast = (vring_index == wil->bcast_vring);
+ uint len = skb_headlen(skb);
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+
+ if (unlikely(!txdata->enabled))
+ return -EINVAL;
+
+ if (unlikely(avail < 1 + nr_frags)) {
+ wil_err_ratelimited(wil,
+ "Tx ring[%2d] full. No space for %d fragments\n",
+ vring_index, 1 + nr_frags);
+ return -ENOMEM;
+ }
+ _d = &vring->va[i].tx;
+
+ pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
+ skb_headlen(skb), skb->data, &pa);
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
+ /* 1-st segment */
+ wil_tx_desc_map(d, pa, len, vring_index);
+ if (unlikely(mcast)) {
+ d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
+ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
+ /* set MCS 1 */
+ d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
+ /* packet mode 2 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
+ (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
+ }
+ }
+ /* Process TCP/UDP checksum offloading */
+ if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+ wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
+ vring_index);
+ goto dma_error;
+ }
+
+ vring->ctx[i].nr_frags = nr_frags;
+ wil_tx_desc_set_nr_frags(d, nr_frags);
+
+ /* middle segments */
+ for (; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+
+ *_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+ i = (swhead + f + 1) % vring->size;
+ _d = &vring->va[i].tx;
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
+ wil_tx_desc_map(d, pa, len, vring_index);
+ /* no need to check return code -
+ * if it succeeded for 1-st descriptor,
+ * it will succeed here too
+ */
+ wil_tx_desc_offload_cksum_set(wil, d, skb);
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ *_d = *d;
+ wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
+ wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
+ /* performance monitoring */
+ used = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used, used + nr_frags + 1)) {
+ txdata->idle += get_cycles() - txdata->last_idle;
+ wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+ vring_index, used, used + nr_frags + 1);
+ }
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
+ vring->swhead);
+ trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
+ iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ nr_frags = f + 1; /* frags mapped + one for skb head */
+ for (f = 0; f < nr_frags; f++) {
+ struct wil_ctx *ctx;
+
+ i = (swhead + f) % vring->size;
+ ctx = &vring->ctx[i];
+ _d = &vring->va[i].tx;
+ *d = *_d;
+ _d->dma.status = TX_DMA_STATUS_DU;
+ wil_txdesc_unmap(dev, d, ctx);
+
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+
+ memset(ctx, 0, sizeof(*ctx));
+ }
+
+ return -EINVAL;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ int vring_index = vring - wil->vring_tx;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+ int rc;
+
+ spin_lock(&txdata->lock);
+ rc = __wil_tx_vring(wil, vring, skb);
+ spin_unlock(&txdata->lock);
+ return rc;
+}
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct ethhdr *eth = (void *)skb->data;
+ bool bcast = is_multicast_ether_addr(eth->h_dest);
+ struct vring *vring;
+ static bool pr_once_fw;
+ int rc;
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
+ if (!pr_once_fw) {
+ wil_err(wil, "FW not ready\n");
+ pr_once_fw = true;
+ }
+ goto drop;
+ }
+ if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
+ wil_err(wil, "FW not connected\n");
+ goto drop;
+ }
+ if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ pr_once_fw = false;
+
+ /* find vring */
+ if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
+ /* in STA mode (ESS), all to same VRING */
+ vring = wil_find_tx_vring_sta(wil, skb);
+ } else { /* direct communication, find matching VRING */
+ vring = bcast ? wil_find_tx_bcast(wil, skb) :
+ wil_find_tx_ucast(wil, skb);
+ }
+ if (unlikely(!vring)) {
+ wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+
+ /* do we still have enough room in the vring? */
+ if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
+ }
+
+ switch (rc) {
+ case 0:
+ /* statistics will be updated on the tx_complete */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ return NETDEV_TX_BUSY;
+ default:
+ break; /* goto drop; */
+ }
+ drop:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+static inline bool wil_need_txstat(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (void *)skb->data;
+
+ return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+}
+
+static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
+{
+ if (unlikely(wil_need_txstat(skb)))
+ skb_complete_wifi_ack(skb, acked);
+ else
+ acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Return number of descriptors cleared
+ *
+ * Safe to call from IRQ
+ */
+int wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct device *dev = wil_to_dev(wil);
+ struct vring *vring = &wil->vring_tx[ringid];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ int done = 0;
+ int cid = wil->vring2cid_tid[ringid][0];
+ struct wil_net_stats *stats = NULL;
+ volatile struct vring_tx_desc *_d;
+ int used_before_complete;
+ int used_new;
+
+ if (unlikely(!vring->va)) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return 0;
+ }
+
+ if (unlikely(!txdata->enabled)) {
+ wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+ return 0;
+ }
+
+ wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+
+ used_before_complete = wil_vring_used_tx(vring);
+
+ if (cid < WIL6210_MAX_CID)
+ stats = &wil->sta[cid].stats;
+
+ while (!wil_vring_is_empty(vring)) {
+ int new_swtail;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+		/* For a fragmented skb, HW sets the DU bit only for the
+		 * last fragment; look for it
+		 */
+ int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+ /* TODO: check we are not past head */
+
+ _d = &vring->va[lf].tx;
+ if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
+ break;
+
+ new_swtail = (lf + 1) % vring->size;
+ while (vring->swtail != new_swtail) {
+ struct vring_tx_desc dd, *d = &dd;
+ u16 dmalen;
+ struct sk_buff *skb;
+
+ ctx = &vring->ctx[vring->swtail];
+ skb = ctx->skb;
+ _d = &vring->va[vring->swtail].tx;
+
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+ d->dma.error);
+ wil_dbg_txrx(wil,
+ "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ ringid, vring->swtail, dmalen,
+ d->dma.status, d->dma.error);
+ wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_txdesc_unmap(dev, d, ctx);
+
+ if (skb) {
+ if (likely(d->dma.error == 0)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (stats) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ }
+ } else {
+ ndev->stats.tx_errors++;
+ if (stats)
+ stats->tx_errors++;
+ }
+ wil_consume_skb(skb, d->dma.error == 0);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* There is no need to touch HW descriptor:
+			 * - status bit TX_DMA_STATUS_DU is set by design,
+ * so hardware will not try to process this desc.,
+ * - rest of descriptor will be initialized on Tx.
+ */
+ vring->swtail = wil_vring_next_tail(vring);
+ done++;
+ }
+ }
+
+ /* performance monitoring */
+ used_new = wil_vring_used_tx(vring);
+ if (wil_val_in_range(vring_idle_trsh,
+ used_new, used_before_complete)) {
+ wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
+ ringid, used_before_complete, used_new);
+ txdata->last_idle = get_cycles();
+ }
+
+ if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
+ wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+ }
+
+ return done;
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000..d90c8aa20
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* default size of MAC Tx/Rx buffers */
+#define TXRX_BUF_LEN_DEFAULT (2048)
+
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+
+/* Common representation of physical address in Vring */
+struct vring_dma_addr {
+ __le32 addr_low;
+ __le16 addr_high;
+} __packed;
+
+static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+{
+ return le32_to_cpu(addr->addr_low) |
+ ((u64)le16_to_cpu(addr->addr_high) << 32);
+}
+
+static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+ dma_addr_t pa)
+{
+ addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+ addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
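+	/* e.g. pa == 0x123456789 is stored as addr_low == 0x23456789 and
+	 * addr_high == 0x0001; wil_desc_addr() above reconstructs the
+	 * same 48-bit value
+	 */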
+}
+
+/* Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrupt_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..30 : reserved1:3
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5..14 : reserved0:10
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+/* Tx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8 : cmd_eop:1 This descriptor is the last one in the packet
+ * bit 9 : cmd_mark_wb:1 mark the descriptor for status writeback
+ * bit 10 : cmd_dma_it:1 immediate interrupt
+ * bit 11..12 : SBD - Segment Buffer Details
+ * 00 - Header Segment
+ * 01 - First Data Segment
+ * 10 - Medium Data Segment
+ * 11 - Last Data Segment
+ * bit 13 : TSE - TCP Segmentation Enable
+ * bit 14 : IIC - Directs the HW to Insert IPv4 Checksum
+ * bit 15 : ITC - Directs the HW to Insert TCP/UDP Checksum
+ * bit 16..20 : QID - The target QID that the packet should be stored
+ * in the MAC.
+ * bit 21 : PO - Pseudo header Offload:
+ * 0 - Use the pseudo header value from the TCP checksum field
+ *		1 - Calculate Pseudo header Checksum
+ * bit 22 : NC - No UDP Checksum
+ * bit 23..29 : reserved
+ * bit 30..31 : L4T - Layer 4 Type: 00 - UDP, 10 - TCP, 01, 11 - Reserved
+ *		If L4Len equals 0, there is no L4 at all
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
+ * offload feature
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * [byte 12] error
+ * bit 0.. 2 : mac_status:3
+ * bit 3.. 7 : reserved:5
+ * [byte 13] status
+ * bit 0 : DU:1 Descriptor Used
+ * bit 1.. 7 : reserved:7
+ * [word 7] length
+ */
+struct vring_tx_dma {
+ u32 d0;
+ struct vring_dma_addr addr;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7; reserved */
+ __le16 length;
+} __packed;
+
+/* Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : cid:3 The Source index that was found during parsing the TA.
+ * This field is used to define the source of the packet
+ * bit 7 : reserved:1
+ * bit 8.. 9 : mid:2 The MAC virtual number
+ * bit 10..11 : frame_type:2 : The FC (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1 FC (b14)
+ * bit 8.. 9 : ds_bits:2 FC (b9-8)
+ * bit 10 : a_msdu_present:1 QoS (b7)
+ * bit 11 : a_msdu_type:1 QoS (b8)
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4 this signal tells the DMA to assert an interrupt
+ * after it writes the packet
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3.. 4 : fc_protocol_ver:2 The FC (b1-0) - Protocol Version
+ * bit 5 : fc_order:1 The FC Control (b15) -Order
+ * bit 6.. 7 : qos_ack_policy:2 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1 QoS (b15)
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+/* Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length. The field is only valid if
+ * L4I bit is set
+ * bit 8 : cmd_eop:1 set to 1
+ * bit 9 : cmd_rt:1 set to 1
+ * bit 10 : cmd_dma_it:1 immediate interrupt
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14 It is valid when the PII is set.
+ *		When the FFM bit is set, bits 29-27 are used for
+ *		Flex Filter Match: matching index to one of the L2
+ *		EtherType Flex Filters
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * 00 - UDP, 01 - TCP, 10, 11 - reserved
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8 The field is valid only if the L3I bit is set
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
+ * [dword 3]
+ * [byte 12] error
+ * bit 0 : FCS:1
+ * bit 1 : MIC:1
+ * bit 2 : Key miss:1
+ * bit 3 : Replay:1
+ * bit 4 : L3:1 IPv4 checksum
+ * bit 5 : L4:1 TCP/UDP checksum
+ * bit 6.. 7 : reserved:2
+ * [byte 13] status
+ * bit 0 : DU:1 Descriptor Used
+ * bit 1 : EOP:1 The descriptor indicates the End of Packet
+ * bit 2 : error:1
+ * bit 3 : MI:1 MAC Interrupt is asserted (according to parser decision)
+ * bit 4 : L3I:1 L3 identified and checksum calculated
+ * bit 5 : L4I:1 L4 identified and checksum calculated
+ * bit 6 : PII:1 PHY Info Included in the packet
+ * bit 7 : FFM:1 EtherType Flex Filter Match
+ * [word 7] length
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+
+/* Error field, offload bits */
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
+
+/* Status field */
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_ERROR BIT(2)
+
+#define RX_DMA_STATUS_L3I BIT(4)
+#define RX_DMA_STATUS_L4I BIT(5)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+
+struct vring_rx_dma {
+ u32 d0;
+ struct vring_dma_addr addr;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ __le16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+ struct vring_tx_desc tx;
+ struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 0, 3);
+}
+
+static inline int wil_rxdesc_cid(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 4, 6);
+}
+
+static inline int wil_rxdesc_mid(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+static inline int wil_rxdesc_subtype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 12, 15);
+}
+
+static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 16, 27);
+}
+
+static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 28, 31);
+}
+
+static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_mcs(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 13, 14);
+}
+
+static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
+{
+ return (void *)skb->cb;
+}
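+
+/* The Rx path keeps a copy of the hardware descriptor in skb->cb
+ * (struct vring_rx_desc fits in the 48-byte control block), so the
+ * wil_rxdesc_*() accessors above stay usable after the ring entry
+ * is recycled.
+ */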
+
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn);
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r);
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000..8be60df31
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/timex.h>
+#include "wil_platform.h"
+
+extern bool no_fw_recovery;
+extern unsigned int mtu_max;
+extern unsigned short rx_ring_overflow_thrsh;
+extern int agg_wsize;
+extern u32 vring_idle_trsh;
+extern bool rx_align_2;
+
+#define WIL_NAME "wil6210"
+#define WIL_FW_NAME "/*(DEBLOBBED)*/" /* code */
+#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
+
+#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * requires @b0 <= @b1, otherwise the result is incorrect
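+ * e.g. WIL_GET_BITS(0x12345678, 8, 15) == 0x56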
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL_TX_Q_LEN_DEFAULT (4000)
+#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
+#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
+/* limit ring size in range [32..32k] */
+#define WIL_RING_SIZE_ORDER_MIN (5)
+#define WIL_RING_SIZE_ORDER_MAX (15)
+#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
+#define WIL6210_MAX_CID (8) /* HW limit */
+#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
+#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
+#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
+/* Hardware offload block adds the following:
+ * 26 bytes - 3-address QoS data header
+ * 8 bytes - IV + EIV (for GCMP)
+ * 8 bytes - SNAP
+ * 16 bytes - MIC (for GCMP)
+ * 4 bytes - CRC
+ */
+#define WIL_MAX_MPDU_OVERHEAD (62)
+
+/* Calculate MAC buffer size for the firmware. It includes all overhead,
+ * as it will go over the air, and needs to be 8-byte aligned
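+ * e.g. wil_mtu2macbuf(1500) == ALIGN(1500 + 62, 8) == 1568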
+ */
+static inline u32 wil_mtu2macbuf(u32 mtu)
+{
+ return ALIGN(mtu + WIL_MAX_MPDU_OVERHEAD, 8);
+}
+
+/* MTU for Ethernet needs to take into account the 8-byte SNAP header
+ * to be added when encapsulating an Ethernet frame into 802.11
+ */
+#define WIL_MAX_ETH_MTU (IEEE80211_MAX_DATA_LEN_DMG - 8)
+/* Maximum interrupt threshold value supported by wil6210 is 5 sec */
+#define WIL6210_ITR_TRSH_MAX (5000000)
+#define WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
+#define WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
+#define WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
+#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
+#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
+#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
+#define WIL6210_RX_HIGH_TRSH_INIT (0)
+#define WIL6210_RX_HIGH_TRSH_DEFAULT \
+ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr) (fwaddr - WIL6210_FW_HOST_OFF)
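+/* e.g. the dma_rgf block at FW address 0x881000 is accessed at host
+ * offset HOSTADDR(0x881000) == 0x001000, matching the mapping table above
+ */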
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
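+
+/* Individual registers in an ICR block are addressed as
+ * HOSTADDR(RGF_xxx_ICR) + offsetof(struct RGF_ICR, <register>);
+ * see HOST_SW_INT below, which targets the ICS (cause set) register
+ * of RGF_USER_USER_ICR.
+ */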
+
+struct RGF_BL {
+ u32 ready; /* 0x880A3C bit [0] */
+#define BIT_BL_READY BIT(0)
+ u32 version; /* 0x880A40 version of the BL struct */
+ u32 rf_type; /* 0x880A44 ID of the connected RF */
+ u32 baseband_type; /* 0x880A48 ID of the baseband */
+ u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
+ u8 pad[2];
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USAGE_1 (0x880004)
+#define RGF_USER_USAGE_6 (0x880018)
+#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
+ #define HW_MACHINE_BOOT_DONE (0x3fffffd)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+ #define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+ #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_BL (0x880A3C) /* Boot Loader */
+#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
+#define RGF_USER_CLKS_CTL_0 (0x880abc)
+ #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */
+ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+ #define BIT_HPAL_PERST_FROM_PAD BIT(6)
+ #define BIT_CAR_PERST_RST BIT(7)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18)
+#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
+#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
+ #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+ #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+ #define BIT_DMA_EP_RX_ICR_RX_HTRSH BIT(1)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
+
+/* Legacy interrupt moderation control (before Sparrow v2)*/
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881c64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* Offload control (Sparrow B0+) */
+#define RGF_DMA_OFUL_NID_0 (0x881cd4)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN BIT(0)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_TR_EN BIT(1)
+ #define BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC BIT(2)
+ #define BIT_DMA_OFUL_NID_0_TX_EXT_A3_SRC BIT(3)
+
+/* New (sparrow v2+) interrupt moderation control */
+#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
+#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
+#define RGF_DMA_ITR_TX_CNT_DATA (0x881d38)
+#define RGF_DMA_ITR_TX_CNT_CTL (0x881d3c)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_TX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_TX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_TX_IDL_CNT_TRSH (0x881d60)
+#define RGF_DMA_ITR_TX_IDL_CNT_DATA (0x881d64)
+#define RGF_DMA_ITR_TX_IDL_CNT_CTL (0x881d68)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_TX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+#define RGF_DMA_ITR_RX_DESQ_NO_MOD (0x881d50)
+#define RGF_DMA_ITR_RX_CNT_TRSH (0x881d44)
+#define RGF_DMA_ITR_RX_CNT_DATA (0x881d48)
+#define RGF_DMA_ITR_RX_CNT_CTL (0x881d4c)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_CNT_CTL_REACHED_TRESH BIT(4)
+ #define BIT_DMA_ITR_RX_CNT_CTL_CROSS_EN BIT(5)
+ #define BIT_DMA_ITR_RX_CNT_CTL_FREE_RUNNIG BIT(6)
+#define RGF_DMA_ITR_RX_IDL_CNT_TRSH (0x881d54)
+#define RGF_DMA_ITR_RX_IDL_CNT_DATA (0x881d58)
+#define RGF_DMA_ITR_RX_IDL_CNT_CTL (0x881d5c)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EN BIT(0)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
+ #define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_HP_CTRL (0x88265c)
+#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
+
+/* MAC timer, usec, for packet lifetime */
+#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
+
+#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
+#define RGF_CAF_OSC_CONTROL (0x88afa4)
+ #define BIT_CAF_OSC_XTAL_EN BIT(0)
+#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
+ #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+
+#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
+ #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
+
+enum {
+ HW_VER_UNKNOWN,
+ HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
+};
+
+/* popular locations */
+#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+ offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+
+/* Hardware definitions end */
+struct fw_map {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+ const char *name; /* for debugfs */
+};
+
+/* array size should be in sync with actual definition in the wmi.c */
+extern const struct fw_map fw_mapping[7];
+
+/**
+ * mk_cidxtid - construct @cidxtid field
+ * @cid: CID value
+ * @tid: TID value
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline u8 mk_cidxtid(u8 cid, u8 tid)
+{
+ return ((tid & 0xf) << 4) | (cid & 0xf);
+}
+
+/**
+ * parse_cidxtid - parse @cidxtid field
+ * @cid: store CID value here
+ * @tid: store TID value here
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
+{
+ *cid = cidxtid & 0xf;
+ *tid = (cidxtid >> 4) & 0xf;
+}
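+
+/* e.g. mk_cidxtid(3, 2) == 0x23; parse_cidxtid(0x23, &cid, &tid)
+ * recovers cid == 3 and tid == 2
+ */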
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+/**
+ * struct wil6210_mbox_hdr_wmi - WMI header
+ *
+ * @mid: MAC ID
+ * 00 - default, created by FW
+ * 01..0f - WiFi ports, driver to create
+ * 10..fe - debug
+ * ff - broadcast
+ * @id: command/event ID
+ * @timestamp: FW fills for events, free-running msec timer
+ */
+struct wil6210_mbox_hdr_wmi {
+ u8 mid;
+ u8 reserved;
+ __le16 id;
+ __le32 timestamp;
+} __packed;
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+enum { /* for wil_ctx.mapped_as */
+ wil_mapped_as_none = 0,
+ wil_mapped_as_single = 1,
+ wil_mapped_as_page = 2,
+};
+
+/**
+ * struct wil_ctx - software context for Vring descriptor
+ */
+struct wil_ctx {
+ struct sk_buff *skb;
+ u8 nr_frags;
+ u8 mapped_as;
+};
+
+union vring_desc;
+
+struct vring {
+ dma_addr_t pa;
+ volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+ u16 size; /* number of vring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ struct wil_ctx *ctx; /* ctx[size] - software context */
+};
+
+/**
+ * Additional data for Tx Vring
+ */
+struct vring_tx_data {
+ int enabled;
+ cycles_t idle, last_idle, begin;
+ u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
+ u16 agg_timeout;
+ u8 agg_amsdu;
+ bool addba_in_progress; /* if set, agg_xxx is for request in progress */
+ spinlock_t lock;
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0,
+ wil_status_fwconnecting,
+ wil_status_fwconnected,
+ wil_status_dontscan,
+ wil_status_reset_done,
+ wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+ wil_status_last /* keep last */
+};
+
+struct pci_dev;
+
+/**
+ * struct tid_ampdu_rx - TID aggregation information (Rx).
+ *
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @reorder_time: jiffies when skb was added
+ * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
+ * @head_seq_num: head sequence number in reordering buffer.
+ * @stored_mpdu_num: number of MPDUs in reordering buffer
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @timeout: reset timer value (in TUs).
+ * @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ *
+ * This structure's lifetime is managed by RCU; assignments to
+ * the array holding it must be done while holding the aggregation mutex.
+ *
+ */
+struct wil_tid_ampdu_rx {
+ struct sk_buff **reorder_buf;
+ unsigned long *reorder_time;
+ struct timer_list session_timer;
+ struct timer_list reorder_timer;
+ unsigned long last_rx;
+ u16 head_seq_num;
+ u16 stored_mpdu_num;
+ u16 ssn;
+ u16 buf_size;
+ u16 timeout;
+ u16 ssn_last_drop;
+ u8 dialog_token;
+	bool first_time; /* is it the 1-st time this buffer is used? */
+};
+
+enum wil_sta_status {
+ wil_sta_unused = 0,
+ wil_sta_conn_pending = 1,
+ wil_sta_connected = 2,
+};
+
+#define WIL_STA_TID_NUM (16)
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ unsigned long rx_dropped;
+ u16 last_mcs_rx;
+};
+
+/**
+ * struct wil_sta_info - data for peer
+ *
+ * A peer is identified by its CID (connection ID).
+ * The NIC performs beam forming for each peer;
+ * if no beam forming is done, frame exchange is not
+ * possible.
+ */
+struct wil_sta_info {
+ u8 addr[ETH_ALEN];
+ enum wil_sta_status status;
+ struct wil_net_stats stats;
+ bool data_port_open; /* can send any data, not only EAPOL */
+ /* Rx BACK */
+ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
+ spinlock_t tid_rx_lock; /* guarding tid_rx array */
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+};
+
+enum {
+ fw_recovery_idle = 0,
+ fw_recovery_pending = 1,
+ fw_recovery_running = 2,
+};
+
+enum {
+ hw_capability_last
+};
+
+struct wil_back_rx {
+ struct list_head list;
+ /* request params, converted to CPU byte order - what we asked for */
+ u8 cidxtid;
+ u8 dialog_token;
+ u16 ba_param_set;
+ u16 ba_timeout;
+ u16 ba_seq_ctrl;
+};
+
+struct wil_back_tx {
+ struct list_head list;
+ /* request params, converted to CPU byte order - what we asked for */
+ u8 ringid;
+ u8 agg_wsize;
+ u16 agg_timeout;
+};
+
+struct wil_probe_client_req {
+ struct list_head list;
+ u64 cookie;
+ u8 cid;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ int n_msi;
+ struct wireless_dev *wdev;
+ void __iomem *csr;
+ DECLARE_BITMAP(status, wil_status_last);
+ u32 fw_version;
+ u32 hw_version;
+ const char *hw_name;
+ DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ u8 n_mids; /* number of additional MIDs as reported by FW */
+ u32 recovery_count; /* num of FW recovery attempts in a short time */
+ u32 recovery_state; /* FW recovery state machine */
+ unsigned long last_fw_recovery; /* jiffies of last fw recovery */
+ wait_queue_head_t wq; /* for all wait_event() use */
+ /* profile */
+ u32 monitor_flags;
+ u32 privacy; /* secure connection? */
+ int sinfo_gen;
+ u32 ap_isolate; /* no intra-BSS communication */
+ /* interrupt moderation */
+ u32 tx_max_burst_duration;
+ u32 tx_interframe_timeout;
+ u32 rx_max_burst_duration;
+ u32 rx_interframe_timeout;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ struct completion wmi_call;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wq_service;
+ struct work_struct connect_worker;
+ struct work_struct disconnect_worker;
+ struct work_struct fw_error_worker; /* for FW error recovery */
+ struct timer_list connect_timer;
+ struct timer_list scan_timer; /* detect scan timeout */
+ int pending_connect_cid;
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+ * - fill in IRQ from wil6210_irq_misc,
+ * - consumed in thread by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ /* BACK */
+ struct list_head back_rx_pending;
+ struct mutex back_rx_mutex; /* protect @back_rx_pending */
+ struct work_struct back_rx_worker;
+ struct list_head back_tx_pending;
+ struct mutex back_tx_mutex; /* protect @back_tx_pending */
+ struct work_struct back_tx_worker;
+ /* keep alive */
+ struct list_head probe_client_pending;
+ struct mutex probe_client_mutex; /* protect @probe_client_pending */
+ struct work_struct probe_client_worker;
+ /* DMA related */
+ struct vring vring_rx;
+ struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+ struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
+ u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_sta_info sta[WIL6210_MAX_CID];
+ int bcast_vring;
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* statistics */
+ atomic_t isr_count_rx, isr_count_tx;
+ /* debugfs */
+ struct dentry *debug;
+ struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+
+ void *platform_handle;
+ struct wil_platform_ops platform_ops;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+__printf(2, 3)
+void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+#define wil_dbg(wil, fmt, arg...) do { \
+ netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
+ wil_dbg_trace(wil, fmt, ##arg); \
+} while (0)
+
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+static inline
+void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
+
+static inline
+void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil, bool no_fw);
+void wil_fw_error_recovery(struct wil6210_priv *wil);
+void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+int wil_up(struct wil6210_priv *wil);
+int __wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+int __wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
+void wil_set_ethtoolops(struct net_device *ndev);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
+int wmi_rxon(struct wil6210_priv *wil, bool on);
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
+int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
+int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
+int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
+int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl);
+void wil_back_rx_worker(struct work_struct *work);
+void wil_back_rx_flush(struct wil6210_priv *wil);
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
+void wil_back_tx_worker(struct work_struct *work);
+void wil_back_tx_flush(struct wil6210_priv *wil);
+
+void wil6210_clear_irq(struct wil6210_priv *wil);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil_mask_irq(struct wil6210_priv *wil);
+void wil_unmask_irq(struct wil6210_priv *wil);
+void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
+void wil_disable_irq(struct wil6210_priv *wil);
+void wil_enable_irq(struct wil6210_priv *wil);
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+ struct station_info *sinfo);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
+int wmi_pcp_stop(struct wil6210_priv *wil);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
+ u16 reason_code, bool from_event);
+void wil_probe_client_flush(struct wil6210_priv *wil);
+void wil_probe_client_worker(struct work_struct *work);
+
+int wil_rx_init(struct wil6210_priv *wil, u16 size);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
+int wil_bcast_init(struct wil6210_priv *wil);
+void wil_bcast_fini(struct wil6210_priv *wil);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int wil_tx_complete(struct wil6210_priv *wil, int ringid);
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil, int *quota);
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
+int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
new file mode 100644
index 000000000..976a071ba
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "linux/device.h"
+#include "wil_platform.h"
+
+/**
+ * wil_platform_init() - wil6210 platform module init
+ *
+ * The function must be called before all other functions in this module.
+ * It returns a handle that is used with the rest of the API.
+ *
+ */
+void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops)
+{
+ void *handle = NULL;
+
+ if (!ops) {
+ dev_err(dev, "Invalid parameter. Cannot init platform module\n");
+ return NULL;
+ }
+
+ /* platform specific init functions should be called here */
+
+ return handle;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
new file mode 100644
index 000000000..158c73b04
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL_PLATFORM_H__
+#define __WIL_PLATFORM_H__
+
+struct device;
+
+/**
+ * struct wil_platform_ops - wil platform module callbacks
+ */
+struct wil_platform_ops {
+ int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
+ int (*suspend)(void *handle);
+ int (*resume)(void *handle);
+ void (*uninit)(void *handle);
+};
+
+void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops);
+
+#endif /* __WIL_PLATFORM_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000..9fe2085be
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,1357 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "wmi.h"
+#include "trace.h"
+
+static uint max_assoc_sta = WIL6210_MAX_CID;
+module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
+
+int agg_wsize; /* = 0; */
+module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
+ " 0 - use default; < 0 - don't auto-establish");
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
+ * invoked for the misc IRQ and calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then the work queue @wil->wmi_wq wakes
+ * up and handles events within @wmi_event_worker. Every event gets detached
+ * from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, that second flow
+ * could never complete because the IRQ thread would be blocked.
+ */
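+
+/*
+ * In terms of the code in this file, the flow described above is roughly:
+ *
+ *   threaded misc IRQ handler
+ *     -> wmi_recv_cmd()      copies the event out of the mailbox, appends
+ *                            it to @wil->pending_wmi_ev and queues work on
+ *                            @wil->wmi_wq
+ *   wmi_event_worker()       runs from the work queue
+ *     -> next_wmi_ev()       detaches one pending event from the list
+ *     -> wmi_event_handle()  either completes a wmi_call() waiting for this
+ *                            reply, or dispatches the event to a handler
+ *                            from wmi_evt_handlers[]
+ */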
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ * - MAC CPU (ucode)
+ * - User CPU (firmware)
+ * - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000.
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible from the host. To read from these addresses, the
+ * alternative AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address          PCI/Host address
+ *                         0x880000 .. 0xa80000  2Mb BAR0
+ * 0x800000 .. 0x807000    0x900000 .. 0x907000  28k DCCM
+ * 0x840000 .. 0x857000    0x908000 .. 0x91f000  92k PERIPH
+ */
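+
+/*
+ * Worked example of the mapping above: a firmware linker address inside the
+ * PERIPH region, e.g. 0x841000, corresponds to the AHB/host address
+ *
+ *   0x841000 + (0x908000 - 0x840000) = 0x909000
+ *
+ * which falls inside BAR0 at offset 0x909000 - 0x880000 = 0x89000 from its
+ * base. The translation itself is done by wmi_addr_remap() below, using the
+ * @fw_mapping table; 0x841000 here is just an illustrative value.
+ */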
+
+/**
+ * @fw_mapping provides the memory remapping table
+ *
+ * The array size should be kept in sync with the declaration in wil6210.h
+ */
+const struct fw_map fw_mapping[] = {
+ {0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM 256k */
+ {0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM 32k */
+ {0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */
+ {0x880000, 0x88a000, 0x880000, "rgf"}, /* various RGF 40k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf 4k */
+ {0x8c0000, 0x949000, 0x8c0000, "upper"}, /* upper area 548k */
+ /*
+ * 920000..930000 ucode code RAM
+ * 930000..932000 ucode data RAM
+ * 932000..949000 back-door debug data
+ */
+};
+
+/**
+ * Return the AHB address for a given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
+
+/**
+ * Check address validity for a WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * A valid buffer must be DWORD aligned
+ *
+ * Return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .mid = 0,
+ .id = cpu_to_le16(cmdid),
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+
+ if (sizeof(cmd) + len > r->entry_size) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
+ return -EAGAIN;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+ /* read the Tx head until it is not busy */
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ return -EBUSY;
+ }
+ /* next head */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+ /* wait until the FW finishes the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ r->tail = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ return -EBUSY;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ return -EINVAL;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ iowrite32(1, wil->csr + HOSTADDR(r->head) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* advance next ptr */
+ iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.head));
+
+ trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
+
+ /* interrupt to FW */
+ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+ return 0;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_ready_event *evt = d;
+
+ wil->fw_version = le32_to_cpu(evt->sw_version);
+ wil->n_mids = evt->numof_additional_mids;
+
+ wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ evt->mac, wil->n_mids);
+ /* ignore MAC address, we already have it from the boot loader */
+ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+ "%d", wil->fw_version);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ wil_dbg_wmi(wil, "WMI: got FW ready event\n");
+
+ wil_set_recovery_state(wil, fw_recovery_idle);
+ set_bit(wil_status_fwready, wil->status);
+ /* let the reset sequence continue */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int ch_no = data->info.channel+1;
+ u32 freq = ieee80211_channel_to_frequency(ch_no,
+ IEEE80211_BAND_60GHZ);
+ struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+ s32 signal = data->info.sqi;
+ __le16 fc = rx_mgmt_frame->frame_control;
+ u32 d_len = le32_to_cpu(data->info.len);
+ u16 d_status = le16_to_cpu(data->info.status);
+
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+ data->info.channel, data->info.mcs, data->info.snr,
+ data->info.sqi);
+ wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ le16_to_cpu(fc));
+ wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+ wil_dbg_wmi(wil, "TSF : 0x%016llx\n", tsf);
+ wil_dbg_wmi(wil, "Beacon interval : %d\n", bi);
+ wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
+ ie_len, true);
+
+ bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+ d_len, signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+ } else {
+ cfg80211_rx_mgmt(wil->wdev, freq, signal,
+ (void *)rx_mgmt_frame, d_len, 0);
+ }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ if (wil->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ bool aborted = (data->status != WMI_SCAN_SUCCESS);
+
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
+ wil->scan_request, aborted);
+
+ del_timer_sync(&wil->scan_timer);
+ cfg80211_scan_done(wil->scan_request, aborted);
+ wil->scan_request = NULL;
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ if (evt->cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
+ return;
+ }
+
+ ch = evt->channel + 1;
+ wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
+ wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out the IEs */
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (!test_bit(wil_status_fwconnecting, wil->status)) {
+ wil_err(wil, "Not in connecting state\n");
+ return;
+ }
+ del_timer_sync(&wil->connect_timer);
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo.assoc_req_ies = assoc_req_ie;
+ sinfo.assoc_req_ies_len = assoc_req_ielen;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ }
+ clear_bit(wil_status_fwconnecting, wil->status);
+ set_bit(wil_status_fwconnected, wil->status);
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
+ wil->sta[evt->cid].status = wil_sta_conn_pending;
+
+ wil->pending_connect_cid = evt->cid;
+ queue_work(wil->wq_service, &wil->connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_disconnect_event *evt = d;
+ u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
+
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ evt->bssid, reason_code, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(wil, evt->bssid, reason_code, true);
+ mutex_unlock(&wil->mutex);
+}
+
+/*
+ * The firmware reports EAPOL frames using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via the normal Rx path
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ int cid;
+ struct wil_net_stats *stats = NULL;
+
+ wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
+ evt->src_mac);
+
+ cid = wil_find_cid(wil, evt->src_mac);
+ if (cid >= 0)
+ stats = &wil->sta[cid].stats;
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
+
+ eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ ether_addr_copy(eth->h_dest, ndev->dev_addr);
+ ether_addr_copy(eth->h_source, evt->src_mac);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += sz;
+ if (stats) {
+ stats->rx_packets++;
+ stats->rx_bytes += sz;
+ }
+ } else {
+ ndev->stats.rx_dropped++;
+ if (stats)
+ stats->rx_dropped++;
+ }
+}
+
+static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+{
+ struct vring_tx_data *t;
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (cid != wil->vring2cid_tid[i][0])
+ continue;
+ t = &wil->vring_tx_data[i];
+ if (!t->enabled)
+ continue;
+
+ wil_addba_tx_request(wil, i, wsize);
+ }
+}
+
+static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_data_port_open_event *evt = d;
+ u8 cid = evt->cid;
+
+ wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
+
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link UP for invalid CID %d\n", cid);
+ return;
+ }
+
+ wil->sta[cid].data_port_open = true;
+ if (agg_wsize >= 0)
+ wil_addba_tx_cid(wil, cid, agg_wsize);
+}
+
+static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_wbe_link_down_event *evt = d;
+ u8 cid = evt->cid;
+
+ wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
+ cid, le32_to_cpu(evt->reason));
+
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+ return;
+ }
+
+ wil->sta[cid].data_port_open = false;
+ netif_carrier_off(ndev);
+}
+
+static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ struct wmi_vring_ba_status_event *evt = d;
+ struct vring_tx_data *txdata;
+
+ wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
+ evt->ringid,
+ evt->status == WMI_BA_AGREED ? "OK" : "N/A",
+ evt->agg_wsize, __le16_to_cpu(evt->ba_timeout),
+ evt->amsdu ? "+" : "-");
+
+ if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
+ wil_err(wil, "invalid ring id %d\n", evt->ringid);
+ return;
+ }
+
+ if (evt->status != WMI_BA_AGREED) {
+ evt->ba_timeout = 0;
+ evt->agg_wsize = 0;
+ evt->amsdu = 0;
+ }
+
+ txdata = &wil->vring_tx_data[evt->ringid];
+
+ txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
+ txdata->agg_wsize = evt->agg_wsize;
+ txdata->agg_amsdu = evt->amsdu;
+ txdata->addba_in_progress = false;
+}
+
+static void wmi_evt_addba_rx_req(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ struct wmi_rcp_addba_req_event *evt = d;
+
+ wil_addba_rx_request(wil, evt->cidxtid, evt->dialog_token,
+ evt->ba_param_set, evt->ba_timeout,
+ evt->ba_seq_ctrl);
+}
+
+static void wmi_evt_delba(struct wil6210_priv *wil, int id, void *d, int len)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wmi_delba_event *evt = d;
+ u8 cid, tid;
+ u16 reason = __le16_to_cpu(evt->reason);
+ struct wil_sta_info *sta;
+ struct wil_tid_ampdu_rx *r;
+
+ might_sleep();
+ parse_cidxtid(evt->cidxtid, &cid, &tid);
+ wil_dbg_wmi(wil, "DELBA CID %d TID %d from %s reason %d\n",
+ cid, tid,
+ evt->from_initiator ? "originator" : "recipient",
+ reason);
+ if (!evt->from_initiator) {
+ int i;
+ /* find Tx vring it belongs to */
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if ((wil->vring2cid_tid[i][0] == cid) &&
+ (wil->vring2cid_tid[i][1] == tid)) {
+ struct vring_tx_data *txdata =
+ &wil->vring_tx_data[i];
+
+ wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
+ txdata->agg_timeout = 0;
+ txdata->agg_wsize = 0;
+ txdata->addba_in_progress = false;
+
+ break; /* max. 1 matching ring */
+ }
+ }
+ if (i >= ARRAY_SIZE(wil->vring2cid_tid))
+ wil_err(wil, "DELBA: unable to find Tx vring\n");
+ return;
+ }
+
+ sta = &wil->sta[cid];
+
+ spin_lock_bh(&sta->tid_rx_lock);
+
+ r = sta->tid_rx[tid];
+ sta->tid_rx[tid] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+
+ spin_unlock_bh(&sta->tid_rx_lock);
+}
+
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_priv *wil, int eventid,
+ void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+ {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
+ {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
+ {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
+ {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
+ {WMI_DELBA_EVENTID, wmi_evt_delba},
+};
+
+/*
+ * Runs in the context of the threaded IRQ handler.
+ * Extracts WMI events from the mailbox and queues them to @wil->pending_wmi_ev,
+ * to be handled eventually by @wmi_event_worker in the context of the
+ * "wil6210_wmi" thread
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+ unsigned n;
+
+ if (!test_bit(wil_status_reset_done, wil->status)) {
+ wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
+ return;
+ }
+
+ for (n = 0;; n++) {
+ u16 len;
+ bool q;
+
+ r->head = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ break;
+
+ wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
+ r->head, r->tail);
+ /* read cmd descriptor from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ break;
+ }
+
+ /* read cmd header from descriptor */
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ break;
+ }
+ len = le16_to_cpu(hdr.len);
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+
+ /* read cmd buffer from descriptor */
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt)
+ break;
+
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* indicate */
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->id);
+ u32 tstamp = le32_to_cpu(wmi->timestamp);
+
+ wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
+ id, wmi->mid, tstamp);
+ trace_wil6210_wmi_event(wmi, &wmi[1],
+ len - sizeof(*wmi));
+ }
+ wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ iowrite32(r->tail, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ }
+ /* normally, 1 event per IRQ should be processed */
+ wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n);
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+ int rc;
+ int remain;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ rc = __wmi_send(wil, cmdid, buf, len);
+ if (rc)
+ goto out;
+
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ remain = wait_for_completion_timeout(&wil->wmi_call,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_wmi(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+ wil->reply_id = 0;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ out:
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+ struct wmi_set_mac_address_cmd cmd;
+
+ ether_addr_copy(cmd.mac, addr);
+
+ wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
+{
+ int rc;
+
+ struct wmi_pcp_start_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ .channel = chan - 1,
+ .pcp_max_assoc_sta = max_assoc_sta,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_pcp_started_event evt;
+ } __packed reply;
+
+ if (!wil->privacy)
+ cmd.disable_sec = 1;
+
+ if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) ||
+ (cmd.pcp_max_assoc_sta <= 0)) {
+ wil_info(wil,
+ "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n",
+ max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID);
+ cmd.pcp_max_assoc_sta = WIL6210_MAX_CID;
+ }
+
+ /*
+ * Processing time may be long; in the case of a secure AP it takes
+ * about 3500 ms for the FW to start the AP
+ */
+ rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
+ WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int wmi_pcp_stop(struct wil6210_priv *wil)
+{
+ return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
+ WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+ &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_p2p_cfg_cmd cmd = {
+ .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr)
+{
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key)
+{
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_len = key_len,
+ };
+
+ if (!key || (key_len > sizeof(cmd.key)))
+ return -EINVAL;
+
+ memcpy(cmd.key, key, key_len);
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+
+ if (!cmd)
+ return -ENOMEM;
+ if (!ie)
+ ie_len = 0;
+
+ cmd->mgmt_frm_type = type;
+ /* BUG: the FW API defines ieLen as u8. Will be fixed in FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+ kfree(cmd);
+
+ return rc;
+}
+
+/**
+ * wmi_rxon - turn radio on/off
+ * @on: turn on if true, off otherwise
+ *
+ * Only switches the radio. The channel should be set separately.
+ * There is no timeout for rxon - the radio stays on until some other call
+ * turns it off
+ */
+int wmi_rxon(struct wil6210_priv *wil, bool on)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply;
+
+ wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+
+ if (on) {
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID,
+ &reply, sizeof(reply), 100);
+ if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
+ rc = -EINVAL;
+ } else {
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+ }
+
+ return rc;
+}
+
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_mem_base = cpu_to_le64(vring->pa),
+ .ring_size = cpu_to_le16(vring->size),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .reorder_type = WMI_RX_SW_REORDER,
+ .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+ int rc;
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ } else {
+ /* Initialize offload (in non-sniffer mode).
+ * The Linux IP stack always calculates the IP checksum;
+ * the HW always calculates the TCP/UDP checksum
+ */
+ cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
+ }
+
+ if (rx_align_2)
+ cmd.l2_802_3_offload_ctrl |=
+ L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
+
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ return rc;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
+{
+ int rc;
+ struct wmi_temp_sense_cmd cmd = {
+ .measure_baseband_en = cpu_to_le32(!!t_bb),
+ .measure_rf_en = cpu_to_le32(!!t_rf),
+ .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_temp_sense_done_event evt;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
+ WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ return rc;
+
+ if (t_bb)
+ *t_bb = le32_to_cpu(reply.evt.baseband_t1000);
+ if (t_rf)
+ *t_rf = le32_to_cpu(reply.evt.rf_t1000);
+
+ return 0;
+}
+
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
+{
+ struct wmi_disconnect_sta_cmd cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+
+ ether_addr_copy(cmd.dst_mac, mac);
+
+ wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+
+ return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
+{
+ struct wmi_vring_ba_en_cmd cmd = {
+ .ringid = ringid,
+ .agg_max_wsize = size,
+ .ba_timeout = cpu_to_le16(timeout),
+ .amsdu = 0,
+ };
+
+ wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
+ ringid, size, timeout);
+
+ return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
+{
+ struct wmi_vring_ba_dis_cmd cmd = {
+ .ringid = ringid,
+ .reason = cpu_to_le16(reason),
+ };
+
+ wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
+ ringid, reason);
+
+ return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
+{
+ struct wmi_rcp_delba_cmd cmd = {
+ .cidxtid = cidxtid,
+ .reason = cpu_to_le16(reason),
+ };
+
+ wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
+ cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+
+ return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
+ u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
+{
+ int rc;
+ struct wmi_rcp_addba_resp_cmd cmd = {
+ .cidxtid = mk_cidxtid(cid, tid),
+ .dialog_token = token,
+ .status_code = cpu_to_le16(status),
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
+ (agg_wsize << 6)),
+ .ba_timeout = cpu_to_le16(timeout),
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_rcp_addba_resp_sent_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil,
+ "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
+ cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
+
+ rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
+ WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status) {
+ wil_err(wil, "ADDBA response failed with status %d\n",
+ le16_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_wmi(wil, "%s()\n", __func__);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(wil, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->id);
+
+ wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
+ id, wil->reply_id);
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ } else {
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ }
+ wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
+ complete(&wil->wmi_call);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_err(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ wil_dbg_wmi(wil, "Start %s\n", __func__);
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+ wil_dbg_wmi(wil, "Finished %s\n", __func__);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000..b29055315
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,1352 @@
+/*
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+#define WILOCITY_MAX_ASSOC_STA (8)
+#define WILOCITY_DEFAULT_ASSOC_STA (1)
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+#define WMI_MAX_LOSS_DMG_BEACONS (32)
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_DISCONNECT_STA_CMDID = 0x0004,
+ WMI_START_SCAN_CMDID = 0x0007,
+ WMI_SET_BSS_FILTER_CMDID = 0x0009,
+ WMI_SET_PROBED_SSID_CMDID = 0x000a,
+ WMI_SET_LISTEN_INT_CMDID = 0x000b,
+ WMI_BCON_CTRL_CMDID = 0x000f,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
+ WMI_SET_APPIE_CMDID = 0x003f,
+ WMI_SET_WSC_STATUS_CMDID = 0x0041,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
+/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */
+ WMI_MEM_READ_CMDID = 0x0800,
+ WMI_MEM_WR_CMDID = 0x0801,
+ WMI_ECHO_CMDID = 0x0803,
+ WMI_DEEP_ECHO_CMDID = 0x0804,
+ WMI_CONFIG_MAC_CMDID = 0x0805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
+ WMI_FS_TUNE_CMDID = 0x080a,
+ WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_READ_RSSI_CMDID = 0x080c,
+ WMI_TEMP_SENSE_CMDID = 0x080e,
+ WMI_DC_CALIB_CMDID = 0x080f,
+ WMI_SEND_TONE_CMDID = 0x0810,
+ WMI_IQ_TX_CALIB_CMDID = 0x0811,
+ WMI_IQ_RX_CALIB_CMDID = 0x0812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x0813,
+ WMI_SET_WORK_MODE_CMDID = 0x0815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
+ WMI_MARLON_R_READ_CMDID = 0x0818,
+ WMI_MARLON_R_WRITE_CMDID = 0x0819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_RF_RX_TEST_CMDID = 0x081e,
+ WMI_CFG_RX_CHAIN_CMDID = 0x0820,
+ WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x0822,
+ WMI_VRING_BA_EN_CMDID = 0x0823,
+ WMI_VRING_BA_DIS_CMDID = 0x0824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
+ WMI_RCP_DELBA_CMDID = 0x0826,
+ WMI_SET_SSID_CMDID = 0x0827,
+ WMI_GET_SSID_CMDID = 0x0828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
+ WMI_SW_TX_REQ_CMDID = 0x082b,
+ WMI_READ_MAC_RXQ_CMDID = 0x0830,
+ WMI_READ_MAC_TXQ_CMDID = 0x0831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
+ WMI_MLME_PUSH_CMDID = 0x0835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x0837,
+ WMI_BF_SM_MGMT_CMDID = 0x0838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_BF_TRIG_CMDID = 0x083A,
+ WMI_SET_SECTORS_CMDID = 0x0849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x0851,
+ WMI_RS_MGMT_CMDID = 0x0852,
+ WMI_RF_MGMT_CMDID = 0x0853,
+ WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854,
+ WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x0862,
+ WMI_NOTIFY_REQ_CMDID = 0x0863,
+ WMI_GET_STATUS_CMDID = 0x0864,
+ WMI_UNIT_TEST_CMDID = 0x0900,
+ WMI_HICCUP_CMDID = 0x0901,
+ WMI_FLASH_READ_CMDID = 0x0902,
+ WMI_FLASH_WRITE_CMDID = 0x0903,
+ WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+ /* P2P */
+ WMI_P2P_CFG_CMDID = 0x0910,
+ WMI_PORT_ALLOCATE_CMDID = 0x0911,
+ WMI_PORT_DELETE_CMDID = 0x0912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x0913,
+ WMI_START_LISTEN_CMDID = 0x0914,
+ WMI_START_SEARCH_CMDID = 0x0915,
+ WMI_DISCOVERY_START_CMDID = 0x0916,
+ WMI_DISCOVERY_STOP_CMDID = 0x0917,
+ WMI_PCP_START_CMDID = 0x0918,
+ WMI_PCP_STOP_CMDID = 0x0919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x091b,
+
+ WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
+ WMI_ABORT_SCAN_CMDID = 0xf007,
+ WMI_SET_PMK_CMDID = 0xf028,
+
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
+ WMI_GET_PMK_CMDID = 0xf048,
+ WMI_SET_PASSPHRASE_CMDID = 0xf049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
+ WMI_EAPOL_TX_CMDID = 0xf04c,
+ WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
+ WMI_FW_VER_CMDID = 0xf04e,
+ WMI_PMC_CMDID = 0xf04f,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_WEP = 0x02,
+ WMI_CRYPT_TKIP = 0x04,
+ WMI_CRYPT_AES = 0x08,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
+ WMI_CONNECT_SEND_REASSOC = 0x0002,
+ WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_STA_CMDID
+ */
+struct wmi_disconnect_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 disconnect_reason;
+} __packed;
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX (0)
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+#define WMI_PMK_LEN (32)
+
+struct wmi_set_pmk_cmd {
+ u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0,
+ WMI_KEY_USE_GROUP = 1,
+ WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ u8 key_usage; /* enum wmi_key_usage */
+ u8 key_len;
+ u8 key_rsc[8]; /* key replay sequence counter */
+ u8 key[WMI_MAX_KEY_LEN];
+ u8 key_op_ctrl; /* Additional Key Control information */
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+ WMI_PBC_SCAN = 2,
+ WMI_DIRECT_SCAN = 3,
+ WMI_ACTIVE_SCAN = 4,
+};
+
+struct wmi_start_scan_cmd {
+ u8 direct_scan_mac_addr[6];
+ u8 reserved[2];
+ __le32 home_dwell_time; /* Max duration in the home channel (ms) */
+ __le32 force_scan_interval; /* Time interval between scans (ms) */
+ u8 scan_type; /* wmi_scan_type */
+ u8 num_channels; /* how many channels follow */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[0]; /* channel IDs */
+ /* 0 - 58320 MHz */
+ /* 1 - 60480 MHz */
+ /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX (3)
+
+enum wmi_ssid_flag {
+ WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
+ WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
+ WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+ u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 flag; /* enum wmi_ssid_flag */
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add an application-specified IE to a management frame
+ */
+#define WMI_MAX_IE_LEN (1024)
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 reserved;
+ __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ u8 ie_info[0];
+} __packed;
+
+/*
+ * WMI_PXMT_RANGE_CFG_CMDID
+ */
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+/*
+ * WMI_PXMT_SNR2_RANGE_CFG_CMDID
+ */
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0,
+ WMI_RF_MGMT_W_ENABLE = 1,
+ WMI_RF_MGMT_GET_STATUS = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_THERMAL_THROTTLING_CTRL_CMDID
+ */
+#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
+
+struct wmi_thermal_throttling_ctrl_cmd {
+ __le32 time_on_usec;
+ __le32 time_off_usec;
+ __le32 max_txop_length_usec;
+} __packed;
+
+/*
+ * WMI_RF_RX_TEST_CMDID
+ */
+struct wmi_rf_rx_test_cmd {
+ __le32 sector;
+} __packed;
+
+/*
+ * WMI_CORR_MEASURE_CMDID
+ */
+struct wmi_corr_measure_cmd {
+ s32 freq_mhz;
+ __le32 length_samples;
+ __le32 iterations;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 pcp_max_assoc_sta;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/******* P2P ***********/
+
+/*
+ * WMI_PORT_ALLOCATE_CMDID
+ */
+enum wmi_port_role {
+ WMI_PORT_STA = 0,
+ WMI_PORT_PCP = 1,
+ WMI_PORT_AP = 2,
+ WMI_PORT_P2P_DEV = 3,
+ WMI_PORT_P2P_CLIENT = 4,
+ WMI_PORT_P2P_GO = 5,
+};
+
+struct wmi_port_allocate_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 port_role;
+ u8 mid;
+} __packed;
+
+/*
+ * WMI_PORT_DELETE_CMDID
+ */
+struct wmi_delete_port_cmd {
+ u8 mid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_P2P_CFG_CMDID
+ */
+enum wmi_discovery_mode {
+ WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
+ WMI_DISCOVERY_MODE_OFFLOAD = 1,
+ WMI_DISCOVERY_MODE_PEER2PEER = 2,
+};
+
+struct wmi_p2p_cfg_cmd {
+ u8 discovery_mode; /* wmi_discovery_mode */
+ u8 channel;
+ __le16 bcon_interval; /* base for listen/search duration calculation */
+} __packed;
+
+/*
+ * WMI_POWER_MGMT_CFG_CMDID
+ */
+enum wmi_power_source_type {
+ WMI_POWER_SOURCE_BATTERY = 0,
+ WMI_POWER_SOURCE_OTHER = 1,
+};
+
+struct wmi_power_mgmt_cfg_cmd {
+ u8 power_source; /* wmi_power_source_type */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_START_CMDID
+ */
+struct wmi_pcp_start_cmd {
+ __le16 bcon_interval;
+ u8 pcp_max_assoc_sta;
+ u8 reserved0[9];
+ u8 network_type;
+ u8 channel;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0,
+ WMI_VRING_DS_STATION = 1,
+ WMI_VRING_DS_AP = 2,
+ WMI_VRING_DS_ADDR4 = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0,
+ WMI_SCH_PRIO_HIGH = 1,
+};
+
+#define CIDXTID_CID_POS (0)
+#define CIDXTID_CID_LEN (4)
+#define CIDXTID_CID_MSK (0xF)
+#define CIDXTID_TID_POS (4)
+#define CIDXTID_TID_LEN (4)
+#define CIDXTID_TID_MSK (0xF0)
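+
+/* The cidxtid byte packs the CID in bits 0..3 and the TID in bits 4..7;
+ * an illustrative expression matching the masks above and the
+ * mk_cidxtid()/parse_cidxtid() helpers used elsewhere in this patch:
+ *
+ *   cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ *             ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ */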
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+
+ u8 cidxtid;
+
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+ u8 mac_ctrl;
+
+ #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0,
+ WMI_VRING_CMD_MODIFY = 1,
+ WMI_VRING_CMD_DELETE = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_BCAST_VRING_CFG_CMDID
+ */
+struct wmi_bcast_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+} __packed;
+
+struct wmi_bcast_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_bcast_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+ u8 ringid;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+ u8 amsdu;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+ u8 ringid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 year;
+ u8 month;
+ u8 day;
+ __le32 interval_usec;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 miliseconds;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0,
+ WMI_SNIFFER_ON = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0,
+ WMI_SNIFFER_DP = 1,
+ WMI_SNIFFER_BOTH_PHYS = 2,
+};
+
+struct wmi_sniffer_cfg {
+ __le32 mode; /* enum wmi_sniffer_cfg_mode */
+ __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0,
+ WMI_RX_CHAIN_DEL = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+ WMI_DECAP_TYPE_NONE = 2,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+};
+
+enum wmi_cfg_rx_chain_cmd_reorder_type {
+ WMI_RX_HW_REORDER = 0,
+ WMI_RX_SW_REORDER = 1,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
+ u8 l2_802_3_offload_ctrl;
+
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+ u8 l2_nwifi_offload_ctrl;
+
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+ u8 l3_l4_ctrl;
+
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+ u8 ring_ctrl;
+
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reorder_type;
+ u8 reserved;
+ struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
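+/*
+ * Illustrative sketch, not part of the WMI interface definition: all
+ * multi-byte fields travel little-endian, so host values must be
+ * converted with cpu_to_le16()/cpu_to_le32().  The RING_CTRL_OVERRIDE_*
+ * bits are assumed to tell FW to use the corresponding threshold field
+ * of the command instead of its default; the 320 below is an arbitrary
+ * example value.  A minimal fill could look like:
+ *
+ *	struct wmi_cfg_rx_chain_cmd cmd = {
+ *		.action = cpu_to_le32(WMI_RX_CHAIN_ADD),
+ *		.decap_trans_type = WMI_DECAP_TYPE_802_3,
+ *		.ring_ctrl = 1 << RING_CTRL_OVERRIDE_ITR_THRSH_POS,
+ *		.itr_value = cpu_to_le32(320),
+ *		.reorder_type = WMI_RX_HW_REORDER,
+ *	};
+ */
+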
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+ u8 cidxtid;
+ u8 dialog_token;
+ __le16 status_code;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ __le16 ba_timeout;
+} __packed;
+
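+/*
+ * Illustrative sketch, not part of the WMI interface definition: cidxtid
+ * is assumed here to pack the connection id in the low nibble and the
+ * TID in the high nibble; a hypothetical helper would be:
+ *
+ *	static inline u8 wmi_mk_cidxtid(u8 cid, u8 tid)
+ *	{
+ *		return (cid & 0xf) | ((tid & 0xf) << 4);
+ *	}
+ */
+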
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+ u8 cidxtid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+ u8 cidxtid;
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqctrl field as received */
+ __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+/*
+ * WMI_EAPOL_TX_CMDID
+ */
+struct wmi_eapol_tx_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
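+/*
+ * Illustrative sketch, not part of the WMI interface definition: eapol[]
+ * is a variable-length trailer, so the command is sized as
+ * sizeof(struct) + payload length.  dst/eapol/eapol_len/wil/rc are
+ * caller context and wmi_send() stands for whatever transport helper
+ * the driver uses (hypothetical names):
+ *
+ *	struct wmi_eapol_tx_cmd *cmd;
+ *	u16 len = sizeof(*cmd) + eapol_len;
+ *
+ *	cmd = kzalloc(len, GFP_KERNEL);
+ *	if (!cmd)
+ *		return -ENOMEM;
+ *	memcpy(cmd->dst_mac, dst, WMI_MAC_LEN);
+ *	cmd->eapol_len = cpu_to_le16(eapol_len);
+ *	memcpy(cmd->eapol, eapol, eapol_len);
+ *	rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, len);
+ *	kfree(cmd);
+ */
+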
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check that the FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check that the FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * The same event is returned for both commands
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
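+/*
+ * Illustrative sketch, not part of the WMI interface definition: a
+ * keep-alive check sends an arbitrary value and expects the same value
+ * back in the WMI_ECHO_RSP_EVENTID reply:
+ *
+ *	struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678) };
+ *
+ * The reply's wmi_echo_event.echoed_value should equal cmd.value.
+ */
+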
+/*
+ * WMI_TEMP_SENSE_CMDID
+ *
+ * Measure MAC and radio temperatures
+ */
+
+/* Possible modes for temperature measurement */
+enum wmi_temperature_measure_mode {
+ TEMPERATURE_USE_OLD_VALUE = 0x1,
+ TEMPERATURE_MEASURE_NOW = 0x2,
+};
+
+struct wmi_temp_sense_cmd {
+ __le32 measure_baseband_en;
+ __le32 measure_rf_en;
+ __le32 measure_mode;
+} __packed;
+
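+/*
+ * Illustrative sketch, not part of the WMI interface definition:
+ * request a fresh measurement of both sensors; the result arrives in
+ * WMI_TEMP_SENSE_DONE_EVENTID:
+ *
+ *	struct wmi_temp_sense_cmd cmd = {
+ *		.measure_baseband_en = cpu_to_le32(1),
+ *		.measure_rf_en = cpu_to_le32(1),
+ *		.measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
+ *	};
+ */
+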
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100a,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180a,
+ WMI_CORR_MEASURE_EVENTID = 0x180b,
+ WMI_READ_RSSI_EVENTID = 0x180c,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
+
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
+
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ /* P2P */
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191a,
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+};
+
+/*
+ * Event data structures
+ */
+
+enum wmi_fw_status {
+ WMI_FW_STATUS_SUCCESS,
+ WMI_FW_STATUS_FAILURE,
+};
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0,
+ WMI_RF_DISABLED_HW = 1,
+ WMI_RF_DISABLED_SW = 2,
+ WMI_RF_DISABLED_HW_SW = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_THERMAL_THROTTLING_STATUS_EVENTID
+ */
+struct wmi_thermal_throttling_status_event {
+ __le32 time_on_usec;
+ __le32 time_off_usec;
+ __le32 max_txop_length_usec;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+ u8 major;
+ u8 minor;
+ __le16 subminor;
+ __le16 build;
+} __packed;
+
+/*
+ * WMI_MAC_ADDR_RESP_EVENTID
+ */
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/*
+ * WMI_EAPOL_RX_EVENTID
+ */
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_READY_EVENTID
+ */
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ WMI_11AD_CAPABILITY = 7,
+ WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ u8 phy_capability; /* enum wmi_phy_capability */
+ u8 numof_additional_mids;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+ __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ __le64 tsf;
+ __le32 snr_val;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+ u8 sqi;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 reserved2[3];
+ u8 assoc_info[0];
+} __packed;
+
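+/*
+ * Illustrative sketch, not part of the WMI interface definition:
+ * assoc_info[] is assumed to carry the beacon IEs, the association
+ * request and the association response back to back, in that order,
+ * with the three lengths above delimiting them:
+ *
+ *	const u8 *bcn_ies    = evt->assoc_info;
+ *	const u8 *assoc_req  = bcn_ies + evt->beacon_ie_len;
+ *	const u8 *assoc_resp = assoc_req + evt->assoc_req_len;
+ */
+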
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
+ WMI_DIS_REASON_LOST_LINK = 2, /* beacon miss */
+ WMI_DIS_REASON_DISCONNECT_CMD = 3,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 4,
+ WMI_DIS_REASON_AUTH_FAILED = 5,
+ WMI_DIS_REASON_ASSOC_FAILED = 6,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 8,
+ WMI_DIS_REASON_INVALID_PROFILE = 10,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 12,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 13,
+ WMI_DIS_REASON_IBSS_MERGE = 14,
+};
+
+struct wmi_disconnect_event {
+ __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
+ u8 bssid[WMI_MAC_LEN]; /* set if known */
+ u8 disconnect_reason; /* see wmi_disconnect_reason */
+ u8 assoc_resp_len; /* not used */
+ u8 assoc_info[0]; /* not used */
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+enum scan_status {
+ WMI_SCAN_SUCCESS = 0,
+ WMI_SCAN_FAILED = 1,
+ WMI_SCAN_ABORTED = 2,
+ WMI_SCAN_REJECTED = 3,
+};
+
+struct wmi_scan_complete_event {
+ __le32 status; /* scan_status */
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0,
+ WMI_BA_NON_AGREED = 1,
+ /* BA_EN received in the middle of a teardown flow */
+ WMI_BA_TD_WIP = 2,
+ /* BA_DIS or BA_EN received in the middle of a BA setup flow */
+ WMI_BA_SETUP_WIP = 3,
+ /* BA_EN when the BA session is already active */
+ WMI_BA_SESSION_ACTIVE = 4,
+ /* BA_DIS when the BA session is not active */
+ WMI_BA_SESSION_NOT_ACTIVE = 5,
+};
+
+struct wmi_vring_ba_status_event {
+ __le16 status; /* enum wmi_vring_ba_status */
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+ u8 amsdu;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+ u8 cidxtid;
+ u8 from_initiator;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+struct wmi_rcp_addba_resp_sent_event {
+ u8 cidxtid;
+ u8 reserved;
+ __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+ u8 cidxtid;
+ u8 dialog_token;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset as received */
+ __le16 ba_timeout;
+ __le16 ba_seq_ctrl; /* ieee80211_ba_seqctrl field as received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0,
+ WMI_WBE_REASON_RX_DISASSOC = 1,
+ WMI_WBE_REASON_BAD_PHY_LINK = 2,
+};
+
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_P2P_CFG_DONE_EVENTID
+ */
+struct wmi_p2p_cfg_done_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PORT_ALLOCATED_EVENTID
+ */
+struct wmi_port_allocated_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PORT_DELETED_EVENTID
+ */
+struct wmi_port_deleted_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_LISTEN_STARTED_EVENTID
+ */
+struct wmi_listen_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SEARCH_STARTED_EVENTID
+ */
+struct wmi_search_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_STARTED_EVENTID
+ */
+struct wmi_pcp_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_FACTOR_EVENTID
+ */
+struct wmi_pcp_factor_event {
+ __le32 pcp_factor;
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
+ WMI_TX_SW_STATUS_FAILED_TX = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+ u8 status; /* enum wmi_sw_tx_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_CORR_MEASURE_EVENTID
+ */
+struct wmi_corr_measure_event {
+ s32 i;
+ s32 q;
+ s32 image_i;
+ s32 image_q;
+} __packed;
+
+/*
+ * WMI_READ_RSSI_EVENTID
+ */
+struct wmi_read_rssi_event {
+ __le32 ina_rssi_adc_dbm;
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 snr;
+ u8 range;
+ u8 sqi;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ u8 qid;
+ u8 mid;
+ u8 cid;
+ u8 channel; /* From Radio MNGR */
+} __packed;
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+ u8 payload[0];
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
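+/*
+ * Illustrative sketch, not part of the WMI interface definition: the
+ * received management frame follows the fixed info header and its
+ * length is carried in info.len (little-endian).  'ev_data' is a
+ * hypothetical pointer to the raw event payload:
+ *
+ *	const struct wmi_rx_mgmt_packet_event *evt = ev_data;
+ *	size_t frame_len = le32_to_cpu(evt->info.len);
+ *	const u8 *frame = evt->payload;
+ */
+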
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+ __le32 echoed_value;
+} __packed;
+
+/*
+ * WMI_TEMP_SENSE_DONE_EVENTID
+ *
+ * Measure MAC and radio temperatures
+ */
+struct wmi_temp_sense_done_event {
+ __le32 baseband_t1000;
+ __le32 rf_t1000;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */